vendor files

vendor/k8s.io/kubernetes/pkg/kubemark/.import-restrictions (generated, vendored, new file, 13 lines)
@@ -0,0 +1,13 @@
{
  "Rules": [
    {
      "SelectorRegexp": "k8s[.]io/kubernetes/cmd",
      "AllowedPrefixes": [
        "k8s.io/kubernetes/cmd/kube-proxy/app",
        "k8s.io/kubernetes/cmd/kubelet/app"
      ],
      "ForbiddenPrefixes": [
      ]
    }
  ]
}
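
For illustration only (not part of the vendored tree): the rule above means code under pkg/kubemark may import the two whitelisted k8s.io/kubernetes/cmd packages, which the vendored Go sources below rely on, while any other cmd package would be flagged by the import verifier.

package kubemark

// Allowed: both prefixes are listed in AllowedPrefixes above.
import (
	_ "k8s.io/kubernetes/cmd/kube-proxy/app"
	_ "k8s.io/kubernetes/cmd/kubelet/app"
)

// An import such as "k8s.io/kubernetes/cmd/kubeadm/app" would match
// SelectorRegexp but none of the allowed prefixes, so the import
// restriction check would reject it.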

vendor/k8s.io/kubernetes/pkg/kubemark/BUILD (generated, vendored, new file, 71 lines)
@@ -0,0 +1,71 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "controller.go",
        "hollow_kubelet.go",
        "hollow_proxy.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/kubemark",
    deps = [
        "//cmd/kube-proxy/app:go_default_library",
        "//cmd/kubelet/app:go_default_library",
        "//cmd/kubelet/app/options:go_default_library",
        "//pkg/apis/core:go_default_library",
        "//pkg/client/clientset_generated/internalclientset:go_default_library",
        "//pkg/controller:go_default_library",
        "//pkg/kubelet:go_default_library",
        "//pkg/kubelet/apis/kubeletconfig:go_default_library",
        "//pkg/kubelet/cadvisor:go_default_library",
        "//pkg/kubelet/cm:go_default_library",
        "//pkg/kubelet/container/testing:go_default_library",
        "//pkg/kubelet/dockershim:go_default_library",
        "//pkg/kubelet/types:go_default_library",
        "//pkg/proxy:go_default_library",
        "//pkg/proxy/config:go_default_library",
        "//pkg/proxy/iptables:go_default_library",
        "//pkg/util/io:go_default_library",
        "//pkg/util/iptables:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/node:go_default_library",
        "//pkg/util/oom:go_default_library",
        "//pkg/util/pointer:go_default_library",
        "//pkg/util/sysctl:go_default_library",
        "//pkg/volume/empty_dir:go_default_library",
        "//pkg/volume/secret:go_default_library",
        "//test/utils:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
        "//vendor/k8s.io/utils/exec:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/pkg/kubemark/OWNERS (generated, vendored, new file, 8 lines)
@@ -0,0 +1,8 @@
reviewers:
- gmarek
- shyamjvs
- wojtek-t
approvers:
- gmarek
- shyamjvs
- wojtek-t

vendor/k8s.io/kubernetes/pkg/kubemark/controller.go (generated, vendored, new file, 401 lines)
@@ -0,0 +1,401 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubemark

import (
	"fmt"
	"math/rand"
	"sync"
	"time"

	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	informersv1 "k8s.io/client-go/informers/core/v1"
	kubeclient "k8s.io/client-go/kubernetes"
	listersv1 "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/kubernetes/pkg/controller"

	"github.com/golang/glog"
)

const (
	namespaceKubemark = "kubemark"
	nodeGroupLabel    = "autoscaling.k8s.io/nodegroup"
	numRetries        = 3
)

// KubemarkController is a simplified version of cloud provider for kubemark. It allows
// to add and delete nodes from a kubemark cluster and introduces nodegroups
// by applying labels to the kubemark's hollow-nodes.
type KubemarkController struct {
	nodeTemplate           *apiv1.ReplicationController
	externalCluster        externalCluster
	kubemarkCluster        kubemarkCluster
	rand                   *rand.Rand
	createNodeQueue        chan string
	nodeGroupQueueSize     map[string]int
	nodeGroupQueueSizeLock sync.Mutex
}

// externalCluster is used to communicate with the external cluster that hosts
// kubemark, in order to be able to list, create and delete hollow nodes
// by manipulating the replication controllers.
type externalCluster struct {
	rcLister  listersv1.ReplicationControllerLister
	rcSynced  cache.InformerSynced
	podLister listersv1.PodLister
	podSynced cache.InformerSynced
	client    kubeclient.Interface
}

// kubemarkCluster is used to delete nodes from kubemark cluster once their
// respective replication controllers have been deleted and the nodes have
// become unready. This is to cover for the fact that there is no proper cloud
// provider for kubemark that would care for deleting the nodes.
type kubemarkCluster struct {
	client            kubeclient.Interface
	nodeLister        listersv1.NodeLister
	nodeSynced        cache.InformerSynced
	nodesToDelete     map[string]bool
	nodesToDeleteLock sync.Mutex
}

// NewKubemarkController creates KubemarkController using the provided clients to talk to external
// and kubemark clusters.
func NewKubemarkController(externalClient kubeclient.Interface, externalInformerFactory informers.SharedInformerFactory,
	kubemarkClient kubeclient.Interface, kubemarkNodeInformer informersv1.NodeInformer) (*KubemarkController, error) {
	rcInformer := externalInformerFactory.InformerFor(&apiv1.ReplicationController{}, newReplicationControllerInformer)
	podInformer := externalInformerFactory.InformerFor(&apiv1.Pod{}, newPodInformer)
	controller := &KubemarkController{
		externalCluster: externalCluster{
			rcLister:  listersv1.NewReplicationControllerLister(rcInformer.GetIndexer()),
			rcSynced:  rcInformer.HasSynced,
			podLister: listersv1.NewPodLister(podInformer.GetIndexer()),
			podSynced: podInformer.HasSynced,
			client:    externalClient,
		},
		kubemarkCluster: kubemarkCluster{
			nodeLister:        kubemarkNodeInformer.Lister(),
			nodeSynced:        kubemarkNodeInformer.Informer().HasSynced,
			client:            kubemarkClient,
			nodesToDelete:     make(map[string]bool),
			nodesToDeleteLock: sync.Mutex{},
		},
		rand:                   rand.New(rand.NewSource(time.Now().UTC().UnixNano())),
		createNodeQueue:        make(chan string, 1000),
		nodeGroupQueueSize:     make(map[string]int),
		nodeGroupQueueSizeLock: sync.Mutex{},
	}

	kubemarkNodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		UpdateFunc: controller.kubemarkCluster.removeUnneededNodes,
	})

	return controller, nil
}

// WaitForCacheSync waits until all caches in the controller are populated.
func (kubemarkController *KubemarkController) WaitForCacheSync(stopCh chan struct{}) bool {
	return controller.WaitForCacheSync("kubemark", stopCh,
		kubemarkController.externalCluster.rcSynced,
		kubemarkController.externalCluster.podSynced,
		kubemarkController.kubemarkCluster.nodeSynced)
}

// Run populates the node template needed for creation of kubemark nodes and
// starts the worker routine for creating new nodes.
func (kubemarkController *KubemarkController) Run(stopCh chan struct{}) {
	nodeTemplate, err := kubemarkController.getNodeTemplate()
	if err != nil {
		glog.Fatalf("failed to get node template: %s", err)
	}
	kubemarkController.nodeTemplate = nodeTemplate

	go kubemarkController.runNodeCreation(stopCh)
	<-stopCh
}

// GetNodeNamesForNodeGroup returns list of the nodes in the node group.
func (kubemarkController *KubemarkController) GetNodeNamesForNodeGroup(nodeGroup string) ([]string, error) {
	selector := labels.SelectorFromSet(labels.Set{nodeGroupLabel: nodeGroup})
	pods, err := kubemarkController.externalCluster.podLister.List(selector)
	if err != nil {
		return nil, err
	}
	result := make([]string, 0, len(pods))
	for _, pod := range pods {
		result = append(result, pod.ObjectMeta.Name)
	}
	return result, nil
}

// GetNodeGroupSize returns the current size for the node group as observed.
func (kubemarkController *KubemarkController) GetNodeGroupSize(nodeGroup string) (int, error) {
	selector := labels.SelectorFromSet(labels.Set(map[string]string{nodeGroupLabel: nodeGroup}))
	nodes, err := kubemarkController.externalCluster.rcLister.List(selector)
	if err != nil {
		return 0, err
	}
	return len(nodes), nil
}

// GetNodeGroupTargetSize returns the size of the node group as a sum of current
// observed size and number of upcoming nodes.
func (kubemarkController *KubemarkController) GetNodeGroupTargetSize(nodeGroup string) (int, error) {
	kubemarkController.nodeGroupQueueSizeLock.Lock()
	defer kubemarkController.nodeGroupQueueSizeLock.Unlock()
	realSize, err := kubemarkController.GetNodeGroupSize(nodeGroup)
	if err != nil {
		return realSize, err
	}
	return realSize + kubemarkController.nodeGroupQueueSize[nodeGroup], nil
}

// SetNodeGroupSize changes the size of node group by adding or removing nodes.
func (kubemarkController *KubemarkController) SetNodeGroupSize(nodeGroup string, size int) error {
	currSize, err := kubemarkController.GetNodeGroupTargetSize(nodeGroup)
	if err != nil {
		return err
	}
	switch delta := size - currSize; {
	case delta < 0:
		absDelta := -delta
		nodes, err := kubemarkController.GetNodeNamesForNodeGroup(nodeGroup)
		if err != nil {
			return err
		}
		if len(nodes) < absDelta {
			return fmt.Errorf("can't remove %d nodes from %s nodegroup, not enough nodes: %d", absDelta, nodeGroup, len(nodes))
		}
		for i, node := range nodes {
			if i == absDelta {
				return nil
			}
			if err := kubemarkController.RemoveNodeFromNodeGroup(nodeGroup, node); err != nil {
				return err
			}
		}
	case delta > 0:
		kubemarkController.nodeGroupQueueSizeLock.Lock()
		for i := 0; i < delta; i++ {
			kubemarkController.nodeGroupQueueSize[nodeGroup]++
			kubemarkController.createNodeQueue <- nodeGroup
		}
		kubemarkController.nodeGroupQueueSizeLock.Unlock()
	}

	return nil
}

// GetNodeGroupForNode returns the name of the node group to which the node
// belongs.
func (kubemarkController *KubemarkController) GetNodeGroupForNode(node string) (string, error) {
	pod := kubemarkController.getPodByName(node)
	if pod == nil {
		return "", fmt.Errorf("node %s does not exist", node)
	}
	nodeGroup, ok := pod.ObjectMeta.Labels[nodeGroupLabel]
	if ok {
		return nodeGroup, nil
	}
	return "", fmt.Errorf("can't find nodegroup for node %s due to missing label %s", node, nodeGroupLabel)
}

func (kubemarkController *KubemarkController) addNodeToNodeGroup(nodeGroup string) error {
	node := kubemarkController.nodeTemplate.DeepCopy()
	node.Name = fmt.Sprintf("%s-%d", nodeGroup, kubemarkController.rand.Int63())
	node.Labels = map[string]string{nodeGroupLabel: nodeGroup, "name": node.Name}
	node.Spec.Template.Labels = node.Labels

	var err error
	for i := 0; i < numRetries; i++ {
		_, err = kubemarkController.externalCluster.client.CoreV1().ReplicationControllers(node.Namespace).Create(node)
		if err == nil {
			return nil
		}
	}

	return err
}

func (kubemarkController *KubemarkController) RemoveNodeFromNodeGroup(nodeGroup string, node string) error {
	pod := kubemarkController.getPodByName(node)
	if pod == nil {
		glog.Warningf("Can't delete node %s from nodegroup %s. Node does not exist.", node, nodeGroup)
		return nil
	}
	if pod.ObjectMeta.Labels[nodeGroupLabel] != nodeGroup {
		return fmt.Errorf("can't delete node %s from nodegroup %s. Node is not in nodegroup", node, nodeGroup)
	}
	policy := metav1.DeletePropagationForeground
	var err error
	for i := 0; i < numRetries; i++ {
		err = kubemarkController.externalCluster.client.CoreV1().ReplicationControllers(namespaceKubemark).Delete(
			pod.ObjectMeta.Labels["name"],
			&metav1.DeleteOptions{PropagationPolicy: &policy})
		if err == nil {
			glog.Infof("marking node %s for deletion", node)
			// Mark node for deletion from kubemark cluster.
			// Once it becomes unready after replication controller
			// deletion has been noticed, we will delete it explicitly.
			// This is to cover for the fact that kubemark does not
			// take care of this itself.
			kubemarkController.kubemarkCluster.markNodeForDeletion(node)
			return nil
		}
	}
	return fmt.Errorf("Failed to delete node %s: %v", node, err)
}

func (kubemarkController *KubemarkController) getReplicationControllerByName(name string) *apiv1.ReplicationController {
	rcs, err := kubemarkController.externalCluster.rcLister.List(labels.Everything())
	if err != nil {
		return nil
	}
	for _, rc := range rcs {
		if rc.ObjectMeta.Name == name {
			return rc
		}
	}
	return nil
}

func (kubemarkController *KubemarkController) getPodByName(name string) *apiv1.Pod {
	pods, err := kubemarkController.externalCluster.podLister.List(labels.Everything())
	if err != nil {
		return nil
	}
	for _, pod := range pods {
		if pod.ObjectMeta.Name == name {
			return pod
		}
	}
	return nil
}

func (kubemarkController *KubemarkController) getNodeNameForPod(podName string) (string, error) {
	pods, err := kubemarkController.externalCluster.podLister.List(labels.Everything())
	if err != nil {
		return "", err
	}
	for _, pod := range pods {
		if pod.ObjectMeta.Name == podName {
			return pod.Labels["name"], nil
		}
	}
	return "", fmt.Errorf("pod %s not found", podName)
}

// getNodeTemplate returns the template for hollow node replication controllers
// by looking for an existing hollow node specification. This requires at least
// one kubemark node to be present on startup.
func (kubemarkController *KubemarkController) getNodeTemplate() (*apiv1.ReplicationController, error) {
	podName, err := kubemarkController.kubemarkCluster.getHollowNodeName()
	if err != nil {
		return nil, err
	}
	hollowNodeName, err := kubemarkController.getNodeNameForPod(podName)
	if err != nil {
		return nil, err
	}
	if hollowNode := kubemarkController.getReplicationControllerByName(hollowNodeName); hollowNode != nil {
		nodeTemplate := &apiv1.ReplicationController{
			Spec: apiv1.ReplicationControllerSpec{
				Template: hollowNode.Spec.Template,
			},
		}

		nodeTemplate.Spec.Selector = nil
		nodeTemplate.Namespace = namespaceKubemark
		one := int32(1)
		nodeTemplate.Spec.Replicas = &one

		return nodeTemplate, nil
	}
	return nil, fmt.Errorf("can't get hollow node template")
}

func (kubemarkController *KubemarkController) runNodeCreation(stop <-chan struct{}) {
	for {
		select {
		case nodeGroup := <-kubemarkController.createNodeQueue:
			kubemarkController.nodeGroupQueueSizeLock.Lock()
			err := kubemarkController.addNodeToNodeGroup(nodeGroup)
			if err != nil {
				glog.Errorf("failed to add node to node group %s: %v", nodeGroup, err)
			} else {
				kubemarkController.nodeGroupQueueSize[nodeGroup]--
			}
			kubemarkController.nodeGroupQueueSizeLock.Unlock()
		case <-stop:
			return
		}
	}
}

func (kubemarkCluster *kubemarkCluster) getHollowNodeName() (string, error) {
	nodes, err := kubemarkCluster.nodeLister.List(labels.Everything())
	if err != nil {
		return "", err
	}
	for _, node := range nodes {
		return node.Name, nil
	}
	return "", fmt.Errorf("did not find any hollow nodes in the cluster")
}

func (kubemarkCluster *kubemarkCluster) removeUnneededNodes(oldObj interface{}, newObj interface{}) {
	node, ok := newObj.(*apiv1.Node)
	if !ok {
		return
	}
	for _, condition := range node.Status.Conditions {
		// Delete node if it is in unready state, and it has been
		// explicitly marked for deletion.
		if condition.Type == apiv1.NodeReady && condition.Status != apiv1.ConditionTrue {
			kubemarkCluster.nodesToDeleteLock.Lock()
			defer kubemarkCluster.nodesToDeleteLock.Unlock()
			if kubemarkCluster.nodesToDelete[node.Name] {
				kubemarkCluster.nodesToDelete[node.Name] = false
				if err := kubemarkCluster.client.CoreV1().Nodes().Delete(node.Name, &metav1.DeleteOptions{}); err != nil {
					glog.Errorf("failed to delete node %s from kubemark cluster, err: %v", node.Name, err)
				}
			}
			return
		}
	}
}

func (kubemarkCluster *kubemarkCluster) markNodeForDeletion(name string) {
	kubemarkCluster.nodesToDeleteLock.Lock()
	defer kubemarkCluster.nodesToDeleteLock.Unlock()
	kubemarkCluster.nodesToDelete[name] = true
}

func newReplicationControllerInformer(kubeClient kubeclient.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	rcListWatch := cache.NewListWatchFromClient(kubeClient.CoreV1().RESTClient(), "replicationcontrollers", namespaceKubemark, fields.Everything())
	return cache.NewSharedIndexInformer(rcListWatch, &apiv1.ReplicationController{}, resyncPeriod, nil)
}

func newPodInformer(kubeClient kubeclient.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	podListWatch := cache.NewListWatchFromClient(kubeClient.CoreV1().RESTClient(), "pods", namespaceKubemark, fields.Everything())
	return cache.NewSharedIndexInformer(podListWatch, &apiv1.Pod{}, resyncPeriod, nil)
}
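
The controller above exposes a small programmatic API (NewKubemarkController, WaitForCacheSync, Run, SetNodeGroupSize). The sketch below is not part of the vendored tree; it is a minimal, hypothetical wiring example showing how a caller might construct the controller against an external cluster and a kubemark cluster. The kubeconfig paths, resync period, and the "default" node group name are illustrative assumptions.

package main

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/kubernetes/pkg/kubemark"

	"github.com/golang/glog"
)

func main() {
	// Hypothetical kubeconfig paths; adjust for your environment.
	externalConfig, err := clientcmd.BuildConfigFromFlags("", "/path/to/external-kubeconfig")
	if err != nil {
		glog.Fatal(err)
	}
	kubemarkConfig, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubemark-kubeconfig")
	if err != nil {
		glog.Fatal(err)
	}

	externalClient := kubernetes.NewForConfigOrDie(externalConfig)
	kubemarkClient := kubernetes.NewForConfigOrDie(kubemarkConfig)

	// Informer factories for both clusters; the controller registers RC and pod
	// informers on the external factory and a node informer on the kubemark side.
	externalFactory := informers.NewSharedInformerFactory(externalClient, 15*time.Second)
	kubemarkFactory := informers.NewSharedInformerFactory(kubemarkClient, 15*time.Second)
	nodeInformer := kubemarkFactory.Core().V1().Nodes()

	controller, err := kubemark.NewKubemarkController(externalClient, externalFactory, kubemarkClient, nodeInformer)
	if err != nil {
		glog.Fatal(err)
	}

	stopCh := make(chan struct{})
	externalFactory.Start(stopCh)
	kubemarkFactory.Start(stopCh)
	if !controller.WaitForCacheSync(stopCh) {
		glog.Fatal("timed out waiting for caches to sync")
	}
	go controller.Run(stopCh)

	// Ask for three hollow nodes in a node group named "default".
	if err := controller.SetNodeGroupSize("default", 3); err != nil {
		glog.Errorf("failed to resize node group: %v", err)
	}

	<-stopCh
}

Note that SetNodeGroupSize only queues creations; the hollow-node ReplicationControllers are created by the controller's runNodeCreation worker, and on scale-down the controller deletes the RC and removes the node once it turns unready.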

vendor/k8s.io/kubernetes/pkg/kubemark/hollow_kubelet.go (generated, vendored, new file, 163 lines)
@@ -0,0 +1,163 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubemark

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	kubeletapp "k8s.io/kubernetes/cmd/kubelet/app"
	"k8s.io/kubernetes/cmd/kubelet/app/options"
	"k8s.io/kubernetes/pkg/kubelet"
	"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
	"k8s.io/kubernetes/pkg/kubelet/cm"
	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
	"k8s.io/kubernetes/pkg/kubelet/dockershim"
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
	kubeio "k8s.io/kubernetes/pkg/util/io"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/util/oom"
	"k8s.io/kubernetes/pkg/volume/empty_dir"
	"k8s.io/kubernetes/pkg/volume/secret"
	"k8s.io/kubernetes/test/utils"

	"github.com/golang/glog"
)

type HollowKubelet struct {
	KubeletFlags         *options.KubeletFlags
	KubeletConfiguration *kubeletconfig.KubeletConfiguration
	KubeletDeps          *kubelet.Dependencies
}

func NewHollowKubelet(
	nodeName string,
	client *clientset.Clientset,
	cadvisorInterface cadvisor.Interface,
	dockerClientConfig *dockershim.ClientConfig,
	kubeletPort, kubeletReadOnlyPort int,
	containerManager cm.ContainerManager,
	maxPods int, podsPerCore int,
) *HollowKubelet {
	// -----------------
	// Static config
	// -----------------
	f, c := GetHollowKubeletConfig(nodeName, kubeletPort, kubeletReadOnlyPort, maxPods, podsPerCore)

	// -----------------
	// Injected objects
	// -----------------
	volumePlugins := empty_dir.ProbeVolumePlugins()
	volumePlugins = append(volumePlugins, secret.ProbeVolumePlugins()...)
	d := &kubelet.Dependencies{
		KubeClient:         client,
		HeartbeatClient:    client.CoreV1(),
		DockerClientConfig: dockerClientConfig,
		CAdvisorInterface:  cadvisorInterface,
		Cloud:              nil,
		OSInterface:        &containertest.FakeOS{},
		ContainerManager:   containerManager,
		VolumePlugins:      volumePlugins,
		TLSOptions:         nil,
		OOMAdjuster:        oom.NewFakeOOMAdjuster(),
		Writer:             &kubeio.StdWriter{},
		Mounter:            mount.New("" /* default mount path */),
	}

	return &HollowKubelet{
		KubeletFlags:         f,
		KubeletConfiguration: c,
		KubeletDeps:          d,
	}
}

// Starts this HollowKubelet and blocks.
func (hk *HollowKubelet) Run() {
	if err := kubeletapp.RunKubelet(hk.KubeletFlags, hk.KubeletConfiguration, hk.KubeletDeps, false); err != nil {
		glog.Fatalf("Failed to run HollowKubelet: %v. Exiting.", err)
	}
	select {}
}

// Builds a KubeletConfiguration for the HollowKubelet, ensuring that the
// usual defaults are applied for fields we do not override.
func GetHollowKubeletConfig(
	nodeName string,
	kubeletPort int,
	kubeletReadOnlyPort int,
	maxPods int,
	podsPerCore int) (*options.KubeletFlags, *kubeletconfig.KubeletConfiguration) {

	testRootDir := utils.MakeTempDirOrDie("hollow-kubelet.", "")
	manifestFilePath := utils.MakeTempDirOrDie("manifest", testRootDir)
	glog.Infof("Using %s as root dir for hollow-kubelet", testRootDir)

	// Flags struct
	f := options.NewKubeletFlags()
	f.RootDirectory = testRootDir
	f.HostnameOverride = nodeName
	f.MinimumGCAge = metav1.Duration{Duration: 1 * time.Minute}
	f.MaxContainerCount = 100
	f.MaxPerPodContainerCount = 2
	f.RegisterNode = true
	f.RegisterSchedulable = true

	// Config struct
	c, err := options.NewKubeletConfiguration()
	if err != nil {
		panic(err)
	}

	c.ManifestURL = ""
	c.Address = "0.0.0.0" /* bind address */
	c.Port = int32(kubeletPort)
	c.ReadOnlyPort = int32(kubeletReadOnlyPort)
	c.PodManifestPath = manifestFilePath
	c.FileCheckFrequency.Duration = 20 * time.Second
	c.HTTPCheckFrequency.Duration = 20 * time.Second
	c.NodeStatusUpdateFrequency.Duration = 10 * time.Second
	c.SyncFrequency.Duration = 10 * time.Second
	c.EvictionPressureTransitionPeriod.Duration = 5 * time.Minute
	c.MaxPods = int32(maxPods)
	c.PodsPerCore = int32(podsPerCore)
	c.ClusterDNS = []string{}
	c.ImageGCHighThresholdPercent = 90
	c.ImageGCLowThresholdPercent = 80
	c.VolumeStatsAggPeriod.Duration = time.Minute
	c.CgroupRoot = ""
	c.CPUCFSQuota = true
	c.EnableControllerAttachDetach = false
	c.EnableDebuggingHandlers = true
	c.EnableServer = true
	c.CgroupsPerQOS = false
	// hairpin-veth is used to allow hairpin packets. Note that this deviates from
	// what the "real" kubelet currently does, because there's no way to
	// set promiscuous mode on docker0.
	c.HairpinMode = kubeletconfig.HairpinVeth
	c.MaxOpenFiles = 1024
	c.RegistryBurst = 10
	c.RegistryPullQPS = 5.0
	c.ResolverConfig = kubetypes.ResolvConfDefault
	c.KubeletCgroups = "/kubelet"
	c.SerializeImagePulls = true
	c.SystemCgroups = ""
	c.ProtectKernelDefaults = false

	return f, c
}
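
NewHollowKubelet wraps a real kubelet around fake host-facing dependencies (FakeOS, fake OOM adjuster). The sketch below is not part of the vendored tree; it is a minimal, hypothetical launch example. The kubeconfig path, node name, ports, and pod limits are assumptions, and the fake cadvisor (from pkg/kubelet/cadvisor/testing), the stub container manager, and the zero-valued dockershim.ClientConfig are assumed stand-ins for the real host integrations.

package main

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
	"k8s.io/kubernetes/pkg/kubelet/cm"
	"k8s.io/kubernetes/pkg/kubelet/dockershim"
	"k8s.io/kubernetes/pkg/kubemark"

	"github.com/golang/glog"
)

func main() {
	// Hypothetical kubeconfig path pointing at the cluster the hollow node joins.
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		glog.Fatal(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	// Fake cadvisor and a stub container manager keep the hollow kubelet from
	// touching the real host; the docker client config is left at its zero
	// value here purely for illustration.
	cadvisorInterface := &cadvisortest.Fake{}
	containerManager := cm.NewStubContainerManager()
	dockerClientConfig := &dockershim.ClientConfig{}

	hk := kubemark.NewHollowKubelet(
		"hollow-node-0", // node name (assumed)
		client,
		cadvisorInterface,
		dockerClientConfig,
		10250, // kubelet port
		10255, // read-only port
		containerManager,
		110, // max pods
		0,   // pods per core (0 = no per-core limit)
	)
	hk.Run() // blocks
}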

vendor/k8s.io/kubernetes/pkg/kubemark/hollow_proxy.go (generated, vendored, new file, 153 lines)
@@ -0,0 +1,153 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubemark

import (
	"fmt"
	"net"
	"time"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
	proxyapp "k8s.io/kubernetes/cmd/kube-proxy/app"
	api "k8s.io/kubernetes/pkg/apis/core"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/proxy"
	proxyconfig "k8s.io/kubernetes/pkg/proxy/config"
	"k8s.io/kubernetes/pkg/proxy/iptables"
	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
	utilnode "k8s.io/kubernetes/pkg/util/node"
	utilpointer "k8s.io/kubernetes/pkg/util/pointer"
	utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
	utilexec "k8s.io/utils/exec"

	"github.com/golang/glog"
)

type HollowProxy struct {
	ProxyServer *proxyapp.ProxyServer
}

type FakeProxier struct{}

func (*FakeProxier) Sync() {}
func (*FakeProxier) SyncLoop() {
	select {}
}
func (*FakeProxier) OnServiceAdd(service *api.Service)                        {}
func (*FakeProxier) OnServiceUpdate(oldService, service *api.Service)         {}
func (*FakeProxier) OnServiceDelete(service *api.Service)                     {}
func (*FakeProxier) OnServiceSynced()                                         {}
func (*FakeProxier) OnEndpointsAdd(endpoints *api.Endpoints)                  {}
func (*FakeProxier) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoints) {}
func (*FakeProxier) OnEndpointsDelete(endpoints *api.Endpoints)               {}
func (*FakeProxier) OnEndpointsSynced()                                       {}

func NewHollowProxyOrDie(
	nodeName string,
	client clientset.Interface,
	eventClient v1core.EventsGetter,
	iptInterface utiliptables.Interface,
	sysctl utilsysctl.Interface,
	execer utilexec.Interface,
	broadcaster record.EventBroadcaster,
	recorder record.EventRecorder,
	useRealProxier bool,
) (*HollowProxy, error) {
	// Create proxier and service/endpoint handlers.
	var proxier proxy.ProxyProvider
	var serviceHandler proxyconfig.ServiceHandler
	var endpointsHandler proxyconfig.EndpointsHandler

	if useRealProxier {
		// Real proxier with fake iptables, sysctl, etc underneath it.
		//var err error
		proxierIPTables, err := iptables.NewProxier(
			iptInterface,
			sysctl,
			execer,
			30*time.Second,
			5*time.Second,
			false,
			0,
			"10.0.0.0/8",
			nodeName,
			getNodeIP(client, nodeName),
			recorder,
			nil,
		)
		if err != nil {
			return nil, fmt.Errorf("unable to create proxier: %v", err)
		}
		proxier = proxierIPTables
		serviceHandler = proxierIPTables
		endpointsHandler = proxierIPTables
	} else {
		proxier = &FakeProxier{}
		serviceHandler = &FakeProxier{}
		endpointsHandler = &FakeProxier{}
	}

	// Create a Hollow Proxy instance.
	nodeRef := &v1.ObjectReference{
		Kind:      "Node",
		Name:      nodeName,
		UID:       types.UID(nodeName),
		Namespace: "",
	}
	return &HollowProxy{
		ProxyServer: &proxyapp.ProxyServer{
			Client:                client,
			EventClient:           eventClient,
			IptInterface:          iptInterface,
			Proxier:               proxier,
			Broadcaster:           broadcaster,
			Recorder:              recorder,
			ProxyMode:             "fake",
			NodeRef:               nodeRef,
			OOMScoreAdj:           utilpointer.Int32Ptr(0),
			ResourceContainer:     "",
			ConfigSyncPeriod:      30 * time.Second,
			ServiceEventHandler:   serviceHandler,
			EndpointsEventHandler: endpointsHandler,
		},
	}, nil
}

func (hp *HollowProxy) Run() {
	if err := hp.ProxyServer.Run(); err != nil {
		glog.Fatalf("Error while running proxy: %v\n", err)
	}
}

func getNodeIP(client clientset.Interface, hostname string) net.IP {
	var nodeIP net.IP
	node, err := client.Core().Nodes().Get(hostname, metav1.GetOptions{})
	if err != nil {
		glog.Warningf("Failed to retrieve node info: %v", err)
		return nil
	}
	nodeIP, err = utilnode.InternalGetNodeHostIP(node)
	if err != nil {
		glog.Warningf("Failed to retrieve node IP: %v", err)
		return nil
	}
	return nodeIP
}
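
NewHollowProxyOrDie expects an internal clientset, an events client, iptables/sysctl/exec implementations, and an event broadcaster/recorder. The sketch below is not part of the vendored tree; it is a minimal, hypothetical construction example with useRealProxier set to false, so the no-op FakeProxier is used. The kubeconfig path and node name are assumptions, as are the fake iptables/sysctl/exec helpers and the legacyscheme package, which are expected to exist elsewhere in this vendor tree but are not part of this commit.

package main

import (
	"k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/record"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/kubemark"
	fakeiptables "k8s.io/kubernetes/pkg/util/iptables/testing"
	fakesysctl "k8s.io/kubernetes/pkg/util/sysctl/testing"
	fakeexec "k8s.io/utils/exec/testing"

	"github.com/golang/glog"
)

func main() {
	nodeName := "hollow-node-0" // assumed node name

	// Hypothetical kubeconfig path; the proxy talks to the same cluster the
	// hollow kubelet registers with.
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		glog.Fatal(err)
	}
	internalClient := internalclientset.NewForConfigOrDie(config)
	eventClient := kubernetes.NewForConfigOrDie(config).CoreV1()

	broadcaster := record.NewBroadcaster()
	recorder := broadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: "kube-proxy", Host: nodeName})

	// Fake iptables/sysctl/exec keep the hollow proxy from modifying the host.
	hollowProxy, err := kubemark.NewHollowProxyOrDie(
		nodeName,
		internalClient,
		eventClient,
		fakeiptables.NewFake(),
		fakesysctl.NewFake(),
		&fakeexec.FakeExec{},
		broadcaster,
		recorder,
		false, // use the no-op FakeProxier rather than the real iptables proxier
	)
	if err != nil {
		glog.Fatalf("failed to create hollow proxy: %v", err)
	}
	hollowProxy.Run() // blocks
}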