Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-14 18:53:35 +00:00)

Commit: Fresh dep ensure

vendor/k8s.io/kubernetes/pkg/controller/endpoint/BUILD (generated, vendored; 54 lines changed)

@@ -19,21 +19,21 @@ go_library(
         "//pkg/apis/core:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/util/metrics:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
-        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
-        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
-        "//vendor/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library",
-        "//vendor/k8s.io/client-go/util/workqueue:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library",
+        "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
@@ -46,18 +46,18 @@ go_test(
         "//pkg/api/v1/endpoints:go_default_library",
         "//pkg/apis/core:go_default_library",
         "//pkg/controller:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//vendor/k8s.io/client-go/informers:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
-        "//vendor/k8s.io/client-go/rest:go_default_library",
-        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
-        "//vendor/k8s.io/client-go/util/testing:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/client-go/informers:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/client-go/rest:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
+        "//staging/src/k8s.io/client-go/util/testing:go_default_library",
     ],
 )
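
Note: the BUILD changes are mechanical. The glog dependency is dropped in favor of klog, and the k8s.io/* deps switch from //vendor/k8s.io/... labels to //staging/src/... ones; in the upstream kubernetes repo both label forms resolve to the same staged source for the published k8s.io/api, k8s.io/apimachinery, and k8s.io/client-go libraries, so apart from the logger the dependency set is unchanged.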

vendor/k8s.io/kubernetes/pkg/controller/endpoint/endpoints_controller.go (generated, vendored; 42 lines changed)

@@ -42,7 +42,7 @@ import (
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/util/metrics"
 
-    "github.com/golang/glog"
+    "k8s.io/klog"
 )
 
 const (
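
Note: the glog to klog swap is the upstream Kubernetes logging migration; klog is a fork of glog that drops glog's init()-time flag registration. A minimal, self-contained sketch of klog v1 usage matching the calls in this file; the main() wiring below is illustrative and not part of the vendored code:

    package main

    import (
        "flag"

        "k8s.io/klog"
    )

    func main() {
        // Unlike glog, klog registers its flags (-v, -logtostderr, ...)
        // only when asked; passing nil registers onto flag.CommandLine.
        klog.InitFlags(nil)
        flag.Parse()
        defer klog.Flush() // flush buffered log lines on exit

        klog.Infof("Starting endpoint controller")
        // V(n) gates verbose output on the -v flag, exactly as glog did.
        klog.V(4).Infof("only printed when run with -v=4 or higher")
    }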
@@ -68,10 +68,6 @@ const (
     TolerateUnreadyEndpointsAnnotation = "service.alpha.kubernetes.io/tolerate-unready-endpoints"
 )
 
-var (
-    keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc
-)
-
 // NewEndpointController returns a new *EndpointController.
 func NewEndpointController(podInformer coreinformers.PodInformer, serviceInformer coreinformers.ServiceInformer,
     endpointsInformer coreinformers.EndpointsInformer, client clientset.Interface) *EndpointController {
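
Note: dropping the package-local keyFunc var is not a behavior change; controller.KeyFunc in pkg/controller is itself cache.DeletionHandlingMetaNamespaceKeyFunc, so every call site now goes through the shared alias. A standalone sketch of what that key function produces and how syncService consumes it (the Pod literal is illustrative):

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/tools/cache"
    )

    func main() {
        pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "other", Name: "pod0"}}

        // Unwraps cache.DeletedFinalStateUnknown tombstones first, then
        // falls back to MetaNamespaceKeyFunc: "<namespace>/<name>".
        key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pod)
        if err != nil {
            panic(err)
        }
        fmt.Println(key) // other/pod0

        // syncService later splits the queue key back apart.
        ns, name, _ := cache.SplitMetaNamespaceKey(key)
        fmt.Println(ns, name) // other pod0
    }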
@@ -150,8 +146,8 @@ func (e *EndpointController) Run(workers int, stopCh <-chan struct{}) {
     defer utilruntime.HandleCrash()
     defer e.queue.ShutDown()
 
-    glog.Infof("Starting endpoint controller")
-    defer glog.Infof("Shutting down endpoint controller")
+    klog.Infof("Starting endpoint controller")
+    defer klog.Infof("Shutting down endpoint controller")
 
     if !controller.WaitForCacheSync("endpoint", stopCh, e.podsSynced, e.servicesSynced, e.endpointsSynced) {
         return
@@ -178,7 +174,7 @@ func (e *EndpointController) getPodServiceMemberships(pod *v1.Pod) (sets.String,
         return set, nil
     }
     for i := range services {
-        key, err := keyFunc(services[i])
+        key, err := controller.KeyFunc(services[i])
         if err != nil {
             return nil, err
         }
@@ -328,13 +324,13 @@ func (e *EndpointController) deletePod(obj interface{}) {
         utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a Pod: %#v", obj))
         return
     }
-    glog.V(4).Infof("Enqueuing services of deleted pod %s/%s having final state unrecorded", pod.Namespace, pod.Name)
+    klog.V(4).Infof("Enqueuing services of deleted pod %s/%s having final state unrecorded", pod.Namespace, pod.Name)
     e.addPod(pod)
 }
 
 // obj could be an *v1.Service, or a DeletionFinalStateUnknown marker item.
 func (e *EndpointController) enqueueService(obj interface{}) {
-    key, err := keyFunc(obj)
+    key, err := controller.KeyFunc(obj)
     if err != nil {
         utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
         return
@@ -372,12 +368,12 @@ func (e *EndpointController) handleErr(err error, key interface{}) {
     }
 
     if e.queue.NumRequeues(key) < maxRetries {
-        glog.V(2).Infof("Error syncing endpoints for service %q, retrying. Error: %v", key, err)
+        klog.V(2).Infof("Error syncing endpoints for service %q, retrying. Error: %v", key, err)
         e.queue.AddRateLimited(key)
         return
     }
 
-    glog.Warningf("Dropping service %q out of the queue: %v", key, err)
+    klog.Warningf("Dropping service %q out of the queue: %v", key, err)
     e.queue.Forget(key)
     utilruntime.HandleError(err)
 }
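
Note: handleErr above is the stock rate-limited workqueue retry pattern. A condensed sketch of the same flow outside the controller, assuming the endpoints controller's maxRetries constant of 15; the simulated error stands in for a failed syncService:

    package main

    import (
        "errors"
        "fmt"

        "k8s.io/client-go/util/workqueue"
    )

    const maxRetries = 15 // same constant the endpoints controller uses

    func main() {
        queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
        queue.Add("other/foo")

        key, _ := queue.Get()
        err := errors.New("simulated syncService failure")

        if err == nil {
            queue.Forget(key) // success resets the per-item failure count
        } else if queue.NumRequeues(key) < maxRetries {
            queue.AddRateLimited(key) // retry with exponential backoff
        } else {
            queue.Forget(key) // give up; stop tracking retries for this key
            fmt.Printf("dropping %v out of the queue: %v\n", key, err)
        }
        queue.Done(key) // always mark processing of this item finished
    }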
@@ -385,7 +381,7 @@ func (e *EndpointController) handleErr(err error, key interface{}) {
 func (e *EndpointController) syncService(key string) error {
     startTime := time.Now()
     defer func() {
-        glog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Since(startTime))
+        klog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Since(startTime))
     }()
 
     namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -412,7 +408,7 @@ func (e *EndpointController) syncService(key string) error {
         return nil
     }
 
-    glog.V(5).Infof("About to update endpoints for service %q", key)
+    klog.V(5).Infof("About to update endpoints for service %q", key)
     pods, err := e.podLister.Pods(service.Namespace).List(labels.Set(service.Spec.Selector).AsSelectorPreValidated())
     if err != nil {
         // Since we're getting stuff from a local cache, it is
@@ -437,11 +433,11 @@ func (e *EndpointController) syncService(key string) error {
 
     for _, pod := range pods {
         if len(pod.Status.PodIP) == 0 {
-            glog.V(5).Infof("Failed to find an IP for pod %s/%s", pod.Namespace, pod.Name)
+            klog.V(5).Infof("Failed to find an IP for pod %s/%s", pod.Namespace, pod.Name)
             continue
         }
         if !tolerateUnreadyEndpoints && pod.DeletionTimestamp != nil {
-            glog.V(5).Infof("Pod is being deleted %s/%s", pod.Namespace, pod.Name)
+            klog.V(5).Infof("Pod is being deleted %s/%s", pod.Namespace, pod.Name)
             continue
         }
 
@@ -466,7 +462,7 @@ func (e *EndpointController) syncService(key string) error {
             portProto := servicePort.Protocol
             portNum, err := podutil.FindPort(pod, servicePort)
             if err != nil {
-                glog.V(4).Infof("Failed to find port for service %s/%s: %v", service.Namespace, service.Name, err)
+                klog.V(4).Infof("Failed to find port for service %s/%s: %v", service.Namespace, service.Name, err)
                 continue
             }
 
@@ -476,9 +472,9 @@ func (e *EndpointController) syncService(key string) error {
                 totalReadyEps = totalReadyEps + readyEps
                 totalNotReadyEps = totalNotReadyEps + notReadyEps
             }
-            subsets = endpoints.RepackSubsets(subsets)
         }
     }
+    subsets = endpoints.RepackSubsets(subsets)
 
     // See if there's actually an update here.
     currentEndpoints, err := e.endpointsLister.Endpoints(service.Namespace).Get(service.Name)
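
Note: this is the one change in syncService that is not a log-call rename. RepackSubsets used to run inside the ports branch, once per pod; it now runs a single time after the whole pod loop. For the ports branch the final result is effectively the same, since repacking is a normalization step, but the headless no-ports branch, which previously emitted its subsets unrepacked, is now covered as well (see the new TestSyncEndpointsHeadlessWithoutPort below).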
@@ -500,7 +496,7 @@ func (e *EndpointController) syncService(key string) error {
     if !createEndpoints &&
         apiequality.Semantic.DeepEqual(currentEndpoints.Subsets, subsets) &&
         apiequality.Semantic.DeepEqual(currentEndpoints.Labels, service.Labels) {
-        glog.V(5).Infof("endpoints are equal for %s/%s, skipping update", service.Namespace, service.Name)
+        klog.V(5).Infof("endpoints are equal for %s/%s, skipping update", service.Namespace, service.Name)
         return nil
     }
     newEndpoints := currentEndpoints.DeepCopy()
@@ -510,7 +506,7 @@ func (e *EndpointController) syncService(key string) error {
         newEndpoints.Annotations = make(map[string]string)
     }
 
-    glog.V(4).Infof("Update endpoints for %v/%v, ready: %d not ready: %d", service.Namespace, service.Name, totalReadyEps, totalNotReadyEps)
+    klog.V(4).Infof("Update endpoints for %v/%v, ready: %d not ready: %d", service.Namespace, service.Name, totalReadyEps, totalNotReadyEps)
     if createEndpoints {
         // No previous endpoints, create them
         _, err = e.client.CoreV1().Endpoints(service.Namespace).Create(newEndpoints)
@@ -524,7 +520,7 @@ func (e *EndpointController) syncService(key string) error {
             // 1. namespace is terminating, endpoint creation is not allowed by default.
             // 2. policy is misconfigured, in which case no service would function anywhere.
             // Given the frequency of 1, we log at a lower level.
-            glog.V(5).Infof("Forbidden from creating endpoints: %v", err)
+            klog.V(5).Infof("Forbidden from creating endpoints: %v", err)
         }
         return err
     }
@@ -552,7 +548,7 @@ func (e *EndpointController) checkLeftoverEndpoints() {
             // as leader-election only have endpoints without service
             continue
         }
-        key, err := keyFunc(ep)
+        key, err := controller.KeyFunc(ep)
         if err != nil {
             utilruntime.HandleError(fmt.Errorf("Unable to get key for endpoint %#v", ep))
             continue
@@ -576,7 +572,7 @@ func addEndpointSubset(subsets []v1.EndpointSubset, pod *v1.Pod, epa v1.Endpoint
         })
         readyEps++
     } else if shouldPodBeInEndpoints(pod) {
-        glog.V(5).Infof("Pod is out of service: %s/%s", pod.Namespace, pod.Name)
+        klog.V(5).Infof("Pod is out of service: %s/%s", pod.Namespace, pod.Name)
         subsets = append(subsets, v1.EndpointSubset{
             NotReadyAddresses: []v1.EndpointAddress{epa},
             Ports:             ports,

vendor/k8s.io/kubernetes/pkg/controller/endpoint/endpoints_controller_test.go (generated, vendored; 71 lines changed)

@@ -120,7 +120,7 @@ func makeTestServer(t *testing.T, namespace string) (*httptest.Server, *utiltest
     mux.Handle(testapi.Default.ResourcePath("endpoints/", namespace, ""), &fakeEndpointsHandler)
     mux.HandleFunc("/", func(res http.ResponseWriter, req *http.Request) {
         t.Errorf("unexpected request: %v", req.RequestURI)
-        res.WriteHeader(http.StatusNotFound)
+        http.Error(res, "", http.StatusNotFound)
     })
     return httptest.NewServer(mux), &fakeEndpointsHandler
 }
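
Note: in the catch-all 404 handler, http.Error is a superset of the old res.WriteHeader call: it sets Content-Type (and X-Content-Type-Options: nosniff), writes the status, then writes the message plus a trailing newline. A small stdlib sketch of the difference:

    package main

    import (
        "fmt"
        "io"
        "net/http"
        "net/http/httptest"
    )

    func main() {
        // Old style: status code only; empty body, no Content-Type.
        bare := httptest.NewRecorder()
        bare.WriteHeader(http.StatusNotFound)

        // New style: status code plus a text/plain body ("" + "\n" here).
        withBody := httptest.NewRecorder()
        http.Error(withBody, "", http.StatusNotFound)

        resp := withBody.Result()
        body, _ := io.ReadAll(resp.Body)
        fmt.Println(resp.StatusCode, resp.Header.Get("Content-Type"), len(body))
        // Output: 404 text/plain; charset=utf-8 1
    }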
@@ -344,6 +344,47 @@ func TestSyncEndpointsProtocolUDP(t *testing.T) {
     endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data)
 }
 
+func TestSyncEndpointsProtocolSCTP(t *testing.T) {
+    ns := "other"
+    testServer, endpointsHandler := makeTestServer(t, ns)
+    defer testServer.Close()
+    endpoints := newController(testServer.URL)
+    endpoints.endpointsStore.Add(&v1.Endpoints{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:            "foo",
+            Namespace:       ns,
+            ResourceVersion: "1",
+        },
+        Subsets: []v1.EndpointSubset{{
+            Addresses: []v1.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}},
+            Ports:     []v1.EndpointPort{{Port: 1000, Protocol: "SCTP"}},
+        }},
+    })
+    addPods(endpoints.podStore, ns, 1, 1, 0)
+    endpoints.serviceStore.Add(&v1.Service{
+        ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns},
+        Spec: v1.ServiceSpec{
+            Selector: map[string]string{},
+            Ports:    []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt(8080), Protocol: "SCTP"}},
+        },
+    })
+    endpoints.syncService(ns + "/foo")
+
+    endpointsHandler.ValidateRequestCount(t, 1)
+    data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:            "foo",
+            Namespace:       ns,
+            ResourceVersion: "1",
+        },
+        Subsets: []v1.EndpointSubset{{
+            Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
+            Ports:     []v1.EndpointPort{{Port: 8080, Protocol: "SCTP"}},
+        }},
+    })
+    endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data)
+}
+
 func TestSyncEndpointsItemsEmptySelectorSelectsAll(t *testing.T) {
     ns := "other"
     testServer, endpointsHandler := makeTestServer(t, ns)
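
Note: TestSyncEndpointsProtocolSCTP mirrors the existing UDP test with the protocol swapped: the stale subset on port 1000 must be replaced via PUT by the pod address on the resolved target port 8080, with Protocol "SCTP" preserved on the endpoint port. SCTP service support was new in this vendored Kubernetes release, behind the SCTPSupport feature gate upstream.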
@@ -863,6 +904,34 @@ func TestSyncEndpointsItemsExcludeNotReadyPodsWithRestartPolicyOnFailureAndPhase
     endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data)
 }
 
+func TestSyncEndpointsHeadlessWithoutPort(t *testing.T) {
+    ns := metav1.NamespaceDefault
+    testServer, endpointsHandler := makeTestServer(t, ns)
+    defer testServer.Close()
+    endpoints := newController(testServer.URL)
+    endpoints.serviceStore.Add(&v1.Service{
+        ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns},
+        Spec: v1.ServiceSpec{
+            Selector:  map[string]string{"foo": "bar"},
+            ClusterIP: "None",
+            Ports:     nil,
+        },
+    })
+    addPods(endpoints.podStore, ns, 1, 1, 0)
+    endpoints.syncService(ns + "/foo")
+    endpointsHandler.ValidateRequestCount(t, 1)
+    data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: "foo",
+        },
+        Subsets: []v1.EndpointSubset{{
+            Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
+            Ports:     nil,
+        }},
+    })
+    endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, ""), "POST", &data)
+}
+
 // There are 3*5 possibilities(3 types of RestartPolicy by 5 types of PodPhase). Not list them all here.
 // Just list all of the 3 false cases and 3 of the 12 true cases.
 func TestShouldPodBeInEndpoints(t *testing.T) {
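
Note: TestSyncEndpointsHeadlessWithoutPort pins down the behavior touched by the RepackSubsets move above: a headless service (ClusterIP "None") with no ports still gets an Endpoints object, carrying the pod address with Ports: nil, and since no Endpoints existed beforehand the controller issues a POST (create) rather than a PUT.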