vendor update for CSI 0.3.0

vendor/k8s.io/kubernetes/pkg/controller/endpoint/BUILD (generated, vendored), 2 lines changed:

@@ -42,7 +42,6 @@ go_test(
     srcs = ["endpoints_controller_test.go"],
     embed = [":go_default_library"],
     deps = [
         "//pkg/api/legacyscheme:go_default_library",
-        "//pkg/api/testapi:go_default_library",
         "//pkg/api/v1/endpoints:go_default_library",
         "//pkg/apis/core:go_default_library",
@@ -50,6 +49,7 @@ go_test(
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",

vendor/k8s.io/kubernetes/pkg/controller/endpoint/OWNERS (generated, vendored), 9 lines changed:

@@ -1,5 +1,10 @@
-reviewers:
-- bowei
 approvers:
 - bowei
 - MrHohn
+- thockin
+- matchstick
+reviewers:
+- bowei
+- MrHohn
+- thockin
+- matchstick

vendor/k8s.io/kubernetes/pkg/controller/endpoint/endpoints_controller.go (generated, vendored), 27 lines changed:

@@ -64,7 +64,7 @@ const (
     // containers in the pod and marks it "Running", till the kubelet stops all
     // containers and deletes the pod from the apiserver.
     // This field is deprecated. v1.Service.PublishNotReadyAddresses will replace it
-    // subsequent releases.
+    // subsequent releases. It will be removed no sooner than 1.13.
     TolerateUnreadyEndpointsAnnotation = "service.alpha.kubernetes.io/tolerate-unready-endpoints"
 )
 
@@ -372,7 +372,7 @@ func (e *EndpointController) handleErr(err error, key interface{}) {
     }
 
     if e.queue.NumRequeues(key) < maxRetries {
-        glog.V(2).Infof("Error syncing endpoints for service %q: %v", key, err)
+        glog.V(2).Infof("Error syncing endpoints for service %q, retrying. Error: %v", key, err)
         e.queue.AddRateLimited(key)
         return
     }
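
The requeue handling around this log line is the standard client-go workqueue retry pattern. Below is a minimal, self-contained sketch of that pattern rather than the vendored code itself: fmt stands in for glog, the helper name handleErr is reused only for readability, and the maxRetries value is an assumption, not taken from this diff.

    package main

    import (
        "fmt"

        "k8s.io/client-go/util/workqueue"
    )

    // maxRetries caps how often a failed key is requeued; the value here is assumed.
    const maxRetries = 15

    // handleErr requeues a key with rate-limited backoff on error and drops it
    // once the retry budget is used up.
    func handleErr(queue workqueue.RateLimitingInterface, err error, key string) {
        if err == nil {
            queue.Forget(key)
            return
        }
        if queue.NumRequeues(key) < maxRetries {
            fmt.Printf("Error syncing endpoints for service %q, retrying. Error: %v\n", key, err)
            queue.AddRateLimited(key)
            return
        }
        fmt.Printf("Dropping service %q out of the queue: %v\n", key, err)
        queue.Forget(key)
    }

    func main() {
        queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
        defer queue.ShutDown()
        handleErr(queue, fmt.Errorf("endpoints update conflict"), "default/my-svc")
        fmt.Println("requeues so far:", queue.NumRequeues("default/my-svc"))
    }
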
@@ -385,7 +385,7 @@ func (e *EndpointController) handleErr(err error, key interface{}) {
 func (e *EndpointController) syncService(key string) error {
     startTime := time.Now()
     defer func() {
-        glog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Now().Sub(startTime))
+        glog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Since(startTime))
     }()
 
     namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -420,7 +420,8 @@ func (e *EndpointController) syncService(key string) error {
         return err
     }
 
-    var tolerateUnreadyEndpoints bool
+    // If the user specified the older (deprecated) annotation, we have to respect it.
+    tolerateUnreadyEndpoints := service.Spec.PublishNotReadyAddresses
     if v, ok := service.Annotations[TolerateUnreadyEndpointsAnnotation]; ok {
         b, err := strconv.ParseBool(v)
         if err == nil {
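
The net effect of this hunk: v1.Service.PublishNotReadyAddresses now supplies the default, and the deprecated annotation, when present and parseable, still overrides it. A minimal standalone sketch of that precedence follows; the helper name tolerateUnready and its plain-bool signature are illustrative, not part of the vendored controller.

    package main

    import (
        "fmt"
        "strconv"
    )

    // Deprecated annotation key handled in the hunk above.
    const TolerateUnreadyEndpointsAnnotation = "service.alpha.kubernetes.io/tolerate-unready-endpoints"

    // tolerateUnready reproduces the precedence introduced by this change:
    // Spec.PublishNotReadyAddresses is the default, and the deprecated
    // annotation, if set to a parseable bool, still wins.
    func tolerateUnready(publishNotReadyAddresses bool, annotations map[string]string) bool {
        tolerate := publishNotReadyAddresses
        if v, ok := annotations[TolerateUnreadyEndpointsAnnotation]; ok {
            if b, err := strconv.ParseBool(v); err == nil {
                tolerate = b
            }
        }
        return tolerate
    }

    func main() {
        // Field only: the new default is honored.
        fmt.Println(tolerateUnready(true, nil)) // true
        // The deprecated annotation overrides the field.
        fmt.Println(tolerateUnready(true, map[string]string{TolerateUnreadyEndpointsAnnotation: "false"})) // false
    }
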
@@ -454,8 +455,8 @@ func (e *EndpointController) syncService(key string) error {
         // Allow headless service not to have ports.
         if len(service.Spec.Ports) == 0 {
             if service.Spec.ClusterIP == api.ClusterIPNone {
-                epp := v1.EndpointPort{Port: 0, Protocol: v1.ProtocolTCP}
-                subsets, totalReadyEps, totalNotReadyEps = addEndpointSubset(subsets, pod, epa, epp, tolerateUnreadyEndpoints)
+                subsets, totalReadyEps, totalNotReadyEps = addEndpointSubset(subsets, pod, epa, nil, tolerateUnreadyEndpoints)
+                // No need to repack subsets for headless service without ports.
             }
         } else {
             for i := range service.Spec.Ports {
@@ -470,14 +471,14 @@ func (e *EndpointController) syncService(key string) error {
                 }
 
                 var readyEps, notReadyEps int
-                epp := v1.EndpointPort{Name: portName, Port: int32(portNum), Protocol: portProto}
+                epp := &v1.EndpointPort{Name: portName, Port: int32(portNum), Protocol: portProto}
                 subsets, readyEps, notReadyEps = addEndpointSubset(subsets, pod, epa, epp, tolerateUnreadyEndpoints)
                 totalReadyEps = totalReadyEps + readyEps
                 totalNotReadyEps = totalNotReadyEps + notReadyEps
             }
+            subsets = endpoints.RepackSubsets(subsets)
         }
     }
-    subsets = endpoints.RepackSubsets(subsets)
 
     // See if there's actually an update here.
     currentEndpoints, err := e.endpointsLister.Endpoints(service.Namespace).Get(service.Name)
@@ -561,20 +562,24 @@ func (e *EndpointController) checkLeftoverEndpoints() {
 }
 
 func addEndpointSubset(subsets []v1.EndpointSubset, pod *v1.Pod, epa v1.EndpointAddress,
-    epp v1.EndpointPort, tolerateUnreadyEndpoints bool) ([]v1.EndpointSubset, int, int) {
+    epp *v1.EndpointPort, tolerateUnreadyEndpoints bool) ([]v1.EndpointSubset, int, int) {
     var readyEps int = 0
     var notReadyEps int = 0
+    ports := []v1.EndpointPort{}
+    if epp != nil {
+        ports = append(ports, *epp)
+    }
     if tolerateUnreadyEndpoints || podutil.IsPodReady(pod) {
         subsets = append(subsets, v1.EndpointSubset{
             Addresses: []v1.EndpointAddress{epa},
-            Ports:     []v1.EndpointPort{epp},
+            Ports:     ports,
         })
         readyEps++
     } else if shouldPodBeInEndpoints(pod) {
         glog.V(5).Infof("Pod is out of service: %s/%s", pod.Namespace, pod.Name)
         subsets = append(subsets, v1.EndpointSubset{
             NotReadyAddresses: []v1.EndpointAddress{epa},
-            Ports:             []v1.EndpointPort{epp},
+            Ports:             ports,
         })
         notReadyEps++
     }
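
Changing addEndpointSubset to take a *v1.EndpointPort is what lets the headless, port-less branch earlier in this file pass nil and end up with an empty Ports slice instead of a synthetic {Port: 0, Protocol: TCP} entry. A small sketch of just that piece, assuming a module that vendors k8s.io/api as this repository does; the helper name buildPorts is illustrative:

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
    )

    // buildPorts mirrors the new ports handling inside addEndpointSubset:
    // nil (headless service without ports) yields an empty slice, a non-nil
    // port yields a single-element slice.
    func buildPorts(epp *v1.EndpointPort) []v1.EndpointPort {
        ports := []v1.EndpointPort{}
        if epp != nil {
            ports = append(ports, *epp)
        }
        return ports
    }

    func main() {
        fmt.Println(buildPorts(nil)) // []
        fmt.Println(buildPorts(&v1.EndpointPort{Name: "http", Port: 8080, Protocol: v1.ProtocolTCP}))
    }
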

vendor/k8s.io/kubernetes/pkg/controller/endpoint/endpoints_controller_test.go (generated, vendored), 15 lines changed:

@@ -26,6 +26,7 @@ import (
     "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/apimachinery/pkg/util/sets"
     "k8s.io/apimachinery/pkg/util/wait"
@@ -34,7 +35,6 @@ import (
     restclient "k8s.io/client-go/rest"
     "k8s.io/client-go/tools/cache"
     utiltesting "k8s.io/client-go/util/testing"
     "k8s.io/kubernetes/pkg/api/legacyscheme"
-    "k8s.io/kubernetes/pkg/api/testapi"
     endptspkg "k8s.io/kubernetes/pkg/api/v1/endpoints"
     api "k8s.io/kubernetes/pkg/apis/core"
@@ -48,7 +48,7 @@ var emptyNodeName string
 func addPods(store cache.Store, namespace string, nPods int, nPorts int, nNotReady int) {
     for i := 0; i < nPods+nNotReady; i++ {
         p := &v1.Pod{
-            TypeMeta: metav1.TypeMeta{APIVersion: legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
+            TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
             ObjectMeta: metav1.ObjectMeta{
                 Namespace: namespace,
                 Name:      fmt.Sprintf("pod%d", i),
@@ -81,7 +81,7 @@ func addPods(store cache.Store, namespace string, nPods int, nPorts int, nNotRea
 func addNotReadyPodsWithSpecifiedRestartPolicyAndPhase(store cache.Store, namespace string, nPods int, nPorts int, restartPolicy v1.RestartPolicy, podPhase v1.PodPhase) {
     for i := 0; i < nPods; i++ {
         p := &v1.Pod{
-            TypeMeta: metav1.TypeMeta{APIVersion: legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
+            TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
             ObjectMeta: metav1.ObjectMeta{
                 Namespace: namespace,
                 Name:      fmt.Sprintf("pod%d", i),
@@ -110,11 +110,6 @@ func addNotReadyPodsWithSpecifiedRestartPolicyAndPhase(store cache.Store, namesp
     }
 }
 
-type serverResponse struct {
-    statusCode int
-    obj        interface{}
-}
-
 func makeTestServer(t *testing.T, namespace string) (*httptest.Server, *utiltesting.FakeHandler) {
     fakeEndpointsHandler := utiltesting.FakeHandler{
         StatusCode: http.StatusOK,
@@ -138,7 +133,7 @@ type endpointController struct {
 }
 
 func newController(url string) *endpointController {
-    client := clientset.NewForConfigOrDie(&restclient.Config{Host: url, ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
+    client := clientset.NewForConfigOrDie(&restclient.Config{Host: url, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
     informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
     endpoints := NewEndpointController(informerFactory.Core().V1().Pods(), informerFactory.Core().V1().Services(),
         informerFactory.Core().V1().Endpoints(), client)
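
The simplified client construction above pins the core group version explicitly instead of resolving it through the legacyscheme registry. A minimal sketch of the same config shape, assuming the usual client-go imports; the placeholder host stands in for the httptest server URL the test actually uses:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/runtime/schema"
        clientset "k8s.io/client-go/kubernetes"
        restclient "k8s.io/client-go/rest"
    )

    func main() {
        // NewForConfigOrDie only builds the client; it does not contact the host,
        // so a placeholder URL is enough to show the config shape.
        client := clientset.NewForConfigOrDie(&restclient.Config{
            Host: "http://127.0.0.1:1",
            ContentConfig: restclient.ContentConfig{
                GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"},
            },
        })
        fmt.Printf("built clientset: %T\n", client)
    }
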
@@ -753,7 +748,7 @@ func TestSyncEndpointsHeadlessService(t *testing.T) {
         },
         Subsets: []v1.EndpointSubset{{
             Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
-            Ports:     []v1.EndpointPort{{Port: 0, Protocol: "TCP"}},
+            Ports:     []v1.EndpointPort{},
         }},
     })
     endpointsHandler.ValidateRequestCount(t, 1)