Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-14 18:53:35 +00:00

Commit: Fresh dep ensure

This vendor refresh updates the Kubernetes kubelet prober packages: logging moves from github.com/golang/glog to k8s.io/klog (same call surface, new import path), and BUILD dependency labels move from //vendor/k8s.io/... to //staging/src/k8s.io/... paths.
vendor/k8s.io/kubernetes/pkg/kubelet/prober/BUILD (generated, vendored): 34 changed lines
@@ -25,15 +25,15 @@ go_library(
         "//pkg/probe/exec:go_default_library",
         "//pkg/probe/http:go_default_library",
         "//pkg/probe/tcp:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
         "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//vendor/k8s.io/client-go/tools/record:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
         "//vendor/k8s.io/utils/exec:go_default_library",
     ],
 )
@@ -56,15 +56,15 @@ go_test(
         "//pkg/kubelet/status/testing:go_default_library",
         "//pkg/probe:go_default_library",
         "//pkg/probe/exec:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
-        "//vendor/k8s.io/client-go/tools/record:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
         "//vendor/k8s.io/utils/exec:go_default_library",
     ],
 )
vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober.go (generated, vendored): 30 changed lines
@@ -39,20 +39,20 @@ import (
 	tcprobe "k8s.io/kubernetes/pkg/probe/tcp"
 	"k8s.io/utils/exec"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 )
 
 const maxProbeRetries = 3
 
 // Prober helps to check the liveness/readiness of a container.
 type prober struct {
-	exec execprobe.ExecProber
+	exec execprobe.Prober
 	// probe types needs different httprobe instances so they don't
 	// share a connection pool which can cause collsions to the
 	// same host:port and transient failures. See #49740.
-	readinessHttp httprobe.HTTPProber
-	livenessHttp  httprobe.HTTPProber
-	tcp           tcprobe.TCPProber
+	readinessHttp httprobe.Prober
+	livenessHttp  httprobe.Prober
+	tcp           tcprobe.Prober
 	runner kubecontainer.ContainerCommandRunner
 
 	refManager *kubecontainer.RefManager
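The struct comment above points at kubernetes/kubernetes#49740: when liveness and readiness probes shared one HTTP prober, they shared one connection pool, and concurrent probes against the same host:port could collide and fail transiently; the fix keeps one prober instance per probe type. A minimal sketch of that isolation idea, using plain net/http rather than the kubelet's prober types:

package main

import (
	"fmt"
	"net/http"
)

// newIsolatedClient builds an http.Client with its own Transport and
// therefore its own connection pool. Creating one per probe type mirrors
// the readinessHttp/livenessHttp split in the struct above: probes to the
// same host:port no longer contend for each other's pooled connections.
func newIsolatedClient() *http.Client {
	return &http.Client{
		Transport: &http.Transport{}, // fresh pool, not the shared http.DefaultTransport
	}
}

func main() {
	livenessHTTP := newIsolatedClient()
	readinessHTTP := newIsolatedClient()
	fmt.Println(livenessHTTP.Transport != readinessHTTP.Transport) // true: independent pools
}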
@@ -91,7 +91,7 @@ func (pb *prober) probe(probeType probeType, pod *v1.Pod, status v1.PodStatus, c
 
 	ctrName := fmt.Sprintf("%s:%s", format.Pod(pod), container.Name)
 	if probeSpec == nil {
-		glog.Warningf("%s probe for %s is nil", probeType, ctrName)
+		klog.Warningf("%s probe for %s is nil", probeType, ctrName)
 		return results.Success, nil
 	}
 
@@ -100,22 +100,22 @@ func (pb *prober) probe(probeType probeType, pod *v1.Pod, status v1.PodStatus, c
 		// Probe failed in one way or another.
 		ref, hasRef := pb.refManager.GetRef(containerID)
 		if !hasRef {
-			glog.Warningf("No ref for container %q (%s)", containerID.String(), ctrName)
+			klog.Warningf("No ref for container %q (%s)", containerID.String(), ctrName)
 		}
 		if err != nil {
-			glog.V(1).Infof("%s probe for %q errored: %v", probeType, ctrName, err)
+			klog.V(1).Infof("%s probe for %q errored: %v", probeType, ctrName, err)
 			if hasRef {
 				pb.recorder.Eventf(ref, v1.EventTypeWarning, events.ContainerUnhealthy, "%s probe errored: %v", probeType, err)
 			}
 		} else { // result != probe.Success
-			glog.V(1).Infof("%s probe for %q failed (%v): %s", probeType, ctrName, result, output)
+			klog.V(1).Infof("%s probe for %q failed (%v): %s", probeType, ctrName, result, output)
 			if hasRef {
 				pb.recorder.Eventf(ref, v1.EventTypeWarning, events.ContainerUnhealthy, "%s probe failed: %s", probeType, output)
 			}
 		}
 		return results.Failure, err
 	}
-	glog.V(3).Infof("%s probe for %q succeeded", probeType, ctrName)
+	klog.V(3).Infof("%s probe for %q succeeded", probeType, ctrName)
 	return results.Success, nil
 }
@@ -147,7 +147,7 @@ func buildHeader(headerList []v1.HTTPHeader) http.Header {
 func (pb *prober) runProbe(probeType probeType, p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID) (probe.Result, string, error) {
 	timeout := time.Duration(p.TimeoutSeconds) * time.Second
 	if p.Exec != nil {
-		glog.V(4).Infof("Exec-Probe Pod: %v, Container: %v, Command: %v", pod, container, p.Exec.Command)
+		klog.V(4).Infof("Exec-Probe Pod: %v, Container: %v, Command: %v", pod, container, p.Exec.Command)
 		command := kubecontainer.ExpandContainerCommandOnlyStatic(p.Exec.Command, container.Env)
 		return pb.exec.Probe(pb.newExecInContainer(container, containerID, command, timeout))
 	}
@@ -162,10 +162,10 @@ func (pb *prober) runProbe(probeType probeType, p *v1.Probe, pod *v1.Pod, status
 			return probe.Unknown, "", err
 		}
 		path := p.HTTPGet.Path
-		glog.V(4).Infof("HTTP-Probe Host: %v://%v, Port: %v, Path: %v", scheme, host, port, path)
+		klog.V(4).Infof("HTTP-Probe Host: %v://%v, Port: %v, Path: %v", scheme, host, port, path)
 		url := formatURL(scheme, host, port, path)
 		headers := buildHeader(p.HTTPGet.HTTPHeaders)
-		glog.V(4).Infof("HTTP-Probe Headers: %v", headers)
+		klog.V(4).Infof("HTTP-Probe Headers: %v", headers)
 		if probeType == liveness {
 			return pb.livenessHttp.Probe(url, headers, timeout)
 		} else { // readiness
@@ -181,10 +181,10 @@ func (pb *prober) runProbe(probeType probeType, p *v1.Probe, pod *v1.Pod, status
 		if host == "" {
 			host = status.PodIP
 		}
-		glog.V(4).Infof("TCP-Probe Host: %v, Port: %v, Timeout: %v", host, port, timeout)
+		klog.V(4).Infof("TCP-Probe Host: %v, Port: %v, Timeout: %v", host, port, timeout)
 		return pb.tcp.Probe(host, port, timeout)
 	}
-	glog.Warningf("Failed to find probe builder for container: %v", container)
+	klog.Warningf("Failed to find probe builder for container: %v", container)
 	return probe.Unknown, "", fmt.Errorf("Missing probe handler for %s:%s", format.Pod(pod), container.Name)
 }
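Every hunk in this file is the same mechanical substitution: k8s.io/klog is a fork of github.com/golang/glog with the same call surface, so only the import path changes and every Warningf/V(n).Infof call site stays byte-for-byte identical. A standalone sketch of the post-migration pattern (not part of this commit):

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// klog registers the familiar glog flags (-v, -logtostderr, ...) itself.
	klog.InitFlags(nil)
	flag.Set("v", "4")
	flag.Parse()
	defer klog.Flush()

	klog.Warningf("liveness probe for %s is nil", "pod:container")    // was glog.Warningf
	klog.V(4).Infof("TCP-Probe Host: %v, Port: %v", "10.0.0.1", 8080) // verbosity gating unchanged
}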
vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober_manager.go (generated, vendored): 6 changed lines
@@ -19,13 +19,13 @@ package prober
 import (
 	"sync"
 
-	"github.com/golang/glog"
 	"github.com/prometheus/client_golang/prometheus"
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/tools/record"
+	"k8s.io/klog"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/prober/results"
 	"k8s.io/kubernetes/pkg/kubelet/status"
@@ -149,7 +149,7 @@ func (m *manager) AddPod(pod *v1.Pod) {
 		if c.ReadinessProbe != nil {
 			key.probeType = readiness
 			if _, ok := m.workers[key]; ok {
-				glog.Errorf("Readiness probe already exists! %v - %v",
+				klog.Errorf("Readiness probe already exists! %v - %v",
 					format.Pod(pod), c.Name)
 				return
 			}
@@ -161,7 +161,7 @@ func (m *manager) AddPod(pod *v1.Pod) {
 		if c.LivenessProbe != nil {
 			key.probeType = liveness
 			if _, ok := m.workers[key]; ok {
-				glog.Errorf("Liveness probe already exists! %v - %v",
+				klog.Errorf("Liveness probe already exists! %v - %v",
 					format.Pod(pod), c.Name)
 				return
 			}
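The duplicate-probe check in both hunks is a single map lookup because the manager keys its workers map on (pod UID, container name, probe type). A self-contained sketch of that registry pattern, with hypothetical names shaped after this diff:

package main

import "fmt"

type probeType int

const (
	liveness probeType = iota
	readiness
)

// probeKey mirrors the composite map key the prober manager uses:
// at most one worker may exist per (pod, container, probe type).
type probeKey struct {
	podUID        string
	containerName string
	probeType     probeType
}

func main() {
	workers := map[probeKey]struct{}{}

	key := probeKey{podUID: "uid-1", containerName: "app", probeType: readiness}
	if _, ok := workers[key]; ok {
		fmt.Println("Readiness probe already exists!") // the klog.Errorf branch above
		return
	}
	workers[key] = struct{}{}
	fmt.Println("registered:", key)
}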
vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober_manager_test.go (generated, vendored): 8 changed lines
@@ -22,12 +22,12 @@ import (
 	"testing"
 	"time"
 
-	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/klog"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/prober/results"
 	"k8s.io/kubernetes/pkg/probe"
@@ -359,7 +359,7 @@ func waitForWorkerExit(m *manager, workerPaths []probeKey) error {
 		if exited, _ := condition(); exited {
 			continue // Already exited, no need to poll.
 		}
-		glog.Infof("Polling %v", w)
+		klog.Infof("Polling %v", w)
 		if err := wait.Poll(interval, wait.ForeverTestTimeout, condition); err != nil {
 			return err
 		}
@@ -384,7 +384,7 @@ func waitForReadyStatus(m *manager, ready bool) error {
 		}
 		return status.ContainerStatuses[0].Ready == ready, nil
 	}
-	glog.Infof("Polling for ready state %v", ready)
+	klog.Infof("Polling for ready state %v", ready)
 	if err := wait.Poll(interval, wait.ForeverTestTimeout, condition); err != nil {
 		return err
 	}
@@ -399,7 +399,7 @@ func cleanup(t *testing.T, m *manager) {
 	condition := func() (bool, error) {
 		workerCount := m.workerCount()
 		if workerCount > 0 {
-			glog.Infof("Waiting for %d workers to exit...", workerCount)
+			klog.Infof("Waiting for %d workers to exit...", workerCount)
 		}
 		return workerCount == 0, nil
 	}
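All three test helpers above share one shape: wrap the check in a condition function and hand it to wait.Poll, which re-runs it every interval until it returns true, returns an error, or the timeout expires. A minimal sketch of that polling pattern outside the kubelet:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	start := time.Now()

	// Re-evaluated each interval until (true, nil), a non-nil error,
	// or the timeout; the same contract waitForWorkerExit relies on.
	condition := func() (bool, error) {
		return time.Since(start) > 300*time.Millisecond, nil
	}

	if err := wait.Poll(100*time.Millisecond, 5*time.Second, condition); err != nil {
		fmt.Println("condition never became true:", err)
		return
	}
	fmt.Println("condition met after", time.Since(start))
}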
vendor/k8s.io/kubernetes/pkg/kubelet/prober/results/BUILD (generated, vendored): 10 changed lines
@@ -12,8 +12,8 @@ go_library(
     importpath = "k8s.io/kubernetes/pkg/kubelet/prober/results",
     deps = [
         "//pkg/kubelet/container:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
     ],
 )
@@ -23,10 +23,10 @@ go_test(
     embed = [":go_default_library"],
     deps = [
         "//pkg/kubelet/container:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//vendor/github.com/stretchr/testify/assert:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
     ],
 )
vendor/k8s.io/kubernetes/pkg/kubelet/prober/testing/BUILD (generated, vendored): 4 changed lines
@@ -10,8 +10,8 @@ go_library(
     srcs = ["fake_manager.go"],
     importpath = "k8s.io/kubernetes/pkg/kubelet/prober/testing",
     deps = [
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
     ],
 )
vendor/k8s.io/kubernetes/pkg/kubelet/prober/worker.go (generated, vendored): 10 changed lines
@@ -20,10 +20,10 @@ import (
 	"math/rand"
 	"time"
 
-	"github.com/golang/glog"
 	"github.com/prometheus/client_golang/prometheus"
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/klog"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/prober/results"
@@ -160,13 +160,13 @@ func (w *worker) doProbe() (keepGoing bool) {
 	status, ok := w.probeManager.statusManager.GetPodStatus(w.pod.UID)
 	if !ok {
 		// Either the pod has not been created yet, or it was already deleted.
-		glog.V(3).Infof("No status for pod: %v", format.Pod(w.pod))
+		klog.V(3).Infof("No status for pod: %v", format.Pod(w.pod))
 		return true
 	}
 
 	// Worker should terminate if pod is terminated.
 	if status.Phase == v1.PodFailed || status.Phase == v1.PodSucceeded {
-		glog.V(3).Infof("Pod %v %v, exiting probe worker",
+		klog.V(3).Infof("Pod %v %v, exiting probe worker",
 			format.Pod(w.pod), status.Phase)
 		return false
 	}
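doProbe reports keepGoing to the worker's run loop: true means probe again next period, false (as in the terminal-phase branch above) tells the loop to tear the worker down. A sketch of that loop contract with hypothetical names:

package main

import (
	"fmt"
	"time"
)

// doProbeOnce stands in for doProbe: it returns whether the worker's
// loop should keep running. The real worker returns false once the pod
// reaches PodFailed or PodSucceeded.
func doProbeOnce(attempt int) (keepGoing bool) {
	fmt.Println("probing, attempt", attempt)
	return attempt < 3 // hypothetical stop condition standing in for "pod terminated"
}

func main() {
	ticker := time.NewTicker(50 * time.Millisecond) // stands in for the probe period
	defer ticker.Stop()

	attempt := 0
	for range ticker.C {
		attempt++
		if !doProbeOnce(attempt) {
			return // worker exits; this container is no longer probed
		}
	}
}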
@@ -174,7 +174,7 @@ func (w *worker) doProbe() (keepGoing bool) {
 	c, ok := podutil.GetContainerStatus(status.ContainerStatuses, w.container.Name)
 	if !ok || len(c.ContainerID) == 0 {
 		// Either the container has not been created yet, or it was deleted.
-		glog.V(3).Infof("Probe target container not found: %v - %v",
+		klog.V(3).Infof("Probe target container not found: %v - %v",
 			format.Pod(w.pod), w.container.Name)
 		return true // Wait for more information.
 	}
@@ -195,7 +195,7 @@ func (w *worker) doProbe() (keepGoing bool) {
 	}
 
 	if c.State.Running == nil {
-		glog.V(3).Infof("Non-running container probed: %v - %v",
+		klog.V(3).Infof("Non-running container probed: %v - %v",
 			format.Pod(w.pod), w.container.Name)
 		if !w.containerID.IsEmpty() {
 			w.resultsManager.Set(w.containerID, results.Failure, w.pod)