vendor update for CSI 0.3.0

gman
2018-07-18 16:47:22 +02:00
parent 6f484f92fc
commit 8ea659f0d5
6810 changed files with 438061 additions and 193861 deletions

View File

@@ -26,6 +26,7 @@ go_library(
 "//pkg/probe/http:go_default_library",
 "//pkg/probe/tcp:go_default_library",
 "//vendor/github.com/golang/glog:go_default_library",
+"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
 "//vendor/k8s.io/api/core/v1:go_default_library",
 "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
 "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",

View File

@@ -99,7 +99,7 @@ func setTestProbe(pod *v1.Pod, probeType probeType, probeSpec v1.Probe) {
 func newTestManager() *manager {
 refManager := kubecontainer.NewRefManager()
 refManager.SetRef(testContainerID, &v1.ObjectReference{}) // Suppress prober warnings.
-podManager := kubepod.NewBasicPodManager(nil, nil, nil)
+podManager := kubepod.NewBasicPodManager(nil, nil, nil, nil)
 // Add test pod to pod manager, so that status manager can get the pod from pod manager if needed.
 podManager.AddPod(getTestPod())
 m := NewManager(

View File

@@ -20,6 +20,7 @@ import (
 "sync"
 "github.com/golang/glog"
+"github.com/prometheus/client_golang/prometheus"
 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/types"
 "k8s.io/apimachinery/pkg/util/sets"
@@ -31,6 +32,16 @@ import (
 "k8s.io/kubernetes/pkg/kubelet/util/format"
 )
+// ProberResults stores the results of a probe as prometheus metrics.
+var ProberResults = prometheus.NewGaugeVec(
+prometheus.GaugeOpts{
+Subsystem: "prober",
+Name: "probe_result",
+Help: "The result of a liveness or readiness probe for a container.",
+},
+[]string{"probe_type", "container_name", "pod_name", "namespace", "pod_uid"},
+)
 // Manager manages pod probing. It creates a probe "worker" for every container that specifies a
 // probe (AddPod). The worker periodically probes its assigned container and caches the results. The
 // manager use the cached probe results to set the appropriate Ready state in the PodStatus when
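
The new ProberResults collector is a standard client_golang GaugeVec. As a rough, self-contained sketch of how such a vector is registered and written to (the registry, variable name, and label values below are assumptions for illustration, not taken from the kubelet):

package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

// proberResults mirrors the ProberResults declaration above; everything else
// in this sketch is illustrative.
var proberResults = prometheus.NewGaugeVec(
    prometheus.GaugeOpts{
        Subsystem: "prober",
        Name:      "probe_result",
        Help:      "The result of a liveness or readiness probe for a container.",
    },
    []string{"probe_type", "container_name", "pod_name", "namespace", "pod_uid"},
)

func main() {
    // A collector must be registered before its samples can be gathered or scraped.
    registry := prometheus.NewRegistry()
    registry.MustRegister(proberResults)

    // Each distinct label combination gets its own gauge child.
    proberResults.With(prometheus.Labels{
        "probe_type":     "readiness",
        "container_name": "app",
        "pod_name":       "app-0",
        "namespace":      "default",
        "pod_uid":        "1234",
    }).Set(0) // 0 = success in the encoding used elsewhere in this commit

    // Gather returns the current metric families, e.g. for inspection in tests.
    families, err := registry.Gather()
    if err != nil {
        panic(err)
    }
    fmt.Println("metric families:", len(families))
}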

View File

@@ -58,6 +58,18 @@ func (r Result) String() string {
 }
 }
+// ToPrometheusType translates a Result to a form which is better understood by prometheus.
+func (r Result) ToPrometheusType() float64 {
+switch r {
+case Success:
+return 0
+case Failure:
+return 1
+default:
+return -1
+}
+}
 // Update is an enum of the types of updates sent over the Updates channel.
 type Update struct {
 ContainerID kubecontainer.ContainerID
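
As a quick, self-contained illustration of the encoding introduced here (Success maps to 0, Failure to 1, anything else to -1); the result type below is a local stand-in, not the kubelet's results package:

package main

import "fmt"

// result is a stand-in for the kubelet's results.Result type.
type result int

const (
    success result = iota
    failure
    unknown
)

// toPrometheusType mirrors the mapping added in the diff:
// success is exported as 0, failure as 1, anything else as -1.
func toPrometheusType(r result) float64 {
    switch r {
    case success:
        return 0
    case failure:
        return 1
    default:
        return -1
    }
}

func main() {
    for _, r := range []result{success, failure, unknown} {
        fmt.Println(r, "->", toPrometheusType(r))
    }
}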

View File

@@ -21,6 +21,7 @@ import (
 "time"
 "github.com/golang/glog"
+"github.com/prometheus/client_golang/prometheus"
 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/util/runtime"
 podutil "k8s.io/kubernetes/pkg/api/v1/pod"
@@ -65,6 +66,10 @@ type worker struct {
 // If set, skip probing.
 onHold bool
+// proberResultsMetricLabels holds the labels attached to this worker
+// for the ProberResults metric.
+proberResultsMetricLabels prometheus.Labels
 }
 // Creates and starts a new probe worker.
@@ -93,6 +98,14 @@ func newWorker(
 w.initialValue = results.Success
 }
+w.proberResultsMetricLabels = prometheus.Labels{
+"probe_type": w.probeType.String(),
+"container_name": w.container.Name,
+"pod_name": w.pod.Name,
+"namespace": w.pod.Namespace,
+"pod_uid": string(w.pod.UID),
+}
 return w
 }
@@ -114,6 +127,7 @@ func (w *worker) run() {
 }
 w.probeManager.removeWorker(w.pod.UID, w.container.Name, w.probeType)
+ProberResults.Delete(w.proberResultsMetricLabels)
 }()
 probeLoop:
@@ -218,6 +232,7 @@ func (w *worker) doProbe() (keepGoing bool) {
 }
 w.resultsManager.Set(w.containerID, result, w.pod)
+ProberResults.With(w.proberResultsMetricLabels).Set(result.ToPrometheusType())
 if w.probeType == liveness && result == results.Failure {
 // The container fails a liveness check, it will need to be restarted.
@@ -225,7 +240,7 @@ func (w *worker) doProbe() (keepGoing bool) {
 // chance of hitting #21751, where running `docker exec` when a
 // container is being stopped may lead to corrupted container state.
 w.onHold = true
-w.resultRun = 1
+w.resultRun = 0
 }
 return true
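
Taken together, the worker-side changes follow a common GaugeVec lifecycle: fix a prometheus.Labels set when the worker is built, write through With(...).Set(...) on every probe, and Delete(...) the child when the worker exits so stale series stop being exported. A condensed, hypothetical sketch of that pattern (names and timings are illustrative, not the kubelet's):

package main

import (
    "time"

    "github.com/prometheus/client_golang/prometheus"
)

// probeResult and probeWorker are hypothetical stand-ins used only to show
// the With/Set/Delete lifecycle; they are not the kubelet's types.
var probeResult = prometheus.NewGaugeVec(
    prometheus.GaugeOpts{Subsystem: "prober", Name: "probe_result", Help: "Probe result per container."},
    []string{"probe_type", "container_name", "pod_name", "namespace", "pod_uid"},
)

type probeWorker struct {
    labels prometheus.Labels // fixed at construction, one metric child per worker
}

func newProbeWorker(probeType, container, pod, namespace, uid string) *probeWorker {
    return &probeWorker{labels: prometheus.Labels{
        "probe_type":     probeType,
        "container_name": container,
        "pod_name":       pod,
        "namespace":      namespace,
        "pod_uid":        uid,
    }}
}

func (w *probeWorker) run(probe func() float64, period time.Duration, stop <-chan struct{}) {
    // Drop this worker's metric child on exit so stale label sets stop being
    // exported, mirroring the deferred ProberResults.Delete call.
    defer probeResult.Delete(w.labels)

    ticker := time.NewTicker(period)
    defer ticker.Stop()
    for {
        select {
        case <-stop:
            return
        case <-ticker.C:
            // Record the latest result under this worker's fixed label set,
            // mirroring ProberResults.With(...).Set(...) in doProbe.
            probeResult.With(w.labels).Set(probe())
        }
    }
}

func main() {
    prometheus.MustRegister(probeResult)
    stop := make(chan struct{})
    w := newProbeWorker("liveness", "app", "app-0", "default", "1234")
    go w.run(func() float64 { return 0 }, 100*time.Millisecond, stop)
    time.Sleep(300 * time.Millisecond)
    close(stop)
}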

View File

@@ -118,7 +118,7 @@ func TestDoProbe(t *testing.T) {
 }
 // Clean up.
-m.statusManager = status.NewManager(&fake.Clientset{}, kubepod.NewBasicPodManager(nil, nil, nil), &statustest.FakePodDeletionSafetyProvider{})
+m.statusManager = status.NewManager(&fake.Clientset{}, kubepod.NewBasicPodManager(nil, nil, nil, nil), &statustest.FakePodDeletionSafetyProvider{})
 resultsManager(m, probeType).Remove(testContainerID)
 }
 }
@@ -352,7 +352,7 @@ func TestResultRunOnLivenessCheckFailure(t *testing.T) {
 expectContinue(t, w, w.doProbe(), msg)
 expectResult(t, w, results.Success, msg)
 if w.resultRun != 1 {
-t.Errorf("Prober resultRun should 1")
+t.Errorf("Prober resultRun should be 1")
 }
 m.prober.exec = fakeExecProber{probe.Failure, nil}
@@ -360,7 +360,7 @@ func TestResultRunOnLivenessCheckFailure(t *testing.T) {
 expectContinue(t, w, w.doProbe(), msg)
 expectResult(t, w, results.Success, msg)
 if w.resultRun != 1 {
-t.Errorf("Prober resultRun should 1")
+t.Errorf("Prober resultRun should be 1")
 }
 m.prober.exec = fakeExecProber{probe.Failure, nil}
@@ -372,13 +372,13 @@ func TestResultRunOnLivenessCheckFailure(t *testing.T) {
 }
 // Exceeding FailureThreshold should cause resultRun to
-// reset to 1 so that the probe on the restarted pod
+// reset to 0 so that the probe on the restarted pod
 // also gets FailureThreshold attempts to succeed.
 m.prober.exec = fakeExecProber{probe.Failure, nil}
 msg = "3rd probe failure, result failure"
 expectContinue(t, w, w.doProbe(), msg)
 expectResult(t, w, results.Failure, msg)
-if w.resultRun != 1 {
-t.Errorf("Prober resultRun should be reset to 1")
+if w.resultRun != 0 {
+t.Errorf("Prober resultRun should be reset to 0")
 }
 }
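
The behaviour these tests pin down, resultRun resetting to 0 rather than 1 once FailureThreshold is exceeded, is plain counter bookkeeping: a simplified, hypothetical sketch of that logic (not the kubelet's actual doProbe):

package main

import "fmt"

// probeTracker is a simplified stand-in for the worker's resultRun /
// FailureThreshold bookkeeping; field and method names are illustrative.
type probeTracker struct {
    failureThreshold int
    lastResult       bool // true = success
    resultRun        int  // consecutive probes with the same outcome
}

// observe records one probe outcome and reports whether the container
// should be treated as failed (threshold exceeded).
func (t *probeTracker) observe(success bool) bool {
    if success == t.lastResult {
        t.resultRun++
    } else {
        t.lastResult = success
        t.resultRun = 1
    }
    if !success && t.resultRun >= t.failureThreshold {
        // The container will be restarted; reset to 0 so the restarted
        // container again gets failureThreshold attempts to succeed.
        t.resultRun = 0
        return true
    }
    return false
}

func main() {
    t := &probeTracker{failureThreshold: 3, lastResult: true}
    for i, ok := range []bool{true, false, false, false} {
        fmt.Printf("probe %d success=%v exceeded=%v resultRun=%d\n",
            i, ok, t.observe(ok), t.resultRun)
    }
}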