vendor files

Serguei Bezverkhi
2018-01-09 13:57:14 -05:00
parent 558bc6c02a
commit 7b24313bd6
16547 changed files with 4527373 additions and 0 deletions

vendor/k8s.io/kubernetes/pkg/kubelet/prober/BUILD generated vendored Normal file

@@ -0,0 +1,87 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"prober.go",
"prober_manager.go",
"worker.go",
],
importpath = "k8s.io/kubernetes/pkg/kubelet/prober",
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/events:go_default_library",
"//pkg/kubelet/prober/results:go_default_library",
"//pkg/kubelet/status:go_default_library",
"//pkg/kubelet/util/format:go_default_library",
"//pkg/probe:go_default_library",
"//pkg/probe/exec:go_default_library",
"//pkg/probe/http:go_default_library",
"//pkg/probe/tcp:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"common_test.go",
"prober_manager_test.go",
"prober_test.go",
"worker_test.go",
],
importpath = "k8s.io/kubernetes/pkg/kubelet/prober",
library = ":go_default_library",
deps = [
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/container/testing:go_default_library",
"//pkg/kubelet/pod:go_default_library",
"//pkg/kubelet/prober/results:go_default_library",
"//pkg/kubelet/status:go_default_library",
"//pkg/kubelet/status/testing:go_default_library",
"//pkg/probe:go_default_library",
"//pkg/probe/exec:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/kubelet/prober/results:all-srcs",
"//pkg/kubelet/prober/testing:all-srcs",
],
tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/pkg/kubelet/prober/common_test.go generated vendored Normal file

@@ -0,0 +1,148 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package prober
import (
"reflect"
"sync"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/record"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
"k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/pkg/kubelet/status"
statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
"k8s.io/kubernetes/pkg/probe"
"k8s.io/utils/exec"
)
const (
testContainerName = "cOnTaInEr_NaMe"
testPodUID = "pOd_UiD"
)
var testContainerID = kubecontainer.ContainerID{Type: "test", ID: "cOnTaInEr_Id"}
func getTestRunningStatus() v1.PodStatus {
containerStatus := v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
}
containerStatus.State.Running = &v1.ContainerStateRunning{StartedAt: metav1.Now()}
podStatus := v1.PodStatus{
Phase: v1.PodRunning,
ContainerStatuses: []v1.ContainerStatus{containerStatus},
}
return podStatus
}
func getTestPod() *v1.Pod {
container := v1.Container{
Name: testContainerName,
}
pod := v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{container},
RestartPolicy: v1.RestartPolicyNever,
},
}
pod.Name = "testPod"
pod.UID = testPodUID
return &pod
}
func setTestProbe(pod *v1.Pod, probeType probeType, probeSpec v1.Probe) {
// All tests rely on the fake exec prober.
probeSpec.Handler = v1.Handler{
Exec: &v1.ExecAction{},
}
// Apply test defaults, overridden for test speed.
defaults := map[string]int64{
"TimeoutSeconds": 1,
"PeriodSeconds": 1,
"SuccessThreshold": 1,
"FailureThreshold": 1,
}
for field, value := range defaults {
f := reflect.ValueOf(&probeSpec).Elem().FieldByName(field)
if f.Int() == 0 {
f.SetInt(value)
}
}
switch probeType {
case readiness:
pod.Spec.Containers[0].ReadinessProbe = &probeSpec
case liveness:
pod.Spec.Containers[0].LivenessProbe = &probeSpec
}
}
func newTestManager() *manager {
refManager := kubecontainer.NewRefManager()
refManager.SetRef(testContainerID, &v1.ObjectReference{}) // Suppress prober warnings.
podManager := kubepod.NewBasicPodManager(nil, nil, nil)
// Add test pod to pod manager, so that status manager can get the pod from pod manager if needed.
podManager.AddPod(getTestPod())
m := NewManager(
status.NewManager(&fake.Clientset{}, podManager, &statustest.FakePodDeletionSafetyProvider{}),
results.NewManager(),
nil, // runner
refManager,
&record.FakeRecorder{},
).(*manager)
// Don't actually execute probes.
m.prober.exec = fakeExecProber{probe.Success, nil}
return m
}
func newTestWorker(m *manager, probeType probeType, probeSpec v1.Probe) *worker {
pod := getTestPod()
setTestProbe(pod, probeType, probeSpec)
return newWorker(m, probeType, pod, pod.Spec.Containers[0])
}
type fakeExecProber struct {
result probe.Result
err error
}
func (p fakeExecProber) Probe(c exec.Cmd) (probe.Result, string, error) {
return p.result, "", p.err
}
type syncExecProber struct {
sync.RWMutex
fakeExecProber
}
func (p *syncExecProber) set(result probe.Result, err error) {
p.Lock()
defer p.Unlock()
p.result = result
p.err = err
}
func (p *syncExecProber) Probe(cmd exec.Cmd) (probe.Result, string, error) {
p.RLock()
defer p.RUnlock()
return p.fakeExecProber.Probe(cmd)
}

vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober.go generated vendored Normal file

@@ -0,0 +1,279 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package prober
import (
"fmt"
"io"
"net"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/tools/record"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/probe"
execprobe "k8s.io/kubernetes/pkg/probe/exec"
httprobe "k8s.io/kubernetes/pkg/probe/http"
tcprobe "k8s.io/kubernetes/pkg/probe/tcp"
"k8s.io/utils/exec"
"github.com/golang/glog"
)
const maxProbeRetries = 3
// Prober helps to check the liveness/readiness of a container.
type prober struct {
exec execprobe.ExecProber
// probe types need different httprobe instances so they don't
// share a connection pool, which can cause collisions on the
// same host:port and transient failures. See #49740.
readinessHttp httprobe.HTTPProber
livenessHttp httprobe.HTTPProber
tcp tcprobe.TCPProber
runner kubecontainer.ContainerCommandRunner
refManager *kubecontainer.RefManager
recorder record.EventRecorder
}
// newProber creates a prober; it takes a command runner and
// several container info managers.
func newProber(
runner kubecontainer.ContainerCommandRunner,
refManager *kubecontainer.RefManager,
recorder record.EventRecorder) *prober {
return &prober{
exec: execprobe.New(),
readinessHttp: httprobe.New(),
livenessHttp: httprobe.New(),
tcp: tcprobe.New(),
runner: runner,
refManager: refManager,
recorder: recorder,
}
}
// probe probes the container.
func (pb *prober) probe(probeType probeType, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID) (results.Result, error) {
var probeSpec *v1.Probe
switch probeType {
case readiness:
probeSpec = container.ReadinessProbe
case liveness:
probeSpec = container.LivenessProbe
default:
return results.Failure, fmt.Errorf("Unknown probe type: %q", probeType)
}
ctrName := fmt.Sprintf("%s:%s", format.Pod(pod), container.Name)
if probeSpec == nil {
glog.Warningf("%s probe for %s is nil", probeType, ctrName)
return results.Success, nil
}
result, output, err := pb.runProbeWithRetries(probeType, probeSpec, pod, status, container, containerID, maxProbeRetries)
if err != nil || result != probe.Success {
// Probe failed in one way or another.
ref, hasRef := pb.refManager.GetRef(containerID)
if !hasRef {
glog.Warningf("No ref for container %q (%s)", containerID.String(), ctrName)
}
if err != nil {
glog.V(1).Infof("%s probe for %q errored: %v", probeType, ctrName, err)
if hasRef {
pb.recorder.Eventf(ref, v1.EventTypeWarning, events.ContainerUnhealthy, "%s probe errored: %v", probeType, err)
}
} else { // result != probe.Success
glog.V(1).Infof("%s probe for %q failed (%v): %s", probeType, ctrName, result, output)
if hasRef {
pb.recorder.Eventf(ref, v1.EventTypeWarning, events.ContainerUnhealthy, "%s probe failed: %s", probeType, output)
}
}
return results.Failure, err
}
glog.V(3).Infof("%s probe for %q succeeded", probeType, ctrName)
return results.Success, nil
}
// runProbeWithRetries tries to probe the container in a finite loop; it returns the last result
// if it never succeeds.
func (pb *prober) runProbeWithRetries(probeType probeType, p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID, retries int) (probe.Result, string, error) {
var err error
var result probe.Result
var output string
for i := 0; i < retries; i++ {
result, output, err = pb.runProbe(probeType, p, pod, status, container, containerID)
if err == nil {
return result, output, nil
}
}
return result, output, err
}
// buildHeader takes a list of HTTPHeader <name, value> string
// pairs and returns a populated string->[]string http.Header map.
func buildHeader(headerList []v1.HTTPHeader) http.Header {
headers := make(http.Header)
for _, header := range headerList {
headers[header.Name] = append(headers[header.Name], header.Value)
}
return headers
}
func (pb *prober) runProbe(probeType probeType, p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID) (probe.Result, string, error) {
timeout := time.Duration(p.TimeoutSeconds) * time.Second
if p.Exec != nil {
glog.V(4).Infof("Exec-Probe Pod: %v, Container: %v, Command: %v", pod, container, p.Exec.Command)
command := kubecontainer.ExpandContainerCommandOnlyStatic(p.Exec.Command, container.Env)
return pb.exec.Probe(pb.newExecInContainer(container, containerID, command, timeout))
}
if p.HTTPGet != nil {
scheme := strings.ToLower(string(p.HTTPGet.Scheme))
host := p.HTTPGet.Host
if host == "" {
host = status.PodIP
}
port, err := extractPort(p.HTTPGet.Port, container)
if err != nil {
return probe.Unknown, "", err
}
path := p.HTTPGet.Path
glog.V(4).Infof("HTTP-Probe Host: %v://%v, Port: %v, Path: %v", scheme, host, port, path)
url := formatURL(scheme, host, port, path)
headers := buildHeader(p.HTTPGet.HTTPHeaders)
glog.V(4).Infof("HTTP-Probe Headers: %v", headers)
if probeType == liveness {
return pb.livenessHttp.Probe(url, headers, timeout)
} else { // readiness
return pb.readinessHttp.Probe(url, headers, timeout)
}
}
if p.TCPSocket != nil {
port, err := extractPort(p.TCPSocket.Port, container)
if err != nil {
return probe.Unknown, "", err
}
host := p.TCPSocket.Host
if host == "" {
host = status.PodIP
}
glog.V(4).Infof("TCP-Probe Host: %v, Port: %v, Timeout: %v", host, port, timeout)
return pb.tcp.Probe(host, port, timeout)
}
glog.Warningf("Failed to find probe builder for container: %v", container)
return probe.Unknown, "", fmt.Errorf("Missing probe handler for %s:%s", format.Pod(pod), container.Name)
}
func extractPort(param intstr.IntOrString, container v1.Container) (int, error) {
port := -1
var err error
switch param.Type {
case intstr.Int:
port = param.IntValue()
case intstr.String:
if port, err = findPortByName(container, param.StrVal); err != nil {
// Last ditch effort - maybe it was an int stored as string?
if port, err = strconv.Atoi(param.StrVal); err != nil {
return port, err
}
}
default:
return port, fmt.Errorf("IntOrString had no kind: %+v", param)
}
if port > 0 && port < 65536 {
return port, nil
}
return port, fmt.Errorf("invalid port number: %v", port)
}
// findPortByName is a helper function to look up a port in a container by name.
func findPortByName(container v1.Container, portName string) (int, error) {
for _, port := range container.Ports {
if port.Name == portName {
return int(port.ContainerPort), nil
}
}
return 0, fmt.Errorf("port %s not found", portName)
}
// formatURL formats a URL from args. For testability.
func formatURL(scheme string, host string, port int, path string) *url.URL {
u, err := url.Parse(path)
// Something is busted with the path, but it's too late to reject it. Pass it along as is.
if err != nil {
u = &url.URL{
Path: path,
}
}
u.Scheme = scheme
u.Host = net.JoinHostPort(host, strconv.Itoa(port))
return u
}
type execInContainer struct {
// run executes a command in a container. Combined stdout and stderr output is always returned. An
// error is returned if one occurred.
run func() ([]byte, error)
}
func (pb *prober) newExecInContainer(container v1.Container, containerID kubecontainer.ContainerID, cmd []string, timeout time.Duration) exec.Cmd {
return execInContainer{func() ([]byte, error) {
return pb.runner.RunInContainer(containerID, cmd, timeout)
}}
}
func (eic execInContainer) Run() error {
return fmt.Errorf("unimplemented")
}
func (eic execInContainer) CombinedOutput() ([]byte, error) {
return eic.run()
}
func (eic execInContainer) Output() ([]byte, error) {
return nil, fmt.Errorf("unimplemented")
}
func (eic execInContainer) SetDir(dir string) {
//unimplemented
}
func (eic execInContainer) SetStdin(in io.Reader) {
//unimplemented
}
func (eic execInContainer) SetStdout(out io.Writer) {
//unimplemented
}
func (eic execInContainer) SetStderr(out io.Writer) {
//unimplemented
}
func (eic execInContainer) Stop() {
//unimplemented
}
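
The HTTPGet path above resolves a probe spec in three steps: extractPort turns the intstr port into a concrete port number (falling back to the container's named ports), buildHeader converts the HTTPHeader list into an http.Header, and formatURL assembles the request URL. The following is a minimal hypothetical test in package prober (not part of this commit) tracing that flow; the container, port name, host, and header values are made up for illustration.

package prober

import (
	"testing"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// TestHTTPGetResolutionExample is a hypothetical example showing how an
// HTTPGet probe spec is resolved by extractPort, formatURL and buildHeader.
func TestHTTPGetResolutionExample(t *testing.T) {
	container := v1.Container{
		Ports: []v1.ContainerPort{{Name: "metrics", ContainerPort: 9090}},
	}
	action := v1.HTTPGetAction{
		Host:        "10.0.0.5", // runProbe falls back to status.PodIP when Host is empty
		Port:        intstr.FromString("metrics"),
		Path:        "/healthz",
		HTTPHeaders: []v1.HTTPHeader{{Name: "X-Probe", Value: "kubelet"}},
	}
	// The named port "metrics" resolves to 9090 via findPortByName.
	port, err := extractPort(action.Port, container)
	if err != nil {
		t.Fatal(err)
	}
	if got := formatURL("http", action.Host, port, action.Path).String(); got != "http://10.0.0.5:9090/healthz" {
		t.Errorf("unexpected URL: %s", got)
	}
	if headers := buildHeader(action.HTTPHeaders); headers.Get("X-Probe") != "kubelet" {
		t.Errorf("unexpected headers: %v", headers)
	}
}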

vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober_manager.go generated vendored Normal file

@@ -0,0 +1,247 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package prober
import (
"sync"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/record"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/pkg/kubelet/status"
"k8s.io/kubernetes/pkg/kubelet/util/format"
)
// Manager manages pod probing. It creates a probe "worker" for every container that specifies a
// probe (AddPod). The worker periodically probes its assigned container and caches the results. The
// manager uses the cached probe results to set the appropriate Ready state in the PodStatus when
// requested (UpdatePodStatus). Updating probe parameters is not currently supported.
// TODO: Move liveness probing out of the runtime, to here.
type Manager interface {
// AddPod creates new probe workers for every container probe. This should be called for every
// pod created.
AddPod(pod *v1.Pod)
// RemovePod handles cleaning up the removed pod state, including terminating probe workers and
// deleting cached results.
RemovePod(pod *v1.Pod)
// CleanupPods handles cleaning up pods which should no longer be running.
// It takes a list of "active pods" which should not be cleaned up.
CleanupPods(activePods []*v1.Pod)
// UpdatePodStatus modifies the given PodStatus with the appropriate Ready state for each
// container based on container running status, cached probe results and worker states.
UpdatePodStatus(types.UID, *v1.PodStatus)
// Start starts the Manager sync loops.
Start()
}
type manager struct {
// Map of active workers for probes
workers map[probeKey]*worker
// Lock for accessing & mutating workers
workerLock sync.RWMutex
// The statusManager cache provides pod IP and container IDs for probing.
statusManager status.Manager
// readinessManager manages the results of readiness probes
readinessManager results.Manager
// livenessManager manages the results of liveness probes
livenessManager results.Manager
// prober executes the probe actions.
prober *prober
}
func NewManager(
statusManager status.Manager,
livenessManager results.Manager,
runner kubecontainer.ContainerCommandRunner,
refManager *kubecontainer.RefManager,
recorder record.EventRecorder) Manager {
prober := newProber(runner, refManager, recorder)
readinessManager := results.NewManager()
return &manager{
statusManager: statusManager,
prober: prober,
readinessManager: readinessManager,
livenessManager: livenessManager,
workers: make(map[probeKey]*worker),
}
}
// Start syncing probe status. This should only be called once.
func (m *manager) Start() {
// Start syncing readiness.
go wait.Forever(m.updateReadiness, 0)
}
// Key uniquely identifying container probes
type probeKey struct {
podUID types.UID
containerName string
probeType probeType
}
// Type of probe (readiness or liveness)
type probeType int
const (
liveness probeType = iota
readiness
)
// For debugging.
func (t probeType) String() string {
switch t {
case readiness:
return "Readiness"
case liveness:
return "Liveness"
default:
return "UNKNOWN"
}
}
func (m *manager) AddPod(pod *v1.Pod) {
m.workerLock.Lock()
defer m.workerLock.Unlock()
key := probeKey{podUID: pod.UID}
for _, c := range pod.Spec.Containers {
key.containerName = c.Name
if c.ReadinessProbe != nil {
key.probeType = readiness
if _, ok := m.workers[key]; ok {
glog.Errorf("Readiness probe already exists! %v - %v",
format.Pod(pod), c.Name)
return
}
w := newWorker(m, readiness, pod, c)
m.workers[key] = w
go w.run()
}
if c.LivenessProbe != nil {
key.probeType = liveness
if _, ok := m.workers[key]; ok {
glog.Errorf("Liveness probe already exists! %v - %v",
format.Pod(pod), c.Name)
return
}
w := newWorker(m, liveness, pod, c)
m.workers[key] = w
go w.run()
}
}
}
func (m *manager) RemovePod(pod *v1.Pod) {
m.workerLock.RLock()
defer m.workerLock.RUnlock()
key := probeKey{podUID: pod.UID}
for _, c := range pod.Spec.Containers {
key.containerName = c.Name
for _, probeType := range [...]probeType{readiness, liveness} {
key.probeType = probeType
if worker, ok := m.workers[key]; ok {
worker.stop()
}
}
}
}
func (m *manager) CleanupPods(activePods []*v1.Pod) {
desiredPods := make(map[types.UID]sets.Empty)
for _, pod := range activePods {
desiredPods[pod.UID] = sets.Empty{}
}
m.workerLock.RLock()
defer m.workerLock.RUnlock()
for key, worker := range m.workers {
if _, ok := desiredPods[key.podUID]; !ok {
worker.stop()
}
}
}
func (m *manager) UpdatePodStatus(podUID types.UID, podStatus *v1.PodStatus) {
for i, c := range podStatus.ContainerStatuses {
var ready bool
if c.State.Running == nil {
ready = false
} else if result, ok := m.readinessManager.Get(kubecontainer.ParseContainerID(c.ContainerID)); ok {
ready = result == results.Success
} else {
// Check whether there is a probe which hasn't run yet.
_, exists := m.getWorker(podUID, c.Name, readiness)
ready = !exists
}
podStatus.ContainerStatuses[i].Ready = ready
}
// init containers are ready if they have exited with success or if a readiness probe has
// succeeded.
for i, c := range podStatus.InitContainerStatuses {
var ready bool
if c.State.Terminated != nil && c.State.Terminated.ExitCode == 0 {
ready = true
}
podStatus.InitContainerStatuses[i].Ready = ready
}
}
func (m *manager) getWorker(podUID types.UID, containerName string, probeType probeType) (*worker, bool) {
m.workerLock.RLock()
defer m.workerLock.RUnlock()
worker, ok := m.workers[probeKey{podUID, containerName, probeType}]
return worker, ok
}
// Called by the worker after exiting.
func (m *manager) removeWorker(podUID types.UID, containerName string, probeType probeType) {
m.workerLock.Lock()
defer m.workerLock.Unlock()
delete(m.workers, probeKey{podUID, containerName, probeType})
}
// workerCount returns the total number of probe workers. For testing.
func (m *manager) workerCount() int {
m.workerLock.RLock()
defer m.workerLock.RUnlock()
return len(m.workers)
}
func (m *manager) updateReadiness() {
update := <-m.readinessManager.Updates()
ready := update.Result == results.Success
m.statusManager.SetContainerReadiness(update.PodUID, update.ContainerID, ready)
}
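
Taken together, the interface above describes the Manager lifecycle: construct it with a status manager and a liveness results manager, Start the readiness sync loop, AddPod as pods are created, UpdatePodStatus when building pod status, and RemovePod/CleanupPods on deletion. Below is a hypothetical sketch (not part of this commit) of how a kubelet-like caller might wire it up; the statusManager, runner, pod, and podStatus arguments are assumed to come from the surrounding kubelet.

package prober

import (
	"k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	"k8s.io/kubernetes/pkg/kubelet/prober/results"
	"k8s.io/kubernetes/pkg/kubelet/status"
)

// exampleLifecycle is a hypothetical illustration of driving the prober Manager.
func exampleLifecycle(statusManager status.Manager, runner kubecontainer.ContainerCommandRunner, pod *v1.Pod, podStatus *v1.PodStatus) {
	livenessManager := results.NewManager()
	m := NewManager(
		statusManager,
		livenessManager,
		runner,
		kubecontainer.NewRefManager(),
		&record.FakeRecorder{}, // a real kubelet passes its event recorder here
	)
	m.Start()                             // begin syncing readiness results into the status manager
	m.AddPod(pod)                         // spawn one worker per configured container probe
	m.UpdatePodStatus(pod.UID, podStatus) // fill in Ready from cached results and worker state
	m.RemovePod(pod)                      // stop the pod's workers once it is removed
}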

vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober_manager_test.go generated vendored Normal file

@ -0,0 +1,412 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package prober
import (
"fmt"
"strconv"
"testing"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/pkg/probe"
)
func init() {
runtime.ReallyCrash = true
}
var defaultProbe *v1.Probe = &v1.Probe{
Handler: v1.Handler{
Exec: &v1.ExecAction{},
},
TimeoutSeconds: 1,
PeriodSeconds: 1,
SuccessThreshold: 1,
FailureThreshold: 3,
}
func TestAddRemovePods(t *testing.T) {
noProbePod := v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "no_probe_pod",
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "no_probe1",
}, {
Name: "no_probe2",
}},
},
}
probePod := v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "probe_pod",
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "no_probe1",
}, {
Name: "readiness",
ReadinessProbe: defaultProbe,
}, {
Name: "no_probe2",
}, {
Name: "liveness",
LivenessProbe: defaultProbe,
}},
},
}
m := newTestManager()
defer cleanup(t, m)
if err := expectProbes(m, nil); err != nil {
t.Error(err)
}
// Adding a pod with no probes should be a no-op.
m.AddPod(&noProbePod)
if err := expectProbes(m, nil); err != nil {
t.Error(err)
}
// Adding a pod with probes.
m.AddPod(&probePod)
probePaths := []probeKey{
{"probe_pod", "readiness", readiness},
{"probe_pod", "liveness", liveness},
}
if err := expectProbes(m, probePaths); err != nil {
t.Error(err)
}
// Removing un-probed pod.
m.RemovePod(&noProbePod)
if err := expectProbes(m, probePaths); err != nil {
t.Error(err)
}
// Removing probed pod.
m.RemovePod(&probePod)
if err := waitForWorkerExit(m, probePaths); err != nil {
t.Fatal(err)
}
if err := expectProbes(m, nil); err != nil {
t.Error(err)
}
// Removing already removed pods should be a no-op.
m.RemovePod(&probePod)
if err := expectProbes(m, nil); err != nil {
t.Error(err)
}
}
func TestCleanupPods(t *testing.T) {
m := newTestManager()
defer cleanup(t, m)
podToCleanup := v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "pod_cleanup",
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "prober1",
ReadinessProbe: defaultProbe,
}, {
Name: "prober2",
LivenessProbe: defaultProbe,
}},
},
}
podToKeep := v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "pod_keep",
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "prober1",
ReadinessProbe: defaultProbe,
}, {
Name: "prober2",
LivenessProbe: defaultProbe,
}},
},
}
m.AddPod(&podToCleanup)
m.AddPod(&podToKeep)
m.CleanupPods([]*v1.Pod{&podToKeep})
removedProbes := []probeKey{
{"pod_cleanup", "prober1", readiness},
{"pod_cleanup", "prober2", liveness},
}
expectedProbes := []probeKey{
{"pod_keep", "prober1", readiness},
{"pod_keep", "prober2", liveness},
}
if err := waitForWorkerExit(m, removedProbes); err != nil {
t.Fatal(err)
}
if err := expectProbes(m, expectedProbes); err != nil {
t.Error(err)
}
}
func TestCleanupRepeated(t *testing.T) {
m := newTestManager()
defer cleanup(t, m)
podTemplate := v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "prober1",
ReadinessProbe: defaultProbe,
LivenessProbe: defaultProbe,
}},
},
}
const numTestPods = 100
for i := 0; i < numTestPods; i++ {
pod := podTemplate
pod.UID = types.UID(strconv.Itoa(i))
m.AddPod(&pod)
}
for i := 0; i < 10; i++ {
m.CleanupPods([]*v1.Pod{})
}
}
func TestUpdatePodStatus(t *testing.T) {
unprobed := v1.ContainerStatus{
Name: "unprobed_container",
ContainerID: "test://unprobed_container_id",
State: v1.ContainerState{
Running: &v1.ContainerStateRunning{},
},
}
probedReady := v1.ContainerStatus{
Name: "probed_container_ready",
ContainerID: "test://probed_container_ready_id",
State: v1.ContainerState{
Running: &v1.ContainerStateRunning{},
},
}
probedPending := v1.ContainerStatus{
Name: "probed_container_pending",
ContainerID: "test://probed_container_pending_id",
State: v1.ContainerState{
Running: &v1.ContainerStateRunning{},
},
}
probedUnready := v1.ContainerStatus{
Name: "probed_container_unready",
ContainerID: "test://probed_container_unready_id",
State: v1.ContainerState{
Running: &v1.ContainerStateRunning{},
},
}
terminated := v1.ContainerStatus{
Name: "terminated_container",
ContainerID: "test://terminated_container_id",
State: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{},
},
}
podStatus := v1.PodStatus{
Phase: v1.PodRunning,
ContainerStatuses: []v1.ContainerStatus{
unprobed, probedReady, probedPending, probedUnready, terminated,
},
}
m := newTestManager()
// no cleanup: using fake workers.
// Setup probe "workers" and cached results.
m.workers = map[probeKey]*worker{
{testPodUID, unprobed.Name, liveness}: {},
{testPodUID, probedReady.Name, readiness}: {},
{testPodUID, probedPending.Name, readiness}: {},
{testPodUID, probedUnready.Name, readiness}: {},
{testPodUID, terminated.Name, readiness}: {},
}
m.readinessManager.Set(kubecontainer.ParseContainerID(probedReady.ContainerID), results.Success, &v1.Pod{})
m.readinessManager.Set(kubecontainer.ParseContainerID(probedUnready.ContainerID), results.Failure, &v1.Pod{})
m.readinessManager.Set(kubecontainer.ParseContainerID(terminated.ContainerID), results.Success, &v1.Pod{})
m.UpdatePodStatus(testPodUID, &podStatus)
expectedReadiness := map[probeKey]bool{
{testPodUID, unprobed.Name, readiness}: true,
{testPodUID, probedReady.Name, readiness}: true,
{testPodUID, probedPending.Name, readiness}: false,
{testPodUID, probedUnready.Name, readiness}: false,
{testPodUID, terminated.Name, readiness}: false,
}
for _, c := range podStatus.ContainerStatuses {
expected, ok := expectedReadiness[probeKey{testPodUID, c.Name, readiness}]
if !ok {
t.Fatalf("Missing expectation for test case: %v", c.Name)
}
if expected != c.Ready {
t.Errorf("Unexpected readiness for container %v: Expected %v but got %v",
c.Name, expected, c.Ready)
}
}
}
func TestUpdateReadiness(t *testing.T) {
testPod := getTestPod()
setTestProbe(testPod, readiness, v1.Probe{})
m := newTestManager()
defer cleanup(t, m)
// Start syncing readiness without leaking the goroutine.
stopCh := make(chan struct{})
go wait.Until(m.updateReadiness, 0, stopCh)
defer func() {
close(stopCh)
// Send an update to exit updateReadiness()
m.readinessManager.Set(kubecontainer.ContainerID{}, results.Success, &v1.Pod{})
}()
exec := syncExecProber{}
exec.set(probe.Success, nil)
m.prober.exec = &exec
m.statusManager.SetPodStatus(testPod, getTestRunningStatus())
m.AddPod(testPod)
probePaths := []probeKey{{testPodUID, testContainerName, readiness}}
if err := expectProbes(m, probePaths); err != nil {
t.Error(err)
}
// Wait for ready status.
if err := waitForReadyStatus(m, true); err != nil {
t.Error(err)
}
// Prober fails.
exec.set(probe.Failure, nil)
// Wait for failed status.
if err := waitForReadyStatus(m, false); err != nil {
t.Error(err)
}
}
func expectProbes(m *manager, expectedProbes []probeKey) error {
m.workerLock.RLock()
defer m.workerLock.RUnlock()
var unexpected []probeKey
missing := make([]probeKey, len(expectedProbes))
copy(missing, expectedProbes)
outer:
for probePath := range m.workers {
for i, expectedPath := range missing {
if probePath == expectedPath {
missing = append(missing[:i], missing[i+1:]...)
continue outer
}
}
unexpected = append(unexpected, probePath)
}
if len(missing) == 0 && len(unexpected) == 0 {
return nil // Yay!
}
return fmt.Errorf("Unexpected probes: %v; Missing probes: %v;", unexpected, missing)
}
const interval = 1 * time.Second
// Wait for the given workers to exit & clean up.
func waitForWorkerExit(m *manager, workerPaths []probeKey) error {
for _, w := range workerPaths {
condition := func() (bool, error) {
_, exists := m.getWorker(w.podUID, w.containerName, w.probeType)
return !exists, nil
}
if exited, _ := condition(); exited {
continue // Already exited, no need to poll.
}
glog.Infof("Polling %v", w)
if err := wait.Poll(interval, wait.ForeverTestTimeout, condition); err != nil {
return err
}
}
return nil
}
// Wait for the test pod's container to report the given ready state.
func waitForReadyStatus(m *manager, ready bool) error {
condition := func() (bool, error) {
status, ok := m.statusManager.GetPodStatus(testPodUID)
if !ok {
return false, fmt.Errorf("status not found: %q", testPodUID)
}
if len(status.ContainerStatuses) != 1 {
return false, fmt.Errorf("expected single container, found %d", len(status.ContainerStatuses))
}
if status.ContainerStatuses[0].ContainerID != testContainerID.String() {
return false, fmt.Errorf("expected container %q, found %q",
testContainerID, status.ContainerStatuses[0].ContainerID)
}
return status.ContainerStatuses[0].Ready == ready, nil
}
glog.Infof("Polling for ready state %v", ready)
if err := wait.Poll(interval, wait.ForeverTestTimeout, condition); err != nil {
return err
}
return nil
}
// cleanup running probes to avoid leaking goroutines.
func cleanup(t *testing.T, m *manager) {
m.CleanupPods(nil)
condition := func() (bool, error) {
workerCount := m.workerCount()
if workerCount > 0 {
glog.Infof("Waiting for %d workers to exit...", workerCount)
}
return workerCount == 0, nil
}
if exited, _ := condition(); exited {
return // Already exited, no need to poll.
}
if err := wait.Poll(interval, wait.ForeverTestTimeout, condition); err != nil {
t.Fatalf("Error during cleanup: %v", err)
}
}

vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober_test.go generated vendored Normal file

@@ -0,0 +1,368 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package prober
import (
"errors"
"fmt"
"net/http"
"reflect"
"testing"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/tools/record"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/pkg/probe"
execprobe "k8s.io/kubernetes/pkg/probe/exec"
)
func TestFormatURL(t *testing.T) {
testCases := []struct {
scheme string
host string
port int
path string
result string
}{
{"http", "localhost", 93, "", "http://localhost:93"},
{"https", "localhost", 93, "/path", "https://localhost:93/path"},
{"http", "localhost", 93, "?foo", "http://localhost:93?foo"},
{"https", "localhost", 93, "/path?bar", "https://localhost:93/path?bar"},
}
for _, test := range testCases {
url := formatURL(test.scheme, test.host, test.port, test.path)
if url.String() != test.result {
t.Errorf("Expected %s, got %s", test.result, url.String())
}
}
}
func TestFindPortByName(t *testing.T) {
container := v1.Container{
Ports: []v1.ContainerPort{
{
Name: "foo",
ContainerPort: 8080,
},
{
Name: "bar",
ContainerPort: 9000,
},
},
}
want := 8080
got, err := findPortByName(container, "foo")
if got != want || err != nil {
t.Errorf("Expected %v, got %v, err: %v", want, got, err)
}
}
func TestGetURLParts(t *testing.T) {
testCases := []struct {
probe *v1.HTTPGetAction
ok bool
host string
port int
path string
}{
{&v1.HTTPGetAction{Host: "", Port: intstr.FromInt(-1), Path: ""}, false, "", -1, ""},
{&v1.HTTPGetAction{Host: "", Port: intstr.FromString(""), Path: ""}, false, "", -1, ""},
{&v1.HTTPGetAction{Host: "", Port: intstr.FromString("-1"), Path: ""}, false, "", -1, ""},
{&v1.HTTPGetAction{Host: "", Port: intstr.FromString("not-found"), Path: ""}, false, "", -1, ""},
{&v1.HTTPGetAction{Host: "", Port: intstr.FromString("found"), Path: ""}, true, "127.0.0.1", 93, ""},
{&v1.HTTPGetAction{Host: "", Port: intstr.FromInt(76), Path: ""}, true, "127.0.0.1", 76, ""},
{&v1.HTTPGetAction{Host: "", Port: intstr.FromString("118"), Path: ""}, true, "127.0.0.1", 118, ""},
{&v1.HTTPGetAction{Host: "hostname", Port: intstr.FromInt(76), Path: "path"}, true, "hostname", 76, "path"},
}
for _, test := range testCases {
state := v1.PodStatus{PodIP: "127.0.0.1"}
container := v1.Container{
Ports: []v1.ContainerPort{{Name: "found", ContainerPort: 93}},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
HTTPGet: test.probe,
},
},
}
scheme := test.probe.Scheme
if scheme == "" {
scheme = v1.URISchemeHTTP
}
host := test.probe.Host
if host == "" {
host = state.PodIP
}
port, err := extractPort(test.probe.Port, container)
if test.ok && err != nil {
t.Errorf("Unexpected error: %v", err)
}
path := test.probe.Path
if !test.ok && err == nil {
t.Errorf("Expected error for %+v, got %s%s:%d/%s", test, scheme, host, port, path)
}
if test.ok {
if host != test.host || port != test.port || path != test.path {
t.Errorf("Expected %s:%d/%s, got %s:%d/%s",
test.host, test.port, test.path, host, port, path)
}
}
}
}
func TestGetTCPAddrParts(t *testing.T) {
testCases := []struct {
probe *v1.TCPSocketAction
ok bool
host string
port int
}{
{&v1.TCPSocketAction{Port: intstr.FromInt(-1)}, false, "", -1},
{&v1.TCPSocketAction{Port: intstr.FromString("")}, false, "", -1},
{&v1.TCPSocketAction{Port: intstr.FromString("-1")}, false, "", -1},
{&v1.TCPSocketAction{Port: intstr.FromString("not-found")}, false, "", -1},
{&v1.TCPSocketAction{Port: intstr.FromString("found")}, true, "1.2.3.4", 93},
{&v1.TCPSocketAction{Port: intstr.FromInt(76)}, true, "1.2.3.4", 76},
{&v1.TCPSocketAction{Port: intstr.FromString("118")}, true, "1.2.3.4", 118},
}
for _, test := range testCases {
host := "1.2.3.4"
container := v1.Container{
Ports: []v1.ContainerPort{{Name: "found", ContainerPort: 93}},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
TCPSocket: test.probe,
},
},
}
port, err := extractPort(test.probe.Port, container)
if !test.ok && err == nil {
t.Errorf("Expected error for %+v, got %s:%d", test, host, port)
}
if test.ok && err != nil {
t.Errorf("Unexpected error: %v", err)
}
if test.ok {
if host != test.host || port != test.port {
t.Errorf("Expected %s:%d, got %s:%d", test.host, test.port, host, port)
}
}
}
}
func TestHTTPHeaders(t *testing.T) {
testCases := []struct {
input []v1.HTTPHeader
output http.Header
}{
{[]v1.HTTPHeader{}, http.Header{}},
{[]v1.HTTPHeader{
{Name: "X-Muffins-Or-Cupcakes", Value: "Muffins"},
}, http.Header{"X-Muffins-Or-Cupcakes": {"Muffins"}}},
{[]v1.HTTPHeader{
{Name: "X-Muffins-Or-Cupcakes", Value: "Muffins"},
{Name: "X-Muffins-Or-Plumcakes", Value: "Muffins!"},
}, http.Header{"X-Muffins-Or-Cupcakes": {"Muffins"},
"X-Muffins-Or-Plumcakes": {"Muffins!"}}},
{[]v1.HTTPHeader{
{Name: "X-Muffins-Or-Cupcakes", Value: "Muffins"},
{Name: "X-Muffins-Or-Cupcakes", Value: "Cupcakes, too"},
}, http.Header{"X-Muffins-Or-Cupcakes": {"Muffins", "Cupcakes, too"}}},
}
for _, test := range testCases {
headers := buildHeader(test.input)
if !reflect.DeepEqual(test.output, headers) {
t.Errorf("Expected %#v, got %#v", test.output, headers)
}
}
}
func TestProbe(t *testing.T) {
containerID := kubecontainer.ContainerID{Type: "test", ID: "foobar"}
execProbe := &v1.Probe{
Handler: v1.Handler{
Exec: &v1.ExecAction{},
},
}
tests := []struct {
probe *v1.Probe
env []v1.EnvVar
execError bool
expectError bool
execResult probe.Result
expectedResult results.Result
expectCommand []string
}{
{ // No probe
probe: nil,
expectedResult: results.Success,
},
{ // No handler
probe: &v1.Probe{},
expectError: true,
expectedResult: results.Failure,
},
{ // Probe fails
probe: execProbe,
execResult: probe.Failure,
expectedResult: results.Failure,
},
{ // Probe succeeds
probe: execProbe,
execResult: probe.Success,
expectedResult: results.Success,
},
{ // Probe result is unknown
probe: execProbe,
execResult: probe.Unknown,
expectedResult: results.Failure,
},
{ // Probe has an error
probe: execProbe,
execError: true,
expectError: true,
execResult: probe.Unknown,
expectedResult: results.Failure,
},
{ // Probe arguments are passed through
probe: &v1.Probe{
Handler: v1.Handler{
Exec: &v1.ExecAction{
Command: []string{"/bin/bash", "-c", "some script"},
},
},
},
expectCommand: []string{"/bin/bash", "-c", "some script"},
execResult: probe.Success,
expectedResult: results.Success,
},
{ // Probe arguments are passed through
probe: &v1.Probe{
Handler: v1.Handler{
Exec: &v1.ExecAction{
Command: []string{"/bin/bash", "-c", "some $(A) $(B)"},
},
},
},
env: []v1.EnvVar{
{Name: "A", Value: "script"},
},
expectCommand: []string{"/bin/bash", "-c", "some script $(B)"},
execResult: probe.Success,
expectedResult: results.Success,
},
}
for i, test := range tests {
for _, probeType := range [...]probeType{liveness, readiness} {
prober := &prober{
refManager: kubecontainer.NewRefManager(),
recorder: &record.FakeRecorder{},
}
testID := fmt.Sprintf("%d-%s", i, probeType)
testContainer := v1.Container{Env: test.env}
switch probeType {
case liveness:
testContainer.LivenessProbe = test.probe
case readiness:
testContainer.ReadinessProbe = test.probe
}
if test.execError {
prober.exec = fakeExecProber{test.execResult, errors.New("exec error")}
} else {
prober.exec = fakeExecProber{test.execResult, nil}
}
result, err := prober.probe(probeType, &v1.Pod{}, v1.PodStatus{}, testContainer, containerID)
if test.expectError && err == nil {
t.Errorf("[%s] Expected probe error but no error was returned.", testID)
}
if !test.expectError && err != nil {
t.Errorf("[%s] Didn't expect probe error but got: %v", testID, err)
}
if test.expectedResult != result {
t.Errorf("[%s] Expected result to be %v but was %v", testID, test.expectedResult, result)
}
if len(test.expectCommand) > 0 {
prober.exec = execprobe.New()
prober.runner = &containertest.FakeContainerCommandRunner{}
_, err := prober.probe(probeType, &v1.Pod{}, v1.PodStatus{}, testContainer, containerID)
if err != nil {
t.Errorf("[%s] Didn't expect probe error but got: %v", testID, err)
continue
}
if !reflect.DeepEqual(test.expectCommand, prober.runner.(*containertest.FakeContainerCommandRunner).Cmd) {
t.Errorf("[%s] unexpected probe arguments: %v", testID, prober.runner.(*containertest.FakeContainerCommandRunner).Cmd)
}
}
}
}
}
func TestNewExecInContainer(t *testing.T) {
tests := []struct {
name string
err error
}{
{
name: "no error",
err: nil,
},
{
name: "error - make sure we get output",
err: errors.New("bad"),
},
}
for _, test := range tests {
runner := &containertest.FakeContainerCommandRunner{
Stdout: "foo",
Err: test.err,
}
prober := &prober{
runner: runner,
}
container := v1.Container{}
containerID := kubecontainer.ContainerID{Type: "docker", ID: "containerID"}
cmd := []string{"/foo", "bar"}
exec := prober.newExecInContainer(container, containerID, cmd, 0)
actualOutput, err := exec.CombinedOutput()
if e, a := containerID, runner.ContainerID; e != a {
t.Errorf("%s: container id: expected %v, got %v", test.name, e, a)
}
if e, a := cmd, runner.Cmd; !reflect.DeepEqual(e, a) {
t.Errorf("%s: cmd: expected %v, got %v", test.name, e, a)
}
// This isn't 100% foolproof: a bug in a real ContainerCommandRunner that fails to copy to stdout/stderr wouldn't be caught by this test.
if e, a := "foo", string(actualOutput); e != a {
t.Errorf("%s: output: expected %q, got %q", test.name, e, a)
}
if e, a := fmt.Sprintf("%v", test.err), fmt.Sprintf("%v", err); e != a {
t.Errorf("%s: error: expected %s, got %s", test.name, e, a)
}
}
}

vendor/k8s.io/kubernetes/pkg/kubelet/prober/results/BUILD generated vendored Normal file

@@ -0,0 +1,45 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = ["results_manager.go"],
importpath = "k8s.io/kubernetes/pkg/kubelet/prober/results",
deps = [
"//pkg/kubelet/container:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["results_manager_test.go"],
importpath = "k8s.io/kubernetes/pkg/kubelet/prober/results",
library = ":go_default_library",
deps = [
"//pkg/kubelet/container:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/pkg/kubelet/prober/results/results_manager.go generated vendored Normal file

@@ -0,0 +1,121 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package results
import (
"sync"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
// Manager provides a probe results cache and channel of updates.
type Manager interface {
// Get returns the cached result for the container with the given ID.
Get(kubecontainer.ContainerID) (Result, bool)
// Set sets the cached result for the container with the given ID.
// The pod is only included to be sent with the update.
Set(kubecontainer.ContainerID, Result, *v1.Pod)
// Remove clears the cached result for the container with the given ID.
Remove(kubecontainer.ContainerID)
// Updates creates a channel that receives an Update whenever a cached result changes (but not
// when a result is removed).
// NOTE: The current implementation only supports a single updates channel.
Updates() <-chan Update
}
// Result is the type for probe results.
type Result bool
const (
Success Result = true
Failure Result = false
)
func (r Result) String() string {
switch r {
case Success:
return "Success"
case Failure:
return "Failure"
default:
return "UNKNOWN"
}
}
// Update describes a change in a container's probe result, sent over the Updates channel.
type Update struct {
ContainerID kubecontainer.ContainerID
Result Result
PodUID types.UID
}
// Manager implementation.
type manager struct {
// guards the cache
sync.RWMutex
// map of container ID -> probe Result
cache map[kubecontainer.ContainerID]Result
// channel of updates
updates chan Update
}
var _ Manager = &manager{}
// NewManager creates and returns an empty results manager.
func NewManager() Manager {
return &manager{
cache: make(map[kubecontainer.ContainerID]Result),
updates: make(chan Update, 20),
}
}
func (m *manager) Get(id kubecontainer.ContainerID) (Result, bool) {
m.RLock()
defer m.RUnlock()
result, found := m.cache[id]
return result, found
}
func (m *manager) Set(id kubecontainer.ContainerID, result Result, pod *v1.Pod) {
if m.setInternal(id, result) {
m.updates <- Update{id, result, pod.UID}
}
}
// Internal helper for locked portion of set. Returns whether an update should be sent.
func (m *manager) setInternal(id kubecontainer.ContainerID, result Result) bool {
m.Lock()
defer m.Unlock()
prev, exists := m.cache[id]
if !exists || prev != result {
m.cache[id] = result
return true
}
return false
}
func (m *manager) Remove(id kubecontainer.ContainerID) {
m.Lock()
defer m.Unlock()
delete(m.cache, id)
}
func (m *manager) Updates() <-chan Update {
return m.updates
}
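
The cache plus a single updates channel is what the prober manager's updateReadiness loop relies on: Set only emits an Update when a container's cached result actually changes, so a lone consumer can block on Updates() and react to transitions only. Here is a hypothetical sketch (not part of this commit) of that contract; the id and pod arguments are placeholders.

package results

import (
	"fmt"

	"k8s.io/api/core/v1"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

// exampleConsumer is a hypothetical illustration of the Set/Updates contract.
func exampleConsumer(m Manager, id kubecontainer.ContainerID, pod *v1.Pod) {
	go func() {
		// Only the single designated consumer should read Updates().
		for update := range m.Updates() {
			fmt.Printf("container %v is now %v\n", update.ContainerID, update.Result)
		}
	}()
	m.Set(id, Failure, pod) // first result for this ID: one Update is sent
	m.Set(id, Failure, pod) // unchanged result: no Update
	m.Set(id, Success, pod) // transition: one Update is sent
}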

vendor/k8s.io/kubernetes/pkg/kubelet/prober/results/results_manager_test.go generated vendored Normal file

@@ -0,0 +1,99 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package results
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
func TestCacheOperations(t *testing.T) {
m := NewManager()
unsetID := kubecontainer.ContainerID{Type: "test", ID: "unset"}
setID := kubecontainer.ContainerID{Type: "test", ID: "set"}
_, found := m.Get(unsetID)
assert.False(t, found, "unset result found")
m.Set(setID, Success, &v1.Pod{})
result, found := m.Get(setID)
assert.True(t, result == Success, "set result")
assert.True(t, found, "set result found")
m.Remove(setID)
_, found = m.Get(setID)
assert.False(t, found, "removed result found")
}
func TestUpdates(t *testing.T) {
m := NewManager()
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-pod"}}
fooID := kubecontainer.ContainerID{Type: "test", ID: "foo"}
barID := kubecontainer.ContainerID{Type: "test", ID: "bar"}
expectUpdate := func(expected Update, msg string) {
select {
case u := <-m.Updates():
if expected != u {
t.Errorf("Expected update %v, received %v: %s", expected, u, msg)
}
case <-time.After(wait.ForeverTestTimeout):
t.Errorf("Timed out waiting for update %v: %s", expected, msg)
}
}
expectNoUpdate := func(msg string) {
// NOTE: Since updates are accumulated asynchronously, this method is not guaranteed to fail
// when it should. In the event it misses a failure, the following calls to expectUpdate should
// still fail.
select {
case u := <-m.Updates():
t.Errorf("Unexpected update %v: %s", u, msg)
default:
// Pass
}
}
// New result should always push an update.
m.Set(fooID, Success, pod)
expectUpdate(Update{fooID, Success, pod.UID}, "new success")
m.Set(barID, Failure, pod)
expectUpdate(Update{barID, Failure, pod.UID}, "new failure")
// Unchanged results should not send an update.
m.Set(fooID, Success, pod)
expectNoUpdate("unchanged foo")
m.Set(barID, Failure, pod)
expectNoUpdate("unchanged bar")
// Changed results should send an update.
m.Set(fooID, Failure, pod)
expectUpdate(Update{fooID, Failure, pod.UID}, "changed foo")
m.Set(barID, Success, pod)
expectUpdate(Update{barID, Success, pod.UID}, "changed bar")
}

vendor/k8s.io/kubernetes/pkg/kubelet/prober/testing/BUILD generated vendored Normal file

@@ -0,0 +1,29 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["fake_manager.go"],
importpath = "k8s.io/kubernetes/pkg/kubelet/prober/testing",
deps = [
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/pkg/kubelet/prober/testing/fake_manager.go generated vendored Normal file

@@ -0,0 +1,36 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
)
type FakeManager struct{}
// Unused methods.
func (_ FakeManager) AddPod(_ *v1.Pod) {}
func (_ FakeManager) RemovePod(_ *v1.Pod) {}
func (_ FakeManager) CleanupPods(_ []*v1.Pod) {}
func (_ FakeManager) Start() {}
func (_ FakeManager) UpdatePodStatus(_ types.UID, podStatus *v1.PodStatus) {
for i := range podStatus.ContainerStatuses {
podStatus.ContainerStatuses[i].Ready = true
}
}
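
FakeManager is a no-op stand-in that satisfies the prober Manager interface and simply marks every container Ready, which is handy for kubelet code under test that needs a probe manager but no real probing. A hypothetical usage sketch follows (not part of this commit); the package and function names are made up.

package example // hypothetical consumer package

import (
	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/kubelet/prober"
	probetest "k8s.io/kubernetes/pkg/kubelet/prober/testing"
)

// exampleFakeManager is a hypothetical illustration of substituting FakeManager
// wherever a prober.Manager is expected.
func exampleFakeManager() {
	status := &v1.PodStatus{ContainerStatuses: []v1.ContainerStatus{{Name: "c"}}}
	var m prober.Manager = probetest.FakeManager{}
	m.UpdatePodStatus("some-pod-uid", status)
	// status.ContainerStatuses[0].Ready is now true.
}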

vendor/k8s.io/kubernetes/pkg/kubelet/prober/worker.go generated vendored Normal file

@@ -0,0 +1,232 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package prober
import (
"math/rand"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/runtime"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/pkg/kubelet/util/format"
)
// worker handles the periodic probing of its assigned container. Each worker has a go-routine
// associated with it which runs the probe loop until the container permanently terminates, or the
// stop channel is closed. The worker uses the probe Manager's statusManager to get up-to-date
// container IDs.
type worker struct {
// Channel for stopping the probe.
stopCh chan struct{}
// The pod containing this probe (read-only)
pod *v1.Pod
// The container to probe (read-only)
container v1.Container
// Describes the probe configuration (read-only)
spec *v1.Probe
// The type of the worker.
probeType probeType
// The probe value during the initial delay.
initialValue results.Result
// Where to store this worker's results.
resultsManager results.Manager
probeManager *manager
// The last known container ID for this worker.
containerID kubecontainer.ContainerID
// The last probe result for this worker.
lastResult results.Result
// How many times in a row the probe has returned the same result.
resultRun int
// If set, skip probing.
onHold bool
}
// Creates and starts a new probe worker.
func newWorker(
m *manager,
probeType probeType,
pod *v1.Pod,
container v1.Container) *worker {
w := &worker{
stopCh: make(chan struct{}, 1), // Buffer so stop() can be non-blocking.
pod: pod,
container: container,
probeType: probeType,
probeManager: m,
}
switch probeType {
case readiness:
w.spec = container.ReadinessProbe
w.resultsManager = m.readinessManager
w.initialValue = results.Failure
case liveness:
w.spec = container.LivenessProbe
w.resultsManager = m.livenessManager
w.initialValue = results.Success
}
return w
}
// run periodically probes the container.
func (w *worker) run() {
probeTickerPeriod := time.Duration(w.spec.PeriodSeconds) * time.Second
// If the kubelet restarted, the probes could be started in rapid succession.
// Let the worker wait for a random portion of tickerPeriod before probing.
time.Sleep(time.Duration(rand.Float64() * float64(probeTickerPeriod)))
probeTicker := time.NewTicker(probeTickerPeriod)
defer func() {
// Clean up.
probeTicker.Stop()
if !w.containerID.IsEmpty() {
w.resultsManager.Remove(w.containerID)
}
w.probeManager.removeWorker(w.pod.UID, w.container.Name, w.probeType)
}()
probeLoop:
for w.doProbe() {
// Wait for next probe tick.
select {
case <-w.stopCh:
break probeLoop
case <-probeTicker.C:
// continue
}
}
}
// stop stops the probe worker. The worker handles cleanup and removes itself from its manager.
// It is safe to call stop multiple times.
func (w *worker) stop() {
select {
case w.stopCh <- struct{}{}:
default: // Non-blocking.
}
}
// doProbe probes the container once and records the result.
// Returns whether the worker should continue.
func (w *worker) doProbe() (keepGoing bool) {
defer func() { recover() }() // Actually eat panics (HandleCrash takes care of logging)
defer runtime.HandleCrash(func(_ interface{}) { keepGoing = true })
status, ok := w.probeManager.statusManager.GetPodStatus(w.pod.UID)
if !ok {
// Either the pod has not been created yet, or it was already deleted.
glog.V(3).Infof("No status for pod: %v", format.Pod(w.pod))
return true
}
// Worker should terminate if pod is terminated.
if status.Phase == v1.PodFailed || status.Phase == v1.PodSucceeded {
glog.V(3).Infof("Pod %v %v, exiting probe worker",
format.Pod(w.pod), status.Phase)
return false
}
c, ok := podutil.GetContainerStatus(status.ContainerStatuses, w.container.Name)
if !ok || len(c.ContainerID) == 0 {
// Either the container has not been created yet, or it was deleted.
glog.V(3).Infof("Probe target container not found: %v - %v",
format.Pod(w.pod), w.container.Name)
return true // Wait for more information.
}
if w.containerID.String() != c.ContainerID {
if !w.containerID.IsEmpty() {
w.resultsManager.Remove(w.containerID)
}
w.containerID = kubecontainer.ParseContainerID(c.ContainerID)
w.resultsManager.Set(w.containerID, w.initialValue, w.pod)
// We've got a new container; resume probing.
w.onHold = false
}
if w.onHold {
// Worker is on hold until there is a new container.
return true
}
if c.State.Running == nil {
glog.V(3).Infof("Non-running container probed: %v - %v",
format.Pod(w.pod), w.container.Name)
if !w.containerID.IsEmpty() {
w.resultsManager.Set(w.containerID, results.Failure, w.pod)
}
// Abort if the container will not be restarted.
return c.State.Terminated == nil ||
w.pod.Spec.RestartPolicy != v1.RestartPolicyNever
}
if int32(time.Since(c.State.Running.StartedAt.Time).Seconds()) < w.spec.InitialDelaySeconds {
return true
}
// TODO: in order for exec probes to correctly handle downward API env, we must be able to reconstruct
// the full container environment here, OR we must make a call to the CRI in order to get those environment
// values from the running container.
result, err := w.probeManager.prober.probe(w.probeType, w.pod, status, w.container, w.containerID)
if err != nil {
// Prober error, throw away the result.
return true
}
if w.lastResult == result {
w.resultRun++
} else {
w.lastResult = result
w.resultRun = 1
}
if (result == results.Failure && w.resultRun < int(w.spec.FailureThreshold)) ||
(result == results.Success && w.resultRun < int(w.spec.SuccessThreshold)) {
// Success or failure is below threshold - leave the probe state unchanged.
return true
}
w.resultsManager.Set(w.containerID, result, w.pod)
if w.probeType == liveness && result == results.Failure {
// The container fails a liveness check, it will need to be restarted.
// Stop probing until we see a new container ID. This is to reduce the
// chance of hitting #21751, where running `docker exec` when a
// container is being stopped may lead to corrupted container state.
w.onHold = true
w.resultRun = 1
}
return true
}

View File

@ -0,0 +1,384 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package prober
import (
"fmt"
"testing"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/record"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
"k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/pkg/kubelet/status"
statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
"k8s.io/kubernetes/pkg/probe"
"k8s.io/utils/exec"
)
func init() {
runtime.ReallyCrash = true
}
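// TestDoProbe runs doProbe for both liveness and readiness workers against a table of
// pod statuses and checks whether the worker keeps going and which result it records.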
func TestDoProbe(t *testing.T) {
m := newTestManager()
// Test statuses.
runningStatus := getTestRunningStatus()
pendingStatus := getTestRunningStatus()
pendingStatus.ContainerStatuses[0].State.Running = nil
terminatedStatus := getTestRunningStatus()
terminatedStatus.ContainerStatuses[0].State.Running = nil
terminatedStatus.ContainerStatuses[0].State.Terminated = &v1.ContainerStateTerminated{
StartedAt: metav1.Now(),
}
otherStatus := getTestRunningStatus()
otherStatus.ContainerStatuses[0].Name = "otherContainer"
failedStatus := getTestRunningStatus()
failedStatus.Phase = v1.PodFailed
tests := []struct {
probe v1.Probe
podStatus *v1.PodStatus
expectContinue bool
expectSet bool
expectedResult results.Result
}{
{ // No status.
expectContinue: true,
},
{ // Pod failed
podStatus: &failedStatus,
},
{ // No container status
podStatus: &otherStatus,
expectContinue: true,
},
{ // Container waiting
podStatus: &pendingStatus,
expectContinue: true,
expectSet: true,
},
{ // Container terminated
podStatus: &terminatedStatus,
expectSet: true,
},
{ // Probe successful.
podStatus: &runningStatus,
expectContinue: true,
expectSet: true,
expectedResult: results.Success,
},
{ // Initial delay passed
podStatus: &runningStatus,
probe: v1.Probe{
InitialDelaySeconds: -100,
},
expectContinue: true,
expectSet: true,
expectedResult: results.Success,
},
}
for _, probeType := range [...]probeType{liveness, readiness} {
for i, test := range tests {
w := newTestWorker(m, probeType, test.probe)
if test.podStatus != nil {
m.statusManager.SetPodStatus(w.pod, *test.podStatus)
}
if c := w.doProbe(); c != test.expectContinue {
t.Errorf("[%s-%d] Expected continue to be %v but got %v", probeType, i, test.expectContinue, c)
}
result, ok := resultsManager(m, probeType).Get(testContainerID)
if ok != test.expectSet {
t.Errorf("[%s-%d] Expected to have result: %v but got %v", probeType, i, test.expectSet, ok)
}
if result != test.expectedResult {
t.Errorf("[%s-%d] Expected result: %v but got %v", probeType, i, test.expectedResult, result)
}
// Clean up.
m.statusManager = status.NewManager(&fake.Clientset{}, kubepod.NewBasicPodManager(nil, nil, nil), &statustest.FakePodDeletionSafetyProvider{})
resultsManager(m, probeType).Remove(testContainerID)
}
}
}
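// TestInitialDelay verifies that probes respect InitialDelaySeconds: before the delay
// the worker keeps its initial result (Success for liveness, Failure for readiness),
// and once the container has been running long enough the real probe result is used.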
func TestInitialDelay(t *testing.T) {
m := newTestManager()
for _, probeType := range [...]probeType{liveness, readiness} {
w := newTestWorker(m, probeType, v1.Probe{
InitialDelaySeconds: 10,
})
m.statusManager.SetPodStatus(w.pod, getTestRunningStatus())
expectContinue(t, w, w.doProbe(), "during initial delay")
expectResult(t, w, results.Result(probeType == liveness), "during initial delay")
// 100 seconds later...
laterStatus := getTestRunningStatus()
laterStatus.ContainerStatuses[0].State.Running.StartedAt.Time =
time.Now().Add(-100 * time.Second)
m.statusManager.SetPodStatus(w.pod, laterStatus)
// Second call should succeed (already waited).
expectContinue(t, w, w.doProbe(), "after initial delay")
expectResult(t, w, results.Success, "after initial delay")
}
}
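// TestFailureThreshold checks that with FailureThreshold=3 the reported result only
// flips from Success to Failure after three consecutive failed probes.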
func TestFailureThreshold(t *testing.T) {
m := newTestManager()
w := newTestWorker(m, readiness, v1.Probe{SuccessThreshold: 1, FailureThreshold: 3})
m.statusManager.SetPodStatus(w.pod, getTestRunningStatus())
for i := 0; i < 2; i++ {
// First probe should succeed.
m.prober.exec = fakeExecProber{probe.Success, nil}
for j := 0; j < 3; j++ {
msg := fmt.Sprintf("%d success (%d)", j+1, i)
expectContinue(t, w, w.doProbe(), msg)
expectResult(t, w, results.Success, msg)
}
// Prober starts failing :(
m.prober.exec = fakeExecProber{probe.Failure, nil}
// Next 2 probes should still be "success".
for j := 0; j < 2; j++ {
msg := fmt.Sprintf("%d failing (%d)", j+1, i)
expectContinue(t, w, w.doProbe(), msg)
expectResult(t, w, results.Success, msg)
}
// Third & following fail.
for j := 0; j < 3; j++ {
msg := fmt.Sprintf("%d failure (%d)", j+3, i)
expectContinue(t, w, w.doProbe(), msg)
expectResult(t, w, results.Failure, msg)
}
}
}
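// TestSuccessThreshold checks that with SuccessThreshold=3 the reported result only
// flips from Failure to Success after three consecutive successful probes, and that a
// single failure (FailureThreshold=1) flips it straight back.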
func TestSuccessThreshold(t *testing.T) {
m := newTestManager()
w := newTestWorker(m, readiness, v1.Probe{SuccessThreshold: 3, FailureThreshold: 1})
m.statusManager.SetPodStatus(w.pod, getTestRunningStatus())
// Start out failure.
w.resultsManager.Set(testContainerID, results.Failure, &v1.Pod{})
for i := 0; i < 2; i++ {
// Probe defaults to Failure.
for j := 0; j < 2; j++ {
msg := fmt.Sprintf("%d success (%d)", j+1, i)
expectContinue(t, w, w.doProbe(), msg)
expectResult(t, w, results.Failure, msg)
}
// Continuing success!
for j := 0; j < 3; j++ {
msg := fmt.Sprintf("%d success (%d)", j+3, i)
expectContinue(t, w, w.doProbe(), msg)
expectResult(t, w, results.Success, msg)
}
// Prober flakes :(
m.prober.exec = fakeExecProber{probe.Failure, nil}
msg := fmt.Sprintf("1 failure (%d)", i)
expectContinue(t, w, w.doProbe(), msg)
expectResult(t, w, results.Failure, msg)
// Back to success.
m.prober.exec = fakeExecProber{probe.Success, nil}
}
}
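// TestCleanUp starts a worker goroutine, stops it (stop is safe to call repeatedly),
// and verifies that both the cached result and the manager's worker entry are removed.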
func TestCleanUp(t *testing.T) {
m := newTestManager()
for _, probeType := range [...]probeType{liveness, readiness} {
key := probeKey{testPodUID, testContainerName, probeType}
w := newTestWorker(m, probeType, v1.Probe{})
m.statusManager.SetPodStatus(w.pod, getTestRunningStatus())
go w.run()
m.workers[key] = w
// Wait for worker to run.
condition := func() (bool, error) {
ready, _ := resultsManager(m, probeType).Get(testContainerID)
return ready == results.Success, nil
}
if ready, _ := condition(); !ready {
if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, condition); err != nil {
t.Fatalf("[%s] Error waiting for worker ready: %v", probeType, err)
}
}
for i := 0; i < 10; i++ {
w.stop() // Stop should be callable multiple times without consequence.
}
if err := waitForWorkerExit(m, []probeKey{key}); err != nil {
t.Fatalf("[%s] error waiting for worker exit: %v", probeType, err)
}
if _, ok := resultsManager(m, probeType).Get(testContainerID); ok {
t.Errorf("[%s] Expected result to be cleared.", probeType)
}
if _, ok := m.workers[key]; ok {
t.Errorf("[%s] Expected worker to be cleared.", probeType)
}
}
}
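// TestHandleCrash verifies that a panicking prober does not kill the worker: doProbe
// recovers, reports that probing should continue, and leaves the last result intact.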
func TestHandleCrash(t *testing.T) {
runtime.ReallyCrash = false // Test that we *don't* really crash.
m := newTestManager()
w := newTestWorker(m, readiness, v1.Probe{})
m.statusManager.SetPodStatus(w.pod, getTestRunningStatus())
expectContinue(t, w, w.doProbe(), "Initial successful probe.")
expectResult(t, w, results.Success, "Initial successful probe.")
// Prober starts crashing.
m.prober = &prober{
refManager: kubecontainer.NewRefManager(),
recorder: &record.FakeRecorder{},
exec: crashingExecProber{},
}
// doProbe should recover from the crash, and keep going.
expectContinue(t, w, w.doProbe(), "Crashing probe.")
expectResult(t, w, results.Success, "Crashing probe unchanged.")
}
func expectResult(t *testing.T, w *worker, expectedResult results.Result, msg string) {
result, ok := resultsManager(w.probeManager, w.probeType).Get(w.containerID)
if !ok {
t.Errorf("[%s - %s] Expected result to be set, but was not set", w.probeType, msg)
} else if result != expectedResult {
t.Errorf("[%s - %s] Expected result to be %v, but was %v",
w.probeType, msg, expectedResult, result)
}
}
func expectContinue(t *testing.T, w *worker, c bool, msg string) {
if !c {
t.Errorf("[%s - %s] Expected to continue, but did not", w.probeType, msg)
}
}
func resultsManager(m *manager, probeType probeType) results.Manager {
switch probeType {
case readiness:
return m.readinessManager
case liveness:
return m.livenessManager
}
panic(fmt.Errorf("Unhandled case: %v", probeType))
}
type crashingExecProber struct{}
func (p crashingExecProber) Probe(_ exec.Cmd) (probe.Result, string, error) {
panic("Intentional Probe crash.")
}
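// TestOnHoldOnLivenessCheckFailure verifies that after a liveness failure the worker
// goes on hold and ignores further probes until a new container ID is observed.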
func TestOnHoldOnLivenessCheckFailure(t *testing.T) {
m := newTestManager()
w := newTestWorker(m, liveness, v1.Probe{SuccessThreshold: 1, FailureThreshold: 1})
status := getTestRunningStatus()
m.statusManager.SetPodStatus(w.pod, getTestRunningStatus())
// First probe should fail.
m.prober.exec = fakeExecProber{probe.Failure, nil}
msg := "first probe"
expectContinue(t, w, w.doProbe(), msg)
expectResult(t, w, results.Failure, msg)
if !w.onHold {
t.Errorf("Prober should be on hold due to liveness check failure")
}
// Set fakeExecProber to return success. However, the result will remain
// failure because the worker is on hold and won't probe.
m.prober.exec = fakeExecProber{probe.Success, nil}
msg = "while on hold"
expectContinue(t, w, w.doProbe(), msg)
expectResult(t, w, results.Failure, msg)
if !w.onHold {
t.Errorf("Prober should be on hold due to liveness check failure")
}
// Set a new container ID to lift the hold. The next probe will succeed.
status.ContainerStatuses[0].ContainerID = "test://newCont_ID"
m.statusManager.SetPodStatus(w.pod, status)
msg = "hold lifted"
expectContinue(t, w, w.doProbe(), msg)
expectResult(t, w, results.Success, msg)
if w.onHold {
t.Errorf("Prober should not be on hold anymore")
}
}
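// TestResultRunOnLivenessCheckFailure tracks resultRun across consecutive probes and
// verifies it resets to 1 once the failure threshold is crossed, so a restarted
// container gets a full FailureThreshold budget again.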
func TestResultRunOnLivenessCheckFailure(t *testing.T) {
m := newTestManager()
w := newTestWorker(m, liveness, v1.Probe{SuccessThreshold: 1, FailureThreshold: 3})
m.statusManager.SetPodStatus(w.pod, getTestRunningStatus())
m.prober.exec = fakeExecProber{probe.Success, nil}
msg := "inital probe success"
expectContinue(t, w, w.doProbe(), msg)
expectResult(t, w, results.Success, msg)
if w.resultRun != 1 {
t.Errorf("Prober resultRun should 1")
}
m.prober.exec = fakeExecProber{probe.Failure, nil}
msg = "probe failure, result success"
expectContinue(t, w, w.doProbe(), msg)
expectResult(t, w, results.Success, msg)
if w.resultRun != 1 {
t.Errorf("Prober resultRun should 1")
}
m.prober.exec = fakeExecProber{probe.Failure, nil}
msg = "2nd probe failure, result success"
expectContinue(t, w, w.doProbe(), msg)
expectResult(t, w, results.Success, msg)
if w.resultRun != 2 {
t.Errorf("Prober resultRun should be 2")
}
// Exceeding FailureThreshold should cause resultRun to
// reset to 1 so that the probe on the restarted pod
// also gets FailureThreshold attempts to succeed.
m.prober.exec = fakeExecProber{probe.Failure, nil}
msg = "3rd probe failure, result failure"
expectContinue(t, w, w.doProbe(), msg)
expectResult(t, w, results.Failure, msg)
if w.resultRun != 1 {
t.Errorf("Prober resultRun should be reset to 1")
}
}