mirror of https://github.com/ceph/ceph-csi.git
vendor files
vendor/k8s.io/kubernetes/pkg/kubelet/pleg/BUILD (generated, vendored, new file, 55 lines)
@@ -0,0 +1,55 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "generic.go",
        "pleg.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/kubelet/pleg",
    deps = [
        "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library",
        "//pkg/kubelet/container:go_default_library",
        "//pkg/kubelet/metrics:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["generic_test.go"],
    importpath = "k8s.io/kubernetes/pkg/kubelet/pleg",
    library = ":go_default_library",
    deps = [
        "//pkg/kubelet/container:go_default_library",
        "//pkg/kubelet/container/testing:go_default_library",
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
vendor/k8s.io/kubernetes/pkg/kubelet/pleg/doc.go (generated, vendored, new file, 19 lines)
@@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package pleg contains types and a generic implementation of the pod
// lifecycle event generator.
package pleg // import "k8s.io/kubernetes/pkg/kubelet/pleg"
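For orientation, below is a minimal consumer sketch (not part of this commit) showing how a caller such as the kubelet could wire up the generic PLEG defined in generic.go further down. The package name, channel capacity (1000) and relist period (1 second) are assumed example values; the runtime and cache come from the caller's own setup.

// Illustrative sketch only, not part of this vendor drop.
package plegexample

import (
    "time"

    "k8s.io/apimachinery/pkg/util/clock"
    kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    "k8s.io/kubernetes/pkg/kubelet/pleg"
)

// runPLEG constructs a GenericPLEG, starts its periodic relist goroutine,
// and consumes the resulting pod lifecycle events.
func runPLEG(runtime kubecontainer.Runtime, cache kubecontainer.Cache) {
    g := pleg.NewGenericPLEG(runtime, 1000, time.Second, cache, clock.RealClock{})
    g.Start()

    for event := range g.Watch() {
        // A real subscriber (e.g. the kubelet sync loop) would trigger a pod
        // sync here based on event.ID and event.Type.
        _ = event
    }
}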
vendor/k8s.io/kubernetes/pkg/kubelet/pleg/generic.go (generated, vendored, new file, 464 lines)
@@ -0,0 +1,464 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package pleg
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/clock"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/metrics"
|
||||
)
|
||||
|
||||
// GenericPLEG is an extremely simple generic PLEG that relies solely on
|
||||
// periodic listing to discover container changes. It should be used
|
||||
// as a temporary replacement for container runtimes that do not support a proper
|
||||
// event generator yet.
|
||||
//
|
||||
// Note that GenericPLEG assumes that a container would not be created,
|
||||
// terminated, and garbage collected within one relist period. If such an
|
||||
// incident happens, GenericPLEG would miss all events regarding this
|
||||
// container. In the case of relisting failure, the window may become longer.
|
||||
// Note that this assumption is not unique -- many kubelet internal components
|
||||
// rely on terminated containers as tombstones for bookkeeping purposes. The
|
||||
// garbage collector is implemented to work with such situations. However, to
|
||||
// guarantee that kubelet can handle missing container events, it is
|
||||
// recommended to set the relist period short and have an auxiliary, longer
|
||||
// periodic sync in kubelet as the safety net.
|
||||
type GenericPLEG struct {
|
||||
// The period for relisting.
|
||||
relistPeriod time.Duration
|
||||
// The container runtime.
|
||||
runtime kubecontainer.Runtime
|
||||
// The channel from which the subscriber listens for events.
|
||||
eventChannel chan *PodLifecycleEvent
|
||||
// The internal cache for pod/container information.
|
||||
podRecords podRecords
|
||||
// Time of the last relisting.
|
||||
relistTime atomic.Value
|
||||
// Cache for storing the runtime states required for syncing pods.
|
||||
cache kubecontainer.Cache
|
||||
// For testability.
|
||||
clock clock.Clock
|
||||
// Pods that failed to have their status retrieved during a relist. These pods will be
|
||||
// retried during the next relisting.
|
||||
podsToReinspect map[types.UID]*kubecontainer.Pod
|
||||
}
|
||||
|
||||
// plegContainerState has a one-to-one mapping to the
|
||||
// kubecontainer.ContainerState except for the non-existent state. This state
|
||||
// is introduced here to complete the state transition scenarios.
|
||||
type plegContainerState string
|
||||
|
||||
const (
|
||||
plegContainerRunning plegContainerState = "running"
|
||||
plegContainerExited plegContainerState = "exited"
|
||||
plegContainerUnknown plegContainerState = "unknown"
|
||||
plegContainerNonExistent plegContainerState = "non-existent"
|
||||
|
||||
// The threshold needs to be greater than the relisting period + the
|
||||
// relisting time, which can vary significantly. Set a conservative
|
||||
// threshold to avoid flipping between healthy and unhealthy.
|
||||
relistThreshold = 3 * time.Minute
|
||||
)
|
||||
|
||||
func convertState(state kubecontainer.ContainerState) plegContainerState {
|
||||
switch state {
|
||||
case kubecontainer.ContainerStateCreated:
|
||||
// kubelet doesn't use the "created" state yet, hence convert it to "unknown".
|
||||
return plegContainerUnknown
|
||||
case kubecontainer.ContainerStateRunning:
|
||||
return plegContainerRunning
|
||||
case kubecontainer.ContainerStateExited:
|
||||
return plegContainerExited
|
||||
case kubecontainer.ContainerStateUnknown:
|
||||
return plegContainerUnknown
|
||||
default:
|
||||
panic(fmt.Sprintf("unrecognized container state: %v", state))
|
||||
}
|
||||
}
|
||||
|
||||
type podRecord struct {
|
||||
old *kubecontainer.Pod
|
||||
current *kubecontainer.Pod
|
||||
}
|
||||
|
||||
type podRecords map[types.UID]*podRecord
|
||||
|
||||
func NewGenericPLEG(runtime kubecontainer.Runtime, channelCapacity int,
|
||||
relistPeriod time.Duration, cache kubecontainer.Cache, clock clock.Clock) PodLifecycleEventGenerator {
|
||||
return &GenericPLEG{
|
||||
relistPeriod: relistPeriod,
|
||||
runtime: runtime,
|
||||
eventChannel: make(chan *PodLifecycleEvent, channelCapacity),
|
||||
podRecords: make(podRecords),
|
||||
cache: cache,
|
||||
clock: clock,
|
||||
}
|
||||
}
|
||||
|
||||
// Returns a channel from which the subscriber can receive PodLifecycleEvent
|
||||
// events.
|
||||
// TODO: support multiple subscribers.
|
||||
func (g *GenericPLEG) Watch() chan *PodLifecycleEvent {
|
||||
return g.eventChannel
|
||||
}
|
||||
|
||||
// Start spawns a goroutine to relist periodically.
|
||||
func (g *GenericPLEG) Start() {
|
||||
go wait.Until(g.relist, g.relistPeriod, wait.NeverStop)
|
||||
}
|
||||
|
||||
func (g *GenericPLEG) Healthy() (bool, error) {
|
||||
relistTime := g.getRelistTime()
|
||||
elapsed := g.clock.Since(relistTime)
|
||||
if elapsed > relistThreshold {
|
||||
return false, fmt.Errorf("pleg was last seen active %v ago; threshold is %v", elapsed, relistThreshold)
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func generateEvents(podID types.UID, cid string, oldState, newState plegContainerState) []*PodLifecycleEvent {
|
||||
if newState == oldState {
|
||||
return nil
|
||||
}
|
||||
|
||||
glog.V(4).Infof("GenericPLEG: %v/%v: %v -> %v", podID, cid, oldState, newState)
|
||||
switch newState {
|
||||
case plegContainerRunning:
|
||||
return []*PodLifecycleEvent{{ID: podID, Type: ContainerStarted, Data: cid}}
|
||||
case plegContainerExited:
|
||||
return []*PodLifecycleEvent{{ID: podID, Type: ContainerDied, Data: cid}}
|
||||
case plegContainerUnknown:
|
||||
return []*PodLifecycleEvent{{ID: podID, Type: ContainerChanged, Data: cid}}
|
||||
case plegContainerNonExistent:
|
||||
switch oldState {
|
||||
case plegContainerExited:
|
||||
// We already reported that the container died before.
|
||||
return []*PodLifecycleEvent{{ID: podID, Type: ContainerRemoved, Data: cid}}
|
||||
default:
|
||||
return []*PodLifecycleEvent{{ID: podID, Type: ContainerDied, Data: cid}, {ID: podID, Type: ContainerRemoved, Data: cid}}
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("unrecognized container state: %v", newState))
|
||||
}
|
||||
}
|
||||
|
||||
func (g *GenericPLEG) getRelistTime() time.Time {
|
||||
val := g.relistTime.Load()
|
||||
if val == nil {
|
||||
return time.Time{}
|
||||
}
|
||||
return val.(time.Time)
|
||||
}
|
||||
|
||||
func (g *GenericPLEG) updateRelistTime(timestamp time.Time) {
|
||||
g.relistTime.Store(timestamp)
|
||||
}
|
||||
|
||||
// relist queries the container runtime for a list of pods/containers, compares it
|
||||
// with the internal pods/containers, and generates events accordingly.
|
||||
func (g *GenericPLEG) relist() {
|
||||
glog.V(5).Infof("GenericPLEG: Relisting")
|
||||
|
||||
if lastRelistTime := g.getRelistTime(); !lastRelistTime.IsZero() {
|
||||
metrics.PLEGRelistInterval.Observe(metrics.SinceInMicroseconds(lastRelistTime))
|
||||
}
|
||||
|
||||
timestamp := g.clock.Now()
|
||||
defer func() {
|
||||
metrics.PLEGRelistLatency.Observe(metrics.SinceInMicroseconds(timestamp))
|
||||
}()
|
||||
|
||||
// Get all the pods.
|
||||
podList, err := g.runtime.GetPods(true)
|
||||
if err != nil {
|
||||
glog.Errorf("GenericPLEG: Unable to retrieve pods: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
g.updateRelistTime(timestamp)
|
||||
|
||||
pods := kubecontainer.Pods(podList)
|
||||
g.podRecords.setCurrent(pods)
|
||||
|
||||
// Compare the old and the current pods, and generate events.
|
||||
eventsByPodID := map[types.UID][]*PodLifecycleEvent{}
|
||||
for pid := range g.podRecords {
|
||||
oldPod := g.podRecords.getOld(pid)
|
||||
pod := g.podRecords.getCurrent(pid)
|
||||
// Get all containers in the old and the new pod.
|
||||
allContainers := getContainersFromPods(oldPod, pod)
|
||||
for _, container := range allContainers {
|
||||
events := computeEvents(oldPod, pod, &container.ID)
|
||||
for _, e := range events {
|
||||
updateEvents(eventsByPodID, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var needsReinspection map[types.UID]*kubecontainer.Pod
|
||||
if g.cacheEnabled() {
|
||||
needsReinspection = make(map[types.UID]*kubecontainer.Pod)
|
||||
}
|
||||
|
||||
// If there are events associated with a pod, we should update the
|
||||
// podCache.
|
||||
for pid, events := range eventsByPodID {
|
||||
pod := g.podRecords.getCurrent(pid)
|
||||
if g.cacheEnabled() {
|
||||
// updateCache() will inspect the pod and update the cache. If an
|
||||
// error occurs during the inspection, we want PLEG to retry again
|
||||
// in the next relist. To achieve this, we do not update the
|
||||
// associated podRecord of the pod, so that the change will be
|
||||
// detected again in the next relist.
|
||||
// TODO: If many pods changed during the same relist period,
|
||||
// inspecting the pod and getting the PodStatus to update the cache
|
||||
// serially may take a while. We should be aware of this and
|
||||
// parallelize if needed.
|
||||
if err := g.updateCache(pod, pid); err != nil {
|
||||
glog.Errorf("PLEG: Ignoring events for pod %s/%s: %v", pod.Name, pod.Namespace, err)
|
||||
|
||||
// make sure we try to reinspect the pod during the next relisting
|
||||
needsReinspection[pid] = pod
|
||||
|
||||
continue
|
||||
} else if _, found := g.podsToReinspect[pid]; found {
|
||||
// this pod was in the list to reinspect and we did so because it had events, so remove it
|
||||
// from the list (we don't want the reinspection code below to inspect it a second time in
|
||||
// this relist execution)
|
||||
delete(g.podsToReinspect, pid)
|
||||
}
|
||||
}
|
||||
// Update the internal storage and send out the events.
|
||||
g.podRecords.update(pid)
|
||||
for i := range events {
|
||||
// Filter out events that are not reliable and no other components use yet.
|
||||
if events[i].Type == ContainerChanged {
|
||||
continue
|
||||
}
|
||||
g.eventChannel <- events[i]
|
||||
}
|
||||
}
|
||||
|
||||
if g.cacheEnabled() {
|
||||
// reinspect any pods that failed inspection during the previous relist
|
||||
if len(g.podsToReinspect) > 0 {
|
||||
glog.V(5).Infof("GenericPLEG: Reinspecting pods that previously failed inspection")
|
||||
for pid, pod := range g.podsToReinspect {
|
||||
if err := g.updateCache(pod, pid); err != nil {
|
||||
glog.Errorf("PLEG: pod %s/%s failed reinspection: %v", pod.Name, pod.Namespace, err)
|
||||
needsReinspection[pid] = pod
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Update the cache timestamp. This needs to happen *after*
|
||||
// all pods have been properly updated in the cache.
|
||||
g.cache.UpdateTime(timestamp)
|
||||
}
|
||||
|
||||
// make sure we retain the list of pods that need reinspecting the next time relist is called
|
||||
g.podsToReinspect = needsReinspection
|
||||
}
|
||||
|
||||
func getContainersFromPods(pods ...*kubecontainer.Pod) []*kubecontainer.Container {
|
||||
cidSet := sets.NewString()
|
||||
var containers []*kubecontainer.Container
|
||||
for _, p := range pods {
|
||||
if p == nil {
|
||||
continue
|
||||
}
|
||||
for _, c := range p.Containers {
|
||||
cid := string(c.ID.ID)
|
||||
if cidSet.Has(cid) {
|
||||
continue
|
||||
}
|
||||
cidSet.Insert(cid)
|
||||
containers = append(containers, c)
|
||||
}
|
||||
// Update sandboxes as containers
|
||||
// TODO: keep track of sandboxes explicitly.
|
||||
for _, c := range p.Sandboxes {
|
||||
cid := string(c.ID.ID)
|
||||
if cidSet.Has(cid) {
|
||||
continue
|
||||
}
|
||||
cidSet.Insert(cid)
|
||||
containers = append(containers, c)
|
||||
}
|
||||
|
||||
}
|
||||
return containers
|
||||
}
|
||||
|
||||
func computeEvents(oldPod, newPod *kubecontainer.Pod, cid *kubecontainer.ContainerID) []*PodLifecycleEvent {
|
||||
var pid types.UID
|
||||
if oldPod != nil {
|
||||
pid = oldPod.ID
|
||||
} else if newPod != nil {
|
||||
pid = newPod.ID
|
||||
}
|
||||
oldState := getContainerState(oldPod, cid)
|
||||
newState := getContainerState(newPod, cid)
|
||||
return generateEvents(pid, cid.ID, oldState, newState)
|
||||
}
|
||||
|
||||
func (g *GenericPLEG) cacheEnabled() bool {
|
||||
return g.cache != nil
|
||||
}
|
||||
|
||||
// Preserve an older cached status' pod IP if the new status has no pod IP
|
||||
// and its sandboxes have exited
|
||||
func (g *GenericPLEG) getPodIP(pid types.UID, status *kubecontainer.PodStatus) string {
|
||||
if status.IP != "" {
|
||||
return status.IP
|
||||
}
|
||||
|
||||
oldStatus, err := g.cache.Get(pid)
|
||||
if err != nil || oldStatus.IP == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
for _, sandboxStatus := range status.SandboxStatuses {
|
||||
// If at least one sandbox is ready, then use this status update's pod IP
|
||||
if sandboxStatus.State == runtimeapi.PodSandboxState_SANDBOX_READY {
|
||||
return status.IP
|
||||
}
|
||||
}
|
||||
|
||||
if len(status.SandboxStatuses) == 0 {
|
||||
// Without sandboxes (which built-in runtimes like rkt don't report)
|
||||
// look at all the container statuses, and if any containers are
|
||||
// running then use the new pod IP
|
||||
for _, containerStatus := range status.ContainerStatuses {
|
||||
if containerStatus.State == kubecontainer.ContainerStateCreated || containerStatus.State == kubecontainer.ContainerStateRunning {
|
||||
return status.IP
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// For pods with no ready containers or sandboxes (like exited pods)
|
||||
// use the old status' pod IP
|
||||
return oldStatus.IP
|
||||
}
|
||||
|
||||
func (g *GenericPLEG) updateCache(pod *kubecontainer.Pod, pid types.UID) error {
|
||||
if pod == nil {
|
||||
// The pod is missing in the current relist. This means that
|
||||
// the pod has no visible (active or inactive) containers.
|
||||
glog.V(4).Infof("PLEG: Delete status for pod %q", string(pid))
|
||||
g.cache.Delete(pid)
|
||||
return nil
|
||||
}
|
||||
timestamp := g.clock.Now()
|
||||
// TODO: Consider adding a new runtime method
|
||||
// GetPodStatus(pod *kubecontainer.Pod) so that Docker can avoid listing
|
||||
// all containers again.
|
||||
status, err := g.runtime.GetPodStatus(pod.ID, pod.Name, pod.Namespace)
|
||||
glog.V(4).Infof("PLEG: Write status for %s/%s: %#v (err: %v)", pod.Name, pod.Namespace, status, err)
|
||||
if err == nil {
|
||||
// Preserve the pod IP across cache updates if the new IP is empty.
|
||||
// When a pod is torn down, kubelet may race with PLEG and retrieve
|
||||
// a pod status after network teardown, but the kubernetes API expects
|
||||
// the completed pod's IP to be available after the pod is dead.
|
||||
status.IP = g.getPodIP(pid, status)
|
||||
}
|
||||
|
||||
g.cache.Set(pod.ID, status, err, timestamp)
|
||||
return err
|
||||
}
|
||||
|
||||
func updateEvents(eventsByPodID map[types.UID][]*PodLifecycleEvent, e *PodLifecycleEvent) {
|
||||
if e == nil {
|
||||
return
|
||||
}
|
||||
eventsByPodID[e.ID] = append(eventsByPodID[e.ID], e)
|
||||
}
|
||||
|
||||
func getContainerState(pod *kubecontainer.Pod, cid *kubecontainer.ContainerID) plegContainerState {
|
||||
// Default to the non-existent state.
|
||||
state := plegContainerNonExistent
|
||||
if pod == nil {
|
||||
return state
|
||||
}
|
||||
c := pod.FindContainerByID(*cid)
|
||||
if c != nil {
|
||||
return convertState(c.State)
|
||||
}
|
||||
// Search through sandboxes too.
|
||||
c = pod.FindSandboxByID(*cid)
|
||||
if c != nil {
|
||||
return convertState(c.State)
|
||||
}
|
||||
|
||||
return state
|
||||
}
|
||||
|
||||
func (pr podRecords) getOld(id types.UID) *kubecontainer.Pod {
|
||||
r, ok := pr[id]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return r.old
|
||||
}
|
||||
|
||||
func (pr podRecords) getCurrent(id types.UID) *kubecontainer.Pod {
|
||||
r, ok := pr[id]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return r.current
|
||||
}
|
||||
|
||||
func (pr podRecords) setCurrent(pods []*kubecontainer.Pod) {
|
||||
for i := range pr {
|
||||
pr[i].current = nil
|
||||
}
|
||||
for _, pod := range pods {
|
||||
if r, ok := pr[pod.ID]; ok {
|
||||
r.current = pod
|
||||
} else {
|
||||
pr[pod.ID] = &podRecord{current: pod}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pr podRecords) update(id types.UID) {
|
||||
r, ok := pr[id]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
pr.updateInternal(id, r)
|
||||
}
|
||||
|
||||
func (pr podRecords) updateInternal(id types.UID, r *podRecord) {
|
||||
if r.current == nil {
|
||||
// Pod no longer exists; delete the entry.
|
||||
delete(pr, id)
|
||||
return
|
||||
}
|
||||
r.old = r.current
|
||||
r.current = nil
|
||||
}
|
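The doc comment on GenericPLEG above explains the relist-based design; the transition table implemented by generateEvents is the core of it. As a self-contained illustration (simplified local types, not the vendored API), the sketch below models just that mapping between states observed in consecutive relists and the events emitted.

// Simplified, self-contained model of the generateEvents transition table in
// generic.go; the package and type names here are local stand-ins.
package plegexample

type containerState string

const (
    stateRunning     containerState = "running"
    stateExited      containerState = "exited"
    stateUnknown     containerState = "unknown"
    stateNonExistent containerState = "non-existent"
)

// eventTypesFor returns the event type names generated for one container when
// its observed state changes between two relists.
func eventTypesFor(oldState, newState containerState) []string {
    if newState == oldState {
        return nil
    }
    switch newState {
    case stateRunning:
        return []string{"ContainerStarted"}
    case stateExited:
        return []string{"ContainerDied"}
    case stateUnknown:
        return []string{"ContainerChanged"}
    case stateNonExistent:
        if oldState == stateExited {
            // The death was already reported during an earlier relist, so
            // only the removal is reported now.
            return []string{"ContainerRemoved"}
        }
        // The container disappeared without ever being seen as exited:
        // report both the death and the removal.
        return []string{"ContainerDied", "ContainerRemoved"}
    }
    return nil
}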
vendor/k8s.io/kubernetes/pkg/kubelet/pleg/generic_test.go (generated, vendored, new file, 553 lines)
@@ -0,0 +1,553 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package pleg
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/clock"
|
||||
"k8s.io/apimachinery/pkg/util/diff"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
|
||||
)
|
||||
|
||||
const (
|
||||
testContainerRuntimeType = "fooRuntime"
|
||||
)
|
||||
|
||||
type TestGenericPLEG struct {
|
||||
pleg *GenericPLEG
|
||||
runtime *containertest.FakeRuntime
|
||||
clock *clock.FakeClock
|
||||
}
|
||||
|
||||
func newTestGenericPLEG() *TestGenericPLEG {
|
||||
fakeRuntime := &containertest.FakeRuntime{}
|
||||
clock := clock.NewFakeClock(time.Time{})
|
||||
// The channel capacity should be large enough to hold all events in a
|
||||
// single test.
|
||||
pleg := &GenericPLEG{
|
||||
relistPeriod: time.Hour,
|
||||
runtime: fakeRuntime,
|
||||
eventChannel: make(chan *PodLifecycleEvent, 100),
|
||||
podRecords: make(podRecords),
|
||||
clock: clock,
|
||||
}
|
||||
return &TestGenericPLEG{pleg: pleg, runtime: fakeRuntime, clock: clock}
|
||||
}
|
||||
|
||||
func getEventsFromChannel(ch <-chan *PodLifecycleEvent) []*PodLifecycleEvent {
|
||||
events := []*PodLifecycleEvent{}
|
||||
for len(ch) > 0 {
|
||||
e := <-ch
|
||||
events = append(events, e)
|
||||
}
|
||||
return events
|
||||
}
|
||||
|
||||
func createTestContainer(ID string, state kubecontainer.ContainerState) *kubecontainer.Container {
|
||||
return &kubecontainer.Container{
|
||||
ID: kubecontainer.ContainerID{Type: testContainerRuntimeType, ID: ID},
|
||||
State: state,
|
||||
}
|
||||
}
|
||||
|
||||
type sortableEvents []*PodLifecycleEvent
|
||||
|
||||
func (a sortableEvents) Len() int { return len(a) }
|
||||
func (a sortableEvents) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a sortableEvents) Less(i, j int) bool {
|
||||
if a[i].ID != a[j].ID {
|
||||
return a[i].ID < a[j].ID
|
||||
}
|
||||
return a[i].Data.(string) < a[j].Data.(string)
|
||||
}
|
||||
|
||||
func verifyEvents(t *testing.T, expected, actual []*PodLifecycleEvent) {
|
||||
sort.Sort(sortableEvents(expected))
|
||||
sort.Sort(sortableEvents(actual))
|
||||
if !reflect.DeepEqual(expected, actual) {
|
||||
t.Errorf("Actual events differ from the expected; diff:\n %v", diff.ObjectDiff(expected, actual))
|
||||
}
|
||||
}
|
||||
|
||||
func TestRelisting(t *testing.T) {
|
||||
testPleg := newTestGenericPLEG()
|
||||
pleg, runtime := testPleg.pleg, testPleg.runtime
|
||||
ch := pleg.Watch()
|
||||
// The first relist should send a PodSync event to each pod.
|
||||
runtime.AllPodList = []*containertest.FakePod{
|
||||
{Pod: &kubecontainer.Pod{
|
||||
ID: "1234",
|
||||
Containers: []*kubecontainer.Container{
|
||||
createTestContainer("c1", kubecontainer.ContainerStateExited),
|
||||
createTestContainer("c2", kubecontainer.ContainerStateRunning),
|
||||
createTestContainer("c3", kubecontainer.ContainerStateUnknown),
|
||||
},
|
||||
}},
|
||||
{Pod: &kubecontainer.Pod{
|
||||
ID: "4567",
|
||||
Containers: []*kubecontainer.Container{
|
||||
createTestContainer("c1", kubecontainer.ContainerStateExited),
|
||||
},
|
||||
}},
|
||||
}
|
||||
pleg.relist()
|
||||
// Report every running/exited container if we see them for the first time.
|
||||
expected := []*PodLifecycleEvent{
|
||||
{ID: "1234", Type: ContainerStarted, Data: "c2"},
|
||||
{ID: "4567", Type: ContainerDied, Data: "c1"},
|
||||
{ID: "1234", Type: ContainerDied, Data: "c1"},
|
||||
}
|
||||
actual := getEventsFromChannel(ch)
|
||||
verifyEvents(t, expected, actual)
|
||||
|
||||
// The second relist should not send out any event because no container has
|
||||
// changed.
|
||||
pleg.relist()
|
||||
verifyEvents(t, expected, actual)
|
||||
|
||||
runtime.AllPodList = []*containertest.FakePod{
|
||||
{Pod: &kubecontainer.Pod{
|
||||
ID: "1234",
|
||||
Containers: []*kubecontainer.Container{
|
||||
createTestContainer("c2", kubecontainer.ContainerStateExited),
|
||||
createTestContainer("c3", kubecontainer.ContainerStateRunning),
|
||||
},
|
||||
}},
|
||||
{Pod: &kubecontainer.Pod{
|
||||
ID: "4567",
|
||||
Containers: []*kubecontainer.Container{
|
||||
createTestContainer("c4", kubecontainer.ContainerStateRunning),
|
||||
},
|
||||
}},
|
||||
}
|
||||
pleg.relist()
|
||||
// Only report containers that transitioned to running or exited status.
|
||||
expected = []*PodLifecycleEvent{
|
||||
{ID: "1234", Type: ContainerRemoved, Data: "c1"},
|
||||
{ID: "1234", Type: ContainerDied, Data: "c2"},
|
||||
{ID: "1234", Type: ContainerStarted, Data: "c3"},
|
||||
{ID: "4567", Type: ContainerRemoved, Data: "c1"},
|
||||
{ID: "4567", Type: ContainerStarted, Data: "c4"},
|
||||
}
|
||||
|
||||
actual = getEventsFromChannel(ch)
|
||||
verifyEvents(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestDetectingContainerDeaths(t *testing.T) {
|
||||
// Vary the number of relists after the container started and before the
|
||||
// container died to account for the changes in pleg's internal states.
|
||||
testReportMissingContainers(t, 1)
|
||||
testReportMissingPods(t, 1)
|
||||
|
||||
testReportMissingContainers(t, 3)
|
||||
testReportMissingPods(t, 3)
|
||||
}
|
||||
|
||||
func testReportMissingContainers(t *testing.T, numRelists int) {
|
||||
testPleg := newTestGenericPLEG()
|
||||
pleg, runtime := testPleg.pleg, testPleg.runtime
|
||||
ch := pleg.Watch()
|
||||
runtime.AllPodList = []*containertest.FakePod{
|
||||
{Pod: &kubecontainer.Pod{
|
||||
ID: "1234",
|
||||
Containers: []*kubecontainer.Container{
|
||||
createTestContainer("c1", kubecontainer.ContainerStateRunning),
|
||||
createTestContainer("c2", kubecontainer.ContainerStateRunning),
|
||||
createTestContainer("c3", kubecontainer.ContainerStateExited),
|
||||
},
|
||||
}},
|
||||
}
|
||||
// Relist and drain the events from the channel.
|
||||
for i := 0; i < numRelists; i++ {
|
||||
pleg.relist()
|
||||
getEventsFromChannel(ch)
|
||||
}
|
||||
|
||||
// Container c2 was stopped and removed between relists. We should report
|
||||
// the event. The exited container c3 was garbage collected (i.e., removed)
|
||||
// between relists. We should ignore that event.
|
||||
runtime.AllPodList = []*containertest.FakePod{
|
||||
{Pod: &kubecontainer.Pod{
|
||||
ID: "1234",
|
||||
Containers: []*kubecontainer.Container{
|
||||
createTestContainer("c1", kubecontainer.ContainerStateRunning),
|
||||
},
|
||||
}},
|
||||
}
|
||||
pleg.relist()
|
||||
expected := []*PodLifecycleEvent{
|
||||
{ID: "1234", Type: ContainerDied, Data: "c2"},
|
||||
{ID: "1234", Type: ContainerRemoved, Data: "c2"},
|
||||
{ID: "1234", Type: ContainerRemoved, Data: "c3"},
|
||||
}
|
||||
actual := getEventsFromChannel(ch)
|
||||
verifyEvents(t, expected, actual)
|
||||
}
|
||||
|
||||
func testReportMissingPods(t *testing.T, numRelists int) {
|
||||
testPleg := newTestGenericPLEG()
|
||||
pleg, runtime := testPleg.pleg, testPleg.runtime
|
||||
ch := pleg.Watch()
|
||||
runtime.AllPodList = []*containertest.FakePod{
|
||||
{Pod: &kubecontainer.Pod{
|
||||
ID: "1234",
|
||||
Containers: []*kubecontainer.Container{
|
||||
createTestContainer("c2", kubecontainer.ContainerStateRunning),
|
||||
},
|
||||
}},
|
||||
}
|
||||
// Relist and drain the events from the channel.
|
||||
for i := 0; i < numRelists; i++ {
|
||||
pleg.relist()
|
||||
getEventsFromChannel(ch)
|
||||
}
|
||||
|
||||
// Container c2 was stopped and removed between relists. We should report
|
||||
// the event.
|
||||
runtime.AllPodList = []*containertest.FakePod{}
|
||||
pleg.relist()
|
||||
expected := []*PodLifecycleEvent{
|
||||
{ID: "1234", Type: ContainerDied, Data: "c2"},
|
||||
{ID: "1234", Type: ContainerRemoved, Data: "c2"},
|
||||
}
|
||||
actual := getEventsFromChannel(ch)
|
||||
verifyEvents(t, expected, actual)
|
||||
}
|
||||
|
||||
func newTestGenericPLEGWithRuntimeMock() (*GenericPLEG, *containertest.Mock) {
|
||||
runtimeMock := &containertest.Mock{}
|
||||
pleg := &GenericPLEG{
|
||||
relistPeriod: time.Hour,
|
||||
runtime: runtimeMock,
|
||||
eventChannel: make(chan *PodLifecycleEvent, 100),
|
||||
podRecords: make(podRecords),
|
||||
cache: kubecontainer.NewCache(),
|
||||
clock: clock.RealClock{},
|
||||
}
|
||||
return pleg, runtimeMock
|
||||
}
|
||||
|
||||
func createTestPodsStatusesAndEvents(num int) ([]*kubecontainer.Pod, []*kubecontainer.PodStatus, []*PodLifecycleEvent) {
|
||||
var pods []*kubecontainer.Pod
|
||||
var statuses []*kubecontainer.PodStatus
|
||||
var events []*PodLifecycleEvent
|
||||
for i := 0; i < num; i++ {
|
||||
id := types.UID(fmt.Sprintf("test-pod-%d", i))
|
||||
cState := kubecontainer.ContainerStateRunning
|
||||
container := createTestContainer(fmt.Sprintf("c%d", i), cState)
|
||||
pod := &kubecontainer.Pod{
|
||||
ID: id,
|
||||
Containers: []*kubecontainer.Container{container},
|
||||
}
|
||||
status := &kubecontainer.PodStatus{
|
||||
ID: id,
|
||||
ContainerStatuses: []*kubecontainer.ContainerStatus{{ID: container.ID, State: cState}},
|
||||
}
|
||||
event := &PodLifecycleEvent{ID: pod.ID, Type: ContainerStarted, Data: container.ID.ID}
|
||||
pods = append(pods, pod)
|
||||
statuses = append(statuses, status)
|
||||
events = append(events, event)
|
||||
|
||||
}
|
||||
return pods, statuses, events
|
||||
}
|
||||
|
||||
func TestRelistWithCache(t *testing.T) {
|
||||
pleg, runtimeMock := newTestGenericPLEGWithRuntimeMock()
|
||||
ch := pleg.Watch()
|
||||
|
||||
pods, statuses, events := createTestPodsStatusesAndEvents(2)
|
||||
runtimeMock.On("GetPods", true).Return(pods, nil)
|
||||
runtimeMock.On("GetPodStatus", pods[0].ID, "", "").Return(statuses[0], nil).Once()
|
||||
// Inject an error when querying runtime for the pod status for pods[1].
|
||||
statusErr := fmt.Errorf("unable to get status")
|
||||
runtimeMock.On("GetPodStatus", pods[1].ID, "", "").Return(&kubecontainer.PodStatus{}, statusErr).Once()
|
||||
|
||||
pleg.relist()
|
||||
actualEvents := getEventsFromChannel(ch)
|
||||
cases := []struct {
|
||||
pod *kubecontainer.Pod
|
||||
status *kubecontainer.PodStatus
|
||||
error error
|
||||
}{
|
||||
{pod: pods[0], status: statuses[0], error: nil},
|
||||
{pod: pods[1], status: &kubecontainer.PodStatus{}, error: statusErr},
|
||||
}
|
||||
for i, c := range cases {
|
||||
testStr := fmt.Sprintf("test[%d]", i)
|
||||
actualStatus, actualErr := pleg.cache.Get(c.pod.ID)
|
||||
assert.Equal(t, c.status, actualStatus, testStr)
|
||||
assert.Equal(t, c.error, actualErr, testStr)
|
||||
}
|
||||
// pleg should not generate any event for pods[1] because of the error.
|
||||
assert.Exactly(t, []*PodLifecycleEvent{events[0]}, actualEvents)
|
||||
|
||||
// Return normal status for pods[1].
|
||||
runtimeMock.On("GetPodStatus", pods[1].ID, "", "").Return(statuses[1], nil).Once()
|
||||
pleg.relist()
|
||||
actualEvents = getEventsFromChannel(ch)
|
||||
cases = []struct {
|
||||
pod *kubecontainer.Pod
|
||||
status *kubecontainer.PodStatus
|
||||
error error
|
||||
}{
|
||||
{pod: pods[0], status: statuses[0], error: nil},
|
||||
{pod: pods[1], status: statuses[1], error: nil},
|
||||
}
|
||||
for i, c := range cases {
|
||||
testStr := fmt.Sprintf("test[%d]", i)
|
||||
actualStatus, actualErr := pleg.cache.Get(c.pod.ID)
|
||||
assert.Equal(t, c.status, actualStatus, testStr)
|
||||
assert.Equal(t, c.error, actualErr, testStr)
|
||||
}
|
||||
// Now that we are able to query status for pods[1], pleg should generate an event.
|
||||
assert.Exactly(t, []*PodLifecycleEvent{events[1]}, actualEvents)
|
||||
}
|
||||
|
||||
func TestRemoveCacheEntry(t *testing.T) {
|
||||
pleg, runtimeMock := newTestGenericPLEGWithRuntimeMock()
|
||||
pods, statuses, _ := createTestPodsStatusesAndEvents(1)
|
||||
runtimeMock.On("GetPods", true).Return(pods, nil).Once()
|
||||
runtimeMock.On("GetPodStatus", pods[0].ID, "", "").Return(statuses[0], nil).Once()
|
||||
// Does a relist to populate the cache.
|
||||
pleg.relist()
|
||||
// Delete the pod from runtime. Verify that the cache entry has been
|
||||
// removed after relisting.
|
||||
runtimeMock.On("GetPods", true).Return([]*kubecontainer.Pod{}, nil).Once()
|
||||
pleg.relist()
|
||||
actualStatus, actualErr := pleg.cache.Get(pods[0].ID)
|
||||
assert.Equal(t, &kubecontainer.PodStatus{ID: pods[0].ID}, actualStatus)
|
||||
assert.Equal(t, nil, actualErr)
|
||||
}
|
||||
|
||||
func TestHealthy(t *testing.T) {
|
||||
testPleg := newTestGenericPLEG()
|
||||
pleg, _, clock := testPleg.pleg, testPleg.runtime, testPleg.clock
|
||||
ok, _ := pleg.Healthy()
|
||||
assert.True(t, ok, "pleg should be healthy")
|
||||
|
||||
// Advance the clock without any relisting.
|
||||
clock.Step(time.Minute * 10)
|
||||
ok, _ = pleg.Healthy()
|
||||
assert.False(t, ok, "pleg should be unhealthy")
|
||||
|
||||
// Relist and then advance the time by 1 minute. pleg should be healthy
|
||||
// because this is within the allowed limit.
|
||||
pleg.relist()
|
||||
clock.Step(time.Minute * 1)
|
||||
ok, _ = pleg.Healthy()
|
||||
assert.True(t, ok, "pleg should be healthy")
|
||||
}
|
||||
|
||||
func TestRelistWithReinspection(t *testing.T) {
|
||||
pleg, runtimeMock := newTestGenericPLEGWithRuntimeMock()
|
||||
ch := pleg.Watch()
|
||||
|
||||
infraContainer := createTestContainer("infra", kubecontainer.ContainerStateRunning)
|
||||
|
||||
podID := types.UID("test-pod")
|
||||
pods := []*kubecontainer.Pod{{
|
||||
ID: podID,
|
||||
Containers: []*kubecontainer.Container{infraContainer},
|
||||
}}
|
||||
runtimeMock.On("GetPods", true).Return(pods, nil).Once()
|
||||
|
||||
goodStatus := &kubecontainer.PodStatus{
|
||||
ID: podID,
|
||||
ContainerStatuses: []*kubecontainer.ContainerStatus{{ID: infraContainer.ID, State: infraContainer.State}},
|
||||
}
|
||||
runtimeMock.On("GetPodStatus", podID, "", "").Return(goodStatus, nil).Once()
|
||||
|
||||
goodEvent := &PodLifecycleEvent{ID: podID, Type: ContainerStarted, Data: infraContainer.ID.ID}
|
||||
|
||||
// listing 1 - everything ok, infra container set up for pod
|
||||
pleg.relist()
|
||||
actualEvents := getEventsFromChannel(ch)
|
||||
actualStatus, actualErr := pleg.cache.Get(podID)
|
||||
assert.Equal(t, goodStatus, actualStatus)
|
||||
assert.Equal(t, nil, actualErr)
|
||||
assert.Exactly(t, []*PodLifecycleEvent{goodEvent}, actualEvents)
|
||||
|
||||
// listing 2 - pretend runtime was in the middle of creating the non-infra container for the pod
|
||||
// and return an error during inspection
|
||||
transientContainer := createTestContainer("transient", kubecontainer.ContainerStateUnknown)
|
||||
podsWithTransientContainer := []*kubecontainer.Pod{{
|
||||
ID: podID,
|
||||
Containers: []*kubecontainer.Container{infraContainer, transientContainer},
|
||||
}}
|
||||
runtimeMock.On("GetPods", true).Return(podsWithTransientContainer, nil).Once()
|
||||
|
||||
badStatus := &kubecontainer.PodStatus{
|
||||
ID: podID,
|
||||
ContainerStatuses: []*kubecontainer.ContainerStatus{},
|
||||
}
|
||||
runtimeMock.On("GetPodStatus", podID, "", "").Return(badStatus, errors.New("inspection error")).Once()
|
||||
|
||||
pleg.relist()
|
||||
actualEvents = getEventsFromChannel(ch)
|
||||
actualStatus, actualErr = pleg.cache.Get(podID)
|
||||
assert.Equal(t, badStatus, actualStatus)
|
||||
assert.Equal(t, errors.New("inspection error"), actualErr)
|
||||
assert.Exactly(t, []*PodLifecycleEvent{}, actualEvents)
|
||||
|
||||
// listing 3 - pretend the transient container has now disappeared, leaving just the infra
|
||||
// container. Make sure the pod is reinspected for its status and the cache is updated.
|
||||
runtimeMock.On("GetPods", true).Return(pods, nil).Once()
|
||||
runtimeMock.On("GetPodStatus", podID, "", "").Return(goodStatus, nil).Once()
|
||||
|
||||
pleg.relist()
|
||||
actualEvents = getEventsFromChannel(ch)
|
||||
actualStatus, actualErr = pleg.cache.Get(podID)
|
||||
assert.Equal(t, goodStatus, actualStatus)
|
||||
assert.Equal(t, nil, actualErr)
|
||||
// no events are expected because relist #1 set the old pod record which has the infra container
|
||||
// running. relist #2 had the inspection error and therefore didn't modify either old or new.
|
||||
// relist #3 forced the reinspection of the pod to retrieve its status, but because the list of
|
||||
// containers was the same as relist #1, nothing "changed", so there are no new events.
|
||||
assert.Exactly(t, []*PodLifecycleEvent{}, actualEvents)
|
||||
}
|
||||
|
||||
// Test detecting sandbox state changes.
|
||||
func TestRelistingWithSandboxes(t *testing.T) {
|
||||
testPleg := newTestGenericPLEG()
|
||||
pleg, runtime := testPleg.pleg, testPleg.runtime
|
||||
ch := pleg.Watch()
|
||||
// The first relist should send a PodSync event to each pod.
|
||||
runtime.AllPodList = []*containertest.FakePod{
|
||||
{Pod: &kubecontainer.Pod{
|
||||
ID: "1234",
|
||||
Sandboxes: []*kubecontainer.Container{
|
||||
createTestContainer("c1", kubecontainer.ContainerStateExited),
|
||||
createTestContainer("c2", kubecontainer.ContainerStateRunning),
|
||||
createTestContainer("c3", kubecontainer.ContainerStateUnknown),
|
||||
},
|
||||
}},
|
||||
{Pod: &kubecontainer.Pod{
|
||||
ID: "4567",
|
||||
Sandboxes: []*kubecontainer.Container{
|
||||
createTestContainer("c1", kubecontainer.ContainerStateExited),
|
||||
},
|
||||
}},
|
||||
}
|
||||
pleg.relist()
|
||||
// Report every running/exited container if we see them for the first time.
|
||||
expected := []*PodLifecycleEvent{
|
||||
{ID: "1234", Type: ContainerStarted, Data: "c2"},
|
||||
{ID: "4567", Type: ContainerDied, Data: "c1"},
|
||||
{ID: "1234", Type: ContainerDied, Data: "c1"},
|
||||
}
|
||||
actual := getEventsFromChannel(ch)
|
||||
verifyEvents(t, expected, actual)
|
||||
|
||||
// The second relist should not send out any event because no container has
|
||||
// changed.
|
||||
pleg.relist()
|
||||
verifyEvents(t, expected, actual)
|
||||
|
||||
runtime.AllPodList = []*containertest.FakePod{
|
||||
{Pod: &kubecontainer.Pod{
|
||||
ID: "1234",
|
||||
Sandboxes: []*kubecontainer.Container{
|
||||
createTestContainer("c2", kubecontainer.ContainerStateExited),
|
||||
createTestContainer("c3", kubecontainer.ContainerStateRunning),
|
||||
},
|
||||
}},
|
||||
{Pod: &kubecontainer.Pod{
|
||||
ID: "4567",
|
||||
Sandboxes: []*kubecontainer.Container{
|
||||
createTestContainer("c4", kubecontainer.ContainerStateRunning),
|
||||
},
|
||||
}},
|
||||
}
|
||||
pleg.relist()
|
||||
// Only report containers that transitioned to running or exited status.
|
||||
expected = []*PodLifecycleEvent{
|
||||
{ID: "1234", Type: ContainerRemoved, Data: "c1"},
|
||||
{ID: "1234", Type: ContainerDied, Data: "c2"},
|
||||
{ID: "1234", Type: ContainerStarted, Data: "c3"},
|
||||
{ID: "4567", Type: ContainerRemoved, Data: "c1"},
|
||||
{ID: "4567", Type: ContainerStarted, Data: "c4"},
|
||||
}
|
||||
|
||||
actual = getEventsFromChannel(ch)
|
||||
verifyEvents(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestRelistIPChange(t *testing.T) {
|
||||
pleg, runtimeMock := newTestGenericPLEGWithRuntimeMock()
|
||||
ch := pleg.Watch()
|
||||
|
||||
id := types.UID("test-pod-0")
|
||||
cState := kubecontainer.ContainerStateRunning
|
||||
container := createTestContainer("c0", cState)
|
||||
pod := &kubecontainer.Pod{
|
||||
ID: id,
|
||||
Containers: []*kubecontainer.Container{container},
|
||||
}
|
||||
ipAddr := "192.168.1.5/24"
|
||||
status := &kubecontainer.PodStatus{
|
||||
ID: id,
|
||||
IP: ipAddr,
|
||||
ContainerStatuses: []*kubecontainer.ContainerStatus{{ID: container.ID, State: cState}},
|
||||
}
|
||||
event := &PodLifecycleEvent{ID: pod.ID, Type: ContainerStarted, Data: container.ID.ID}
|
||||
|
||||
runtimeMock.On("GetPods", true).Return([]*kubecontainer.Pod{pod}, nil).Once()
|
||||
runtimeMock.On("GetPodStatus", pod.ID, "", "").Return(status, nil).Once()
|
||||
|
||||
pleg.relist()
|
||||
actualEvents := getEventsFromChannel(ch)
|
||||
actualStatus, actualErr := pleg.cache.Get(pod.ID)
|
||||
assert.Equal(t, status, actualStatus, "test0")
|
||||
assert.Nil(t, actualErr, "test0")
|
||||
assert.Exactly(t, []*PodLifecycleEvent{event}, actualEvents)
|
||||
|
||||
// Clear the IP address and mark the container terminated
|
||||
container = createTestContainer("c0", kubecontainer.ContainerStateExited)
|
||||
pod = &kubecontainer.Pod{
|
||||
ID: id,
|
||||
Containers: []*kubecontainer.Container{container},
|
||||
}
|
||||
status = &kubecontainer.PodStatus{
|
||||
ID: id,
|
||||
ContainerStatuses: []*kubecontainer.ContainerStatus{{ID: container.ID, State: kubecontainer.ContainerStateExited}},
|
||||
}
|
||||
event = &PodLifecycleEvent{ID: pod.ID, Type: ContainerDied, Data: container.ID.ID}
|
||||
runtimeMock.On("GetPods", true).Return([]*kubecontainer.Pod{pod}, nil).Once()
|
||||
runtimeMock.On("GetPodStatus", pod.ID, "", "").Return(status, nil).Once()
|
||||
|
||||
pleg.relist()
|
||||
actualEvents = getEventsFromChannel(ch)
|
||||
actualStatus, actualErr = pleg.cache.Get(pod.ID)
|
||||
// Must copy status to compare since its pointer gets passed through all
|
||||
// the way to the event
|
||||
statusCopy := *status
|
||||
statusCopy.IP = ipAddr
|
||||
assert.Equal(t, &statusCopy, actualStatus, "test0")
|
||||
assert.Nil(t, actualErr, "test0")
|
||||
assert.Exactly(t, []*PodLifecycleEvent{event}, actualEvents)
|
||||
}
|
vendor/k8s.io/kubernetes/pkg/kubelet/pleg/pleg.go (generated, vendored, new file, 52 lines)
@@ -0,0 +1,52 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pleg

import (
    "k8s.io/apimachinery/pkg/types"
)

type PodLifeCycleEventType string

const (
    ContainerStarted PodLifeCycleEventType = "ContainerStarted"
    ContainerDied    PodLifeCycleEventType = "ContainerDied"
    ContainerRemoved PodLifeCycleEventType = "ContainerRemoved"
    // PodSync is used to trigger syncing of a pod when the observed change of
    // the state of the pod cannot be captured by any single event above.
    PodSync PodLifeCycleEventType = "PodSync"
    // Do not use the events below because they are disabled in GenericPLEG.
    ContainerChanged PodLifeCycleEventType = "ContainerChanged"
)

// PodLifecycleEvent is an event that reflects the change of the pod state.
type PodLifecycleEvent struct {
    // The pod ID.
    ID types.UID
    // The type of the event.
    Type PodLifeCycleEventType
    // The accompanied data which varies based on the event type.
    // - ContainerStarted/ContainerDied/ContainerRemoved: the container ID (string).
    // - All other event types: unused.
    Data interface{}
}

type PodLifecycleEventGenerator interface {
    Start()
    Watch() chan *PodLifecycleEvent
    Healthy() (bool, error)
}
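PodLifecycleEventGenerator is the surface other components consume. Below is a hedged sketch (not part of this commit) of how a caller might poll the Healthy() method exposed here; the package name, check interval, and logging are assumptions for illustration only.

// Illustrative sketch only, not part of this vendor drop.
package plegexample

import (
    "log"
    "time"

    "k8s.io/kubernetes/pkg/kubelet/pleg"
)

// watchHealth periodically checks whether the PLEG has relisted recently.
func watchHealth(g pleg.PodLifecycleEventGenerator, interval time.Duration) {
    for range time.Tick(interval) {
        // Healthy returns false once more than relistThreshold (3 minutes in
        // generic.go) has elapsed since the last successful relist.
        if ok, err := g.Healthy(); !ok {
            log.Printf("PLEG is unhealthy: %v", err)
        }
    }
}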