vendor files

Serguei Bezverkhi
2018-01-09 13:57:14 -05:00
parent 558bc6c02a
commit 7b24313bd6
16547 changed files with 4527373 additions and 0 deletions

vendor/k8s.io/kubernetes/pkg/kubelet/images/BUILD generated vendored Normal file

@@ -0,0 +1,70 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"helpers.go",
"image_gc_manager.go",
"image_manager.go",
"puller.go",
"types.go",
],
importpath = "k8s.io/kubernetes/pkg/kubelet/images",
deps = [
"//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/events:go_default_library",
"//pkg/util/parsers:go_default_library",
"//vendor/github.com/docker/distribution/reference:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"image_gc_manager_test.go",
"image_manager_test.go",
],
importpath = "k8s.io/kubernetes/pkg/kubelet/images",
library = ":go_default_library",
deps = [
"//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/container/testing:go_default_library",
"//pkg/kubelet/server/stats/testing:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/github.com/stretchr/testify/require:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/pkg/kubelet/images/doc.go generated vendored Normal file

@@ -0,0 +1,18 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package images is responsible for managing lifecycle of container images.
package images

vendor/k8s.io/kubernetes/pkg/kubelet/images/helpers.go generated vendored Normal file

@@ -0,0 +1,50 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package images
import (
"fmt"
"k8s.io/api/core/v1"
"k8s.io/client-go/util/flowcontrol"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
// throttleImagePulling wraps kubecontainer.ImageService to throttle image
// pulling based on the given QPS and burst limits. If QPS is zero, defaults
// to no throttling.
func throttleImagePulling(imageService kubecontainer.ImageService, qps float32, burst int) kubecontainer.ImageService {
if qps == 0.0 {
return imageService
}
return &throttledImageService{
ImageService: imageService,
limiter: flowcontrol.NewTokenBucketRateLimiter(qps, burst),
}
}
type throttledImageService struct {
kubecontainer.ImageService
limiter flowcontrol.RateLimiter
}
func (ts throttledImageService) PullImage(image kubecontainer.ImageSpec, secrets []v1.Secret) (string, error) {
if ts.limiter.TryAccept() {
return ts.ImageService.PullImage(image, secrets)
}
return "", fmt.Errorf("pull QPS exceeded.")
}
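A minimal sketch of how the wrapper above behaves, written as if it were in-package test code (fakePullOnlyService and demoThrottling are hypothetical, not part of the vendored package): with qps=1 and burst=1 the first immediate pull is admitted and the second is rejected by the token bucket.

package images

import (
	"fmt"

	"k8s.io/api/core/v1"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

// fakePullOnlyService overrides only PullImage; the embedded interface is left
// nil, so any other method call would panic (sufficient for this sketch).
type fakePullOnlyService struct {
	kubecontainer.ImageService
}

func (f fakePullOnlyService) PullImage(image kubecontainer.ImageSpec, secrets []v1.Secret) (string, error) {
	return "sha256:fake", nil
}

func demoThrottling() {
	svc := throttleImagePulling(fakePullOnlyService{}, 1, 1)
	_, err1 := svc.PullImage(kubecontainer.ImageSpec{Image: "busybox"}, nil) // admitted
	_, err2 := svc.PullImage(kubecontainer.ImageSpec{Image: "busybox"}, nil) // token bucket empty
	fmt.Println(err1, err2) // <nil>, then "pull QPS exceeded."
}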

vendor/k8s.io/kubernetes/pkg/kubelet/images/image_gc_manager.go generated vendored Normal file

@@ -0,0 +1,394 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package images
import (
goerrors "errors"
"fmt"
"math"
"sort"
"sync"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/record"
statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/container"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events"
)
// StatsProvider is an interface for fetching stats used during image garbage
// collection.
type StatsProvider interface {
// ImageFsStats returns the stats of the image filesystem.
ImageFsStats() (*statsapi.FsStats, error)
}
// Manages lifecycle of all images.
//
// Implementation is thread-safe.
type ImageGCManager interface {
// Applies the garbage collection policy. Errors include being unable to free
// enough space as per the garbage collection policy.
GarbageCollect() error
// Start async garbage collection of images.
Start()
GetImageList() ([]kubecontainer.Image, error)
// Deletes all unused images and returns the number of bytes freed. The number of bytes freed is always returned.
DeleteUnusedImages() (int64, error)
}
// A policy for garbage collecting images. Policy defines an allowed band in
// which garbage collection will be run.
type ImageGCPolicy struct {
// Any usage above this threshold will always trigger garbage collection.
// This is the highest usage we will allow.
HighThresholdPercent int
// Any usage below this threshold will never trigger garbage collection.
// This is the lowest threshold we will try to garbage collect to.
LowThresholdPercent int
// Minimum age at which an image can be garbage collected.
MinAge time.Duration
}
type realImageGCManager struct {
// Container runtime
runtime container.Runtime
// Records of images and their use.
imageRecords map[string]*imageRecord
imageRecordsLock sync.Mutex
// The image garbage collection policy in use.
policy ImageGCPolicy
// statsProvider provides stats used during image garbage collection.
statsProvider StatsProvider
// Recorder for Kubernetes events.
recorder record.EventRecorder
// Reference to this node.
nodeRef *v1.ObjectReference
// Track initialization
initialized bool
// imageCache is the cache of latest image list.
imageCache imageCache
}
// imageCache caches latest result of ListImages.
type imageCache struct {
// sync.RWMutex protects the image cache.
sync.RWMutex
// images is the image cache.
images []kubecontainer.Image
}
// set updates image cache.
func (i *imageCache) set(images []kubecontainer.Image) {
i.Lock()
defer i.Unlock()
i.images = images
}
// get gets image list from image cache.
func (i *imageCache) get() []kubecontainer.Image {
i.RLock()
defer i.RUnlock()
return i.images
}
// Information about the images we track.
type imageRecord struct {
// Time when this image was first detected.
firstDetected time.Time
// Time when we last saw this image being used.
lastUsed time.Time
// Size of the image in bytes.
size int64
}
func NewImageGCManager(runtime container.Runtime, statsProvider StatsProvider, recorder record.EventRecorder, nodeRef *v1.ObjectReference, policy ImageGCPolicy) (ImageGCManager, error) {
// Validate policy.
if policy.HighThresholdPercent < 0 || policy.HighThresholdPercent > 100 {
return nil, fmt.Errorf("invalid HighThresholdPercent %d, must be in range [0-100]", policy.HighThresholdPercent)
}
if policy.LowThresholdPercent < 0 || policy.LowThresholdPercent > 100 {
return nil, fmt.Errorf("invalid LowThresholdPercent %d, must be in range [0-100]", policy.LowThresholdPercent)
}
if policy.LowThresholdPercent > policy.HighThresholdPercent {
return nil, fmt.Errorf("LowThresholdPercent %d can not be higher than HighThresholdPercent %d", policy.LowThresholdPercent, policy.HighThresholdPercent)
}
im := &realImageGCManager{
runtime: runtime,
policy: policy,
imageRecords: make(map[string]*imageRecord),
statsProvider: statsProvider,
recorder: recorder,
nodeRef: nodeRef,
initialized: false,
}
return im, nil
}
func (im *realImageGCManager) Start() {
go wait.Until(func() {
// On the initial detection, leave the detected time as zero ("unknown", in the past).
var ts time.Time
if im.initialized {
ts = time.Now()
}
err := im.detectImages(ts)
if err != nil {
glog.Warningf("[imageGCManager] Failed to monitor images: %v", err)
} else {
im.initialized = true
}
}, 5*time.Minute, wait.NeverStop)
// Start a goroutine that periodically updates the image cache.
// TODO(random-liu): Merge this with the previous loop.
go wait.Until(func() {
images, err := im.runtime.ListImages()
if err != nil {
glog.Warningf("[imageGCManager] Failed to update image list: %v", err)
} else {
im.imageCache.set(images)
}
}, 30*time.Second, wait.NeverStop)
}
// Get a list of images on this node
func (im *realImageGCManager) GetImageList() ([]kubecontainer.Image, error) {
return im.imageCache.get(), nil
}
func (im *realImageGCManager) detectImages(detectTime time.Time) error {
images, err := im.runtime.ListImages()
if err != nil {
return err
}
pods, err := im.runtime.GetPods(true)
if err != nil {
return err
}
// Make a set of images in use by containers.
imagesInUse := sets.NewString()
for _, pod := range pods {
for _, container := range pod.Containers {
glog.V(5).Infof("Pod %s/%s, container %s uses image %s(%s)", pod.Namespace, pod.Name, container.Name, container.Image, container.ImageID)
imagesInUse.Insert(container.ImageID)
}
}
// Add new images and record those being used.
now := time.Now()
currentImages := sets.NewString()
im.imageRecordsLock.Lock()
defer im.imageRecordsLock.Unlock()
for _, image := range images {
glog.V(5).Infof("Adding image ID %s to currentImages", image.ID)
currentImages.Insert(image.ID)
// New image, record when it was first detected.
if _, ok := im.imageRecords[image.ID]; !ok {
glog.V(5).Infof("Image ID %s is new", image.ID)
im.imageRecords[image.ID] = &imageRecord{
firstDetected: detectTime,
}
}
// Set last used time to now if the image is being used.
if isImageUsed(image, imagesInUse) {
glog.V(5).Infof("Setting Image ID %s lastUsed to %v", image.ID, now)
im.imageRecords[image.ID].lastUsed = now
}
glog.V(5).Infof("Image ID %s has size %d", image.ID, image.Size)
im.imageRecords[image.ID].size = image.Size
}
// Remove old images from our records.
for image := range im.imageRecords {
if !currentImages.Has(image) {
glog.V(5).Infof("Image ID %s is no longer present; removing from imageRecords", image)
delete(im.imageRecords, image)
}
}
return nil
}
func (im *realImageGCManager) GarbageCollect() error {
// Get disk usage on disk holding images.
fsStats, err := im.statsProvider.ImageFsStats()
if err != nil {
return err
}
var capacity, available int64
if fsStats.CapacityBytes != nil {
capacity = int64(*fsStats.CapacityBytes)
}
if fsStats.AvailableBytes != nil {
available = int64(*fsStats.AvailableBytes)
}
if available > capacity {
glog.Warningf("available %d is larger than capacity %d", available, capacity)
available = capacity
}
// Check valid capacity.
if capacity == 0 {
err := goerrors.New("invalid capacity 0 on image filesystem")
im.recorder.Eventf(im.nodeRef, v1.EventTypeWarning, events.InvalidDiskCapacity, err.Error())
return err
}
// If over the max threshold, free enough to place us at the lower threshold.
usagePercent := 100 - int(available*100/capacity)
if usagePercent >= im.policy.HighThresholdPercent {
amountToFree := capacity*int64(100-im.policy.LowThresholdPercent)/100 - available
glog.Infof("[imageGCManager]: Disk usage on image filesystem is at %d%% which is over the high threshold (%d%%). Trying to free %d bytes", usagePercent, im.policy.HighThresholdPercent, amountToFree)
freed, err := im.freeSpace(amountToFree, time.Now())
if err != nil {
return err
}
if freed < amountToFree {
err := fmt.Errorf("failed to garbage collect required amount of images. Wanted to free %d bytes, but freed %d bytes", amountToFree, freed)
im.recorder.Eventf(im.nodeRef, v1.EventTypeWarning, events.FreeDiskSpaceFailed, err.Error())
return err
}
}
return nil
}
func (im *realImageGCManager) DeleteUnusedImages() (int64, error) {
return im.freeSpace(math.MaxInt64, time.Now())
}
// Tries to free bytesToFree worth of images on the disk.
//
// Returns the number of bytes freed and an error if any occurred. The number of
// bytes freed is always returned.
// Note that error may be nil and the number of bytes freed may be less
// than bytesToFree.
func (im *realImageGCManager) freeSpace(bytesToFree int64, freeTime time.Time) (int64, error) {
err := im.detectImages(freeTime)
if err != nil {
return 0, err
}
im.imageRecordsLock.Lock()
defer im.imageRecordsLock.Unlock()
// Get all images in eviction order.
images := make([]evictionInfo, 0, len(im.imageRecords))
for image, record := range im.imageRecords {
images = append(images, evictionInfo{
id: image,
imageRecord: *record,
})
}
sort.Sort(byLastUsedAndDetected(images))
// Delete unused images until we've freed up enough space.
var deletionErrors []error
spaceFreed := int64(0)
for _, image := range images {
glog.V(5).Infof("Evaluating image ID %s for possible garbage collection", image.id)
// Images that are currently in use were given a newer lastUsed.
if image.lastUsed.Equal(freeTime) || image.lastUsed.After(freeTime) {
glog.V(5).Infof("Image ID %s has lastUsed=%v which is >= freeTime=%v, not eligible for garbage collection", image.id, image.lastUsed, freeTime)
break
}
// Avoid garbage collecting the image if it is not old enough. In that case the
// image may have just been pulled down and will be used by a container right away.
if freeTime.Sub(image.firstDetected) < im.policy.MinAge {
glog.V(5).Infof("Image ID %s has age %v which is less than the policy's minAge of %v, not eligible for garbage collection", image.id, freeTime.Sub(image.firstDetected), im.policy.MinAge)
continue
}
// Remove image. Continue despite errors.
glog.Infof("[imageGCManager]: Removing image %q to free %d bytes", image.id, image.size)
err := im.runtime.RemoveImage(container.ImageSpec{Image: image.id})
if err != nil {
deletionErrors = append(deletionErrors, err)
continue
}
delete(im.imageRecords, image.id)
spaceFreed += image.size
if spaceFreed >= bytesToFree {
break
}
}
if len(deletionErrors) > 0 {
return spaceFreed, fmt.Errorf("wanted to free %d bytes, but freed %d bytes space with errors in image deletion: %v", bytesToFree, spaceFreed, errors.NewAggregate(deletionErrors))
}
return spaceFreed, nil
}
type evictionInfo struct {
id string
imageRecord
}
type byLastUsedAndDetected []evictionInfo
func (ev byLastUsedAndDetected) Len() int { return len(ev) }
func (ev byLastUsedAndDetected) Swap(i, j int) { ev[i], ev[j] = ev[j], ev[i] }
func (ev byLastUsedAndDetected) Less(i, j int) bool {
// Sort by last used, break ties by detected.
if ev[i].lastUsed.Equal(ev[j].lastUsed) {
return ev[i].firstDetected.Before(ev[j].firstDetected)
} else {
return ev[i].lastUsed.Before(ev[j].lastUsed)
}
}
func isImageUsed(image container.Image, imagesInUse sets.String) bool {
// Check the image ID.
if _, ok := imagesInUse[image.ID]; ok {
return true
}
return false
}
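To make the threshold arithmetic above concrete, the helper below is a hypothetical in-package sketch (exampleGCBudget is not part of the vendored file) that mirrors the usagePercent/amountToFree calculation in GarbageCollect for a 10 GiB image filesystem with 1 GiB available under an 85%/80% policy.

// With capacity = 10 GiB and available = 1 GiB, usagePercent is 90, which is
// over the 85% high threshold, so GC aims for the 80% low threshold:
// 10 GiB * (100-80)/100 - 1 GiB = 1 GiB to free.
func exampleGCBudget() int64 {
	policy := ImageGCPolicy{
		HighThresholdPercent: 85,
		LowThresholdPercent:  80,
		MinAge:               2 * time.Minute,
	}
	var capacity, available int64 = 10 << 30, 1 << 30
	usagePercent := 100 - int(available*100/capacity)
	if usagePercent < policy.HighThresholdPercent {
		return 0 // below the high threshold: GarbageCollect frees nothing
	}
	return capacity*int64(100-policy.LowThresholdPercent)/100 - available
}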

vendor/k8s.io/kubernetes/pkg/kubelet/images/image_gc_manager_test.go generated vendored Normal file

@@ -0,0 +1,530 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package images
import (
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/client-go/tools/record"
statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
statstest "k8s.io/kubernetes/pkg/kubelet/server/stats/testing"
)
var zero time.Time
func newRealImageGCManager(policy ImageGCPolicy) (*realImageGCManager, *containertest.FakeRuntime, *statstest.StatsProvider) {
fakeRuntime := &containertest.FakeRuntime{}
mockStatsProvider := new(statstest.StatsProvider)
return &realImageGCManager{
runtime: fakeRuntime,
policy: policy,
imageRecords: make(map[string]*imageRecord),
statsProvider: mockStatsProvider,
recorder: &record.FakeRecorder{},
}, fakeRuntime, mockStatsProvider
}
// Accessors used for thread-safe testing.
func (im *realImageGCManager) imageRecordsLen() int {
im.imageRecordsLock.Lock()
defer im.imageRecordsLock.Unlock()
return len(im.imageRecords)
}
func (im *realImageGCManager) getImageRecord(name string) (*imageRecord, bool) {
im.imageRecordsLock.Lock()
defer im.imageRecordsLock.Unlock()
v, ok := im.imageRecords[name]
vCopy := *v
return &vCopy, ok
}
// Returns the ID of the image with the given number.
func imageID(id int) string {
return fmt.Sprintf("image-%d", id)
}
// Returns the name of the image with the given ID.
func imageName(id int) string {
return imageID(id) + "-name"
}
// Make an image with the specified ID.
func makeImage(id int, size int64) container.Image {
return container.Image{
ID: imageID(id),
Size: size,
}
}
// Make a container with the specified ID. It will use the image with the same ID.
func makeContainer(id int) *container.Container {
return &container.Container{
ID: container.ContainerID{Type: "test", ID: fmt.Sprintf("container-%d", id)},
Image: imageName(id),
ImageID: imageID(id),
}
}
func TestDetectImagesInitialDetect(t *testing.T) {
manager, fakeRuntime, _ := newRealImageGCManager(ImageGCPolicy{})
fakeRuntime.ImageList = []container.Image{
makeImage(0, 1024),
makeImage(1, 2048),
makeImage(2, 2048),
}
fakeRuntime.AllPodList = []*containertest.FakePod{
{Pod: &container.Pod{
Containers: []*container.Container{
{
ID: container.ContainerID{Type: "test", ID: fmt.Sprintf("container-%d", 1)},
ImageID: imageID(1),
// The Image field is not set, to simulate a no-name image
},
{
ID: container.ContainerID{Type: "test", ID: fmt.Sprintf("container-%d", 2)},
Image: imageName(2),
ImageID: imageID(2),
},
},
}},
}
startTime := time.Now().Add(-time.Millisecond)
err := manager.detectImages(zero)
assert := assert.New(t)
require.NoError(t, err)
assert.Equal(manager.imageRecordsLen(), 3)
noContainer, ok := manager.getImageRecord(imageID(0))
require.True(t, ok)
assert.Equal(zero, noContainer.firstDetected)
assert.Equal(zero, noContainer.lastUsed)
withContainerUsingNoNameImage, ok := manager.getImageRecord(imageID(1))
require.True(t, ok)
assert.Equal(zero, withContainerUsingNoNameImage.firstDetected)
assert.True(withContainerUsingNoNameImage.lastUsed.After(startTime))
withContainer, ok := manager.getImageRecord(imageID(2))
require.True(t, ok)
assert.Equal(zero, withContainer.firstDetected)
assert.True(withContainer.lastUsed.After(startTime))
}
func TestDetectImagesWithNewImage(t *testing.T) {
// Just one image initially.
manager, fakeRuntime, _ := newRealImageGCManager(ImageGCPolicy{})
fakeRuntime.ImageList = []container.Image{
makeImage(0, 1024),
makeImage(1, 2048),
}
fakeRuntime.AllPodList = []*containertest.FakePod{
{Pod: &container.Pod{
Containers: []*container.Container{
makeContainer(1),
},
}},
}
err := manager.detectImages(zero)
assert := assert.New(t)
require.NoError(t, err)
assert.Equal(manager.imageRecordsLen(), 2)
// Add a new image.
fakeRuntime.ImageList = []container.Image{
makeImage(0, 1024),
makeImage(1, 1024),
makeImage(2, 1024),
}
detectedTime := zero.Add(time.Second)
startTime := time.Now().Add(-time.Millisecond)
err = manager.detectImages(detectedTime)
require.NoError(t, err)
assert.Equal(manager.imageRecordsLen(), 3)
noContainer, ok := manager.getImageRecord(imageID(0))
require.True(t, ok)
assert.Equal(zero, noContainer.firstDetected)
assert.Equal(zero, noContainer.lastUsed)
withContainer, ok := manager.getImageRecord(imageID(1))
require.True(t, ok)
assert.Equal(zero, withContainer.firstDetected)
assert.True(withContainer.lastUsed.After(startTime))
newContainer, ok := manager.getImageRecord(imageID(2))
require.True(t, ok)
assert.Equal(detectedTime, newContainer.firstDetected)
assert.Equal(zero, noContainer.lastUsed)
}
func TestDetectImagesContainerStopped(t *testing.T) {
manager, fakeRuntime, _ := newRealImageGCManager(ImageGCPolicy{})
fakeRuntime.ImageList = []container.Image{
makeImage(0, 1024),
makeImage(1, 2048),
}
fakeRuntime.AllPodList = []*containertest.FakePod{
{Pod: &container.Pod{
Containers: []*container.Container{
makeContainer(1),
},
}},
}
err := manager.detectImages(zero)
assert := assert.New(t)
require.NoError(t, err)
assert.Equal(manager.imageRecordsLen(), 2)
withContainer, ok := manager.getImageRecord(imageID(1))
require.True(t, ok)
// Simulate container being stopped.
fakeRuntime.AllPodList = []*containertest.FakePod{}
err = manager.detectImages(time.Now())
require.NoError(t, err)
assert.Equal(manager.imageRecordsLen(), 2)
container1, ok := manager.getImageRecord(imageID(0))
require.True(t, ok)
assert.Equal(zero, container1.firstDetected)
assert.Equal(zero, container1.lastUsed)
container2, ok := manager.getImageRecord(imageID(1))
require.True(t, ok)
assert.Equal(zero, container2.firstDetected)
assert.True(container2.lastUsed.Equal(withContainer.lastUsed))
}
func TestDetectImagesWithRemovedImages(t *testing.T) {
manager, fakeRuntime, _ := newRealImageGCManager(ImageGCPolicy{})
fakeRuntime.ImageList = []container.Image{
makeImage(0, 1024),
makeImage(1, 2048),
}
fakeRuntime.AllPodList = []*containertest.FakePod{
{Pod: &container.Pod{
Containers: []*container.Container{
makeContainer(1),
},
}},
}
err := manager.detectImages(zero)
assert := assert.New(t)
require.NoError(t, err)
assert.Equal(manager.imageRecordsLen(), 2)
// Simulate both images being removed.
fakeRuntime.ImageList = []container.Image{}
err = manager.detectImages(time.Now())
require.NoError(t, err)
assert.Equal(manager.imageRecordsLen(), 0)
}
func TestFreeSpaceImagesInUseContainersAreIgnored(t *testing.T) {
manager, fakeRuntime, _ := newRealImageGCManager(ImageGCPolicy{})
fakeRuntime.ImageList = []container.Image{
makeImage(0, 1024),
makeImage(1, 2048),
}
fakeRuntime.AllPodList = []*containertest.FakePod{
{Pod: &container.Pod{
Containers: []*container.Container{
makeContainer(1),
},
}},
}
spaceFreed, err := manager.freeSpace(2048, time.Now())
assert := assert.New(t)
require.NoError(t, err)
assert.EqualValues(1024, spaceFreed)
assert.Len(fakeRuntime.ImageList, 1)
}
func TestDeleteUnusedImagesRemoveAllUnusedImages(t *testing.T) {
manager, fakeRuntime, _ := newRealImageGCManager(ImageGCPolicy{})
fakeRuntime.ImageList = []container.Image{
makeImage(0, 1024),
makeImage(1, 2048),
makeImage(2, 2048),
}
fakeRuntime.AllPodList = []*containertest.FakePod{
{Pod: &container.Pod{
Containers: []*container.Container{
makeContainer(2),
},
}},
}
spaceFreed, err := manager.DeleteUnusedImages()
assert := assert.New(t)
require.NoError(t, err)
assert.EqualValues(3072, spaceFreed)
assert.Len(fakeRuntime.ImageList, 1)
}
func TestFreeSpaceRemoveByLeastRecentlyUsed(t *testing.T) {
manager, fakeRuntime, _ := newRealImageGCManager(ImageGCPolicy{})
fakeRuntime.ImageList = []container.Image{
makeImage(0, 1024),
makeImage(1, 2048),
}
fakeRuntime.AllPodList = []*containertest.FakePod{
{Pod: &container.Pod{
Containers: []*container.Container{
makeContainer(0),
makeContainer(1),
},
}},
}
// Make 1 be more recently used than 0.
require.NoError(t, manager.detectImages(zero))
fakeRuntime.AllPodList = []*containertest.FakePod{
{Pod: &container.Pod{
Containers: []*container.Container{
makeContainer(1),
},
}},
}
require.NoError(t, manager.detectImages(time.Now()))
fakeRuntime.AllPodList = []*containertest.FakePod{
{Pod: &container.Pod{
Containers: []*container.Container{},
}},
}
require.NoError(t, manager.detectImages(time.Now()))
require.Equal(t, manager.imageRecordsLen(), 2)
spaceFreed, err := manager.freeSpace(1024, time.Now())
assert := assert.New(t)
require.NoError(t, err)
assert.EqualValues(1024, spaceFreed)
assert.Len(fakeRuntime.ImageList, 1)
}
func TestFreeSpaceTiesBrokenByDetectedTime(t *testing.T) {
manager, fakeRuntime, _ := newRealImageGCManager(ImageGCPolicy{})
fakeRuntime.ImageList = []container.Image{
makeImage(0, 1024),
}
fakeRuntime.AllPodList = []*containertest.FakePod{
{Pod: &container.Pod{
Containers: []*container.Container{
makeContainer(0),
},
}},
}
// Make 1 more recently detected but used at the same time as 0.
require.NoError(t, manager.detectImages(zero))
fakeRuntime.ImageList = []container.Image{
makeImage(0, 1024),
makeImage(1, 2048),
}
require.NoError(t, manager.detectImages(time.Now()))
fakeRuntime.AllPodList = []*containertest.FakePod{}
require.NoError(t, manager.detectImages(time.Now()))
require.Equal(t, manager.imageRecordsLen(), 2)
spaceFreed, err := manager.freeSpace(1024, time.Now())
assert := assert.New(t)
require.NoError(t, err)
assert.EqualValues(2048, spaceFreed)
assert.Len(fakeRuntime.ImageList, 1)
}
func TestGarbageCollectBelowLowThreshold(t *testing.T) {
policy := ImageGCPolicy{
HighThresholdPercent: 90,
LowThresholdPercent: 80,
}
manager, _, mockStatsProvider := newRealImageGCManager(policy)
// Expect 40% usage.
mockStatsProvider.On("ImageFsStats").Return(&statsapi.FsStats{
AvailableBytes: uint64Ptr(600),
CapacityBytes: uint64Ptr(1000),
}, nil)
assert.NoError(t, manager.GarbageCollect())
}
func TestGarbageCollectCadvisorFailure(t *testing.T) {
policy := ImageGCPolicy{
HighThresholdPercent: 90,
LowThresholdPercent: 80,
}
manager, _, mockStatsProvider := newRealImageGCManager(policy)
mockStatsProvider.On("ImageFsStats").Return(&statsapi.FsStats{}, fmt.Errorf("error"))
assert.NotNil(t, manager.GarbageCollect())
}
func TestGarbageCollectBelowSuccess(t *testing.T) {
policy := ImageGCPolicy{
HighThresholdPercent: 90,
LowThresholdPercent: 80,
}
manager, fakeRuntime, mockStatsProvider := newRealImageGCManager(policy)
// Expect 95% usage and most of it gets freed.
mockStatsProvider.On("ImageFsStats").Return(&statsapi.FsStats{
AvailableBytes: uint64Ptr(50),
CapacityBytes: uint64Ptr(1000),
}, nil)
fakeRuntime.ImageList = []container.Image{
makeImage(0, 450),
}
assert.NoError(t, manager.GarbageCollect())
}
func TestGarbageCollectNotEnoughFreed(t *testing.T) {
policy := ImageGCPolicy{
HighThresholdPercent: 90,
LowThresholdPercent: 80,
}
manager, fakeRuntime, mockStatsProvider := newRealImageGCManager(policy)
// Expect 95% usage and little of it gets freed.
mockStatsProvider.On("ImageFsStats").Return(&statsapi.FsStats{
AvailableBytes: uint64Ptr(50),
CapacityBytes: uint64Ptr(1000),
}, nil)
fakeRuntime.ImageList = []container.Image{
makeImage(0, 50),
}
assert.NotNil(t, manager.GarbageCollect())
}
func TestGarbageCollectImageNotOldEnough(t *testing.T) {
policy := ImageGCPolicy{
HighThresholdPercent: 90,
LowThresholdPercent: 80,
MinAge: time.Minute * 1,
}
fakeRuntime := &containertest.FakeRuntime{}
mockStatsProvider := new(statstest.StatsProvider)
manager := &realImageGCManager{
runtime: fakeRuntime,
policy: policy,
imageRecords: make(map[string]*imageRecord),
statsProvider: mockStatsProvider,
recorder: &record.FakeRecorder{},
}
fakeRuntime.ImageList = []container.Image{
makeImage(0, 1024),
makeImage(1, 2048),
}
// 1 image is in use, and another one is not old enough
fakeRuntime.AllPodList = []*containertest.FakePod{
{Pod: &container.Pod{
Containers: []*container.Container{
makeContainer(1),
},
}},
}
fakeClock := clock.NewFakeClock(time.Now())
t.Log(fakeClock.Now())
require.NoError(t, manager.detectImages(fakeClock.Now()))
require.Equal(t, manager.imageRecordsLen(), 2)
// No space freed since one image is in use and the other is not old enough.
spaceFreed, err := manager.freeSpace(1024, fakeClock.Now())
assert := assert.New(t)
require.NoError(t, err)
assert.EqualValues(0, spaceFreed)
assert.Len(fakeRuntime.ImageList, 2)
// Move the clock forward by MinAge; then one image becomes eligible for garbage collection.
fakeClock.Step(policy.MinAge)
spaceFreed, err = manager.freeSpace(1024, fakeClock.Now())
require.NoError(t, err)
assert.EqualValues(1024, spaceFreed)
assert.Len(fakeRuntime.ImageList, 1)
}
func TestValidateImageGCPolicy(t *testing.T) {
testCases := []struct {
name string
imageGCPolicy ImageGCPolicy
expectErr string
}{
{
name: "Test for LowThresholdPercent < HighThresholdPercent",
imageGCPolicy: ImageGCPolicy{
HighThresholdPercent: 2,
LowThresholdPercent: 1,
},
},
{
name: "Test for HighThresholdPercent < 0,",
imageGCPolicy: ImageGCPolicy{
HighThresholdPercent: -1,
},
expectErr: "invalid HighThresholdPercent -1, must be in range [0-100]",
},
{
name: "Test for HighThresholdPercent > 100",
imageGCPolicy: ImageGCPolicy{
HighThresholdPercent: 101,
},
expectErr: "invalid HighThresholdPercent 101, must be in range [0-100]",
},
{
name: "Test for LowThresholdPercent < 0",
imageGCPolicy: ImageGCPolicy{
LowThresholdPercent: -1,
},
expectErr: "invalid LowThresholdPercent -1, must be in range [0-100]",
},
{
name: "Test for LowThresholdPercent > 100",
imageGCPolicy: ImageGCPolicy{
LowThresholdPercent: 101,
},
expectErr: "invalid LowThresholdPercent 101, must be in range [0-100]",
},
{
name: "Test for LowThresholdPercent > HighThresholdPercent",
imageGCPolicy: ImageGCPolicy{
HighThresholdPercent: 1,
LowThresholdPercent: 2,
},
expectErr: "LowThresholdPercent 2 can not be higher than HighThresholdPercent 1",
},
}
for _, tc := range testCases {
if _, err := NewImageGCManager(nil, nil, nil, nil, tc.imageGCPolicy); err != nil {
if err.Error() != tc.expectErr {
t.Errorf("[%s:]Expected err:%v, but got:%v", tc.name, tc.expectErr, err.Error())
}
}
}
}
func uint64Ptr(i uint64) *uint64 {
return &i
}

vendor/k8s.io/kubernetes/pkg/kubelet/images/image_manager.go generated vendored Normal file

@@ -0,0 +1,164 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package images
import (
"fmt"
dockerref "github.com/docker/distribution/reference"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/flowcontrol"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/util/parsers"
)
// imageManager provides the functionalities for image pulling.
type imageManager struct {
recorder record.EventRecorder
imageService kubecontainer.ImageService
backOff *flowcontrol.Backoff
// It will check the presence of the image, and report the 'image pulling' and 'image pulled' events correspondingly.
puller imagePuller
}
var _ ImageManager = &imageManager{}
func NewImageManager(recorder record.EventRecorder, imageService kubecontainer.ImageService, imageBackOff *flowcontrol.Backoff, serialized bool, qps float32, burst int) ImageManager {
imageService = throttleImagePulling(imageService, qps, burst)
var puller imagePuller
if serialized {
puller = newSerialImagePuller(imageService)
} else {
puller = newParallelImagePuller(imageService)
}
return &imageManager{
recorder: recorder,
imageService: imageService,
backOff: imageBackOff,
puller: puller,
}
}
// shouldPullImage returns whether we should pull an image according to
// the presence and pull policy of the image.
func shouldPullImage(container *v1.Container, imagePresent bool) bool {
if container.ImagePullPolicy == v1.PullNever {
return false
}
if container.ImagePullPolicy == v1.PullAlways ||
(container.ImagePullPolicy == v1.PullIfNotPresent && (!imagePresent)) {
return true
}
return false
}
// logIt records an event using ref and msg; if ref is nil, it logs to glog using prefix, msg, and logFn.
func (m *imageManager) logIt(ref *v1.ObjectReference, eventtype, event, prefix, msg string, logFn func(args ...interface{})) {
if ref != nil {
m.recorder.Event(ref, eventtype, event, msg)
} else {
logFn(fmt.Sprint(prefix, " ", msg))
}
}
// EnsureImageExists pulls the image for the specified pod and container, and returns
// (imageRef, error message, error).
func (m *imageManager) EnsureImageExists(pod *v1.Pod, container *v1.Container, pullSecrets []v1.Secret) (string, string, error) {
logPrefix := fmt.Sprintf("%s/%s", pod.Name, container.Image)
ref, err := kubecontainer.GenerateContainerRef(pod, container)
if err != nil {
glog.Errorf("Couldn't make a ref to pod %v, container %v: '%v'", pod.Name, container.Name, err)
}
// If the image contains no tag or digest, a default tag should be applied.
image, err := applyDefaultImageTag(container.Image)
if err != nil {
msg := fmt.Sprintf("Failed to apply default image tag %q: %v", container.Image, err)
m.logIt(ref, v1.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, glog.Warning)
return "", msg, ErrInvalidImageName
}
spec := kubecontainer.ImageSpec{Image: image}
imageRef, err := m.imageService.GetImageRef(spec)
if err != nil {
msg := fmt.Sprintf("Failed to inspect image %q: %v", container.Image, err)
m.logIt(ref, v1.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, glog.Warning)
return "", msg, ErrImageInspect
}
present := imageRef != ""
if !shouldPullImage(container, present) {
if present {
msg := fmt.Sprintf("Container image %q already present on machine", container.Image)
m.logIt(ref, v1.EventTypeNormal, events.PulledImage, logPrefix, msg, glog.Info)
return imageRef, "", nil
} else {
msg := fmt.Sprintf("Container image %q is not present with pull policy of Never", container.Image)
m.logIt(ref, v1.EventTypeWarning, events.ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning)
return "", msg, ErrImageNeverPull
}
}
backOffKey := fmt.Sprintf("%s_%s", pod.UID, container.Image)
if m.backOff.IsInBackOffSinceUpdate(backOffKey, m.backOff.Clock.Now()) {
msg := fmt.Sprintf("Back-off pulling image %q", container.Image)
m.logIt(ref, v1.EventTypeNormal, events.BackOffPullImage, logPrefix, msg, glog.Info)
return "", msg, ErrImagePullBackOff
}
m.logIt(ref, v1.EventTypeNormal, events.PullingImage, logPrefix, fmt.Sprintf("pulling image %q", container.Image), glog.Info)
pullChan := make(chan pullResult)
m.puller.pullImage(spec, pullSecrets, pullChan)
imagePullResult := <-pullChan
if imagePullResult.err != nil {
m.logIt(ref, v1.EventTypeWarning, events.FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, imagePullResult.err), glog.Warning)
m.backOff.Next(backOffKey, m.backOff.Clock.Now())
if imagePullResult.err == RegistryUnavailable {
msg := fmt.Sprintf("image pull failed for %s because the registry is unavailable.", container.Image)
return "", msg, imagePullResult.err
}
return "", imagePullResult.err.Error(), ErrImagePull
}
m.logIt(ref, v1.EventTypeNormal, events.PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info)
m.backOff.GC()
return imagePullResult.imageRef, "", nil
}
// applyDefaultImageTag parses a docker image string; if it doesn't contain any tag
// or digest, a default tag will be applied.
func applyDefaultImageTag(image string) (string, error) {
named, err := dockerref.ParseNormalizedNamed(image)
if err != nil {
return "", fmt.Errorf("couldn't parse image reference %q: %v", image, err)
}
_, isTagged := named.(dockerref.Tagged)
_, isDigested := named.(dockerref.Digested)
if !isTagged && !isDigested {
named, err := dockerref.WithTag(named, parsers.DefaultImageTag)
if err != nil {
return "", fmt.Errorf("failed to apply default image tag %q: %v", image, err)
}
image = named.String()
}
return image, nil
}
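The pull decision in shouldPullImage above reduces to a small truth table; the hypothetical in-package helper below (demoShouldPull is not part of the vendored file) spells it out.

func demoShouldPull() {
	c := &v1.Container{Image: "busybox", ImagePullPolicy: v1.PullIfNotPresent}
	fmt.Println(shouldPullImage(c, true))  // false: image present, IfNotPresent skips the pull
	fmt.Println(shouldPullImage(c, false)) // true: image absent, IfNotPresent pulls

	c.ImagePullPolicy = v1.PullAlways
	fmt.Println(shouldPullImage(c, true)) // true: Always pulls even when present

	c.ImagePullPolicy = v1.PullNever
	fmt.Println(shouldPullImage(c, false)) // false: Never refuses even when absent
}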

vendor/k8s.io/kubernetes/pkg/kubelet/images/image_manager_test.go generated vendored Normal file

@@ -0,0 +1,202 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package images
import (
"errors"
"testing"
"time"
"github.com/stretchr/testify/assert"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/flowcontrol"
. "k8s.io/kubernetes/pkg/kubelet/container"
ctest "k8s.io/kubernetes/pkg/kubelet/container/testing"
)
type pullerExpects struct {
calls []string
err error
}
type pullerTestCase struct {
containerImage string
policy v1.PullPolicy
inspectErr error
pullerErr error
expected []pullerExpects
}
func pullerTestCases() []pullerTestCase {
return []pullerTestCase{
{ // pull missing image
containerImage: "missing_image",
policy: v1.PullIfNotPresent,
inspectErr: nil,
pullerErr: nil,
expected: []pullerExpects{
{[]string{"GetImageRef", "PullImage"}, nil},
}},
{ // image present, don't pull
containerImage: "present_image",
policy: v1.PullIfNotPresent,
inspectErr: nil,
pullerErr: nil,
expected: []pullerExpects{
{[]string{"GetImageRef"}, nil},
{[]string{"GetImageRef"}, nil},
{[]string{"GetImageRef"}, nil},
}},
// image present, pull it
{containerImage: "present_image",
policy: v1.PullAlways,
inspectErr: nil,
pullerErr: nil,
expected: []pullerExpects{
{[]string{"GetImageRef", "PullImage"}, nil},
{[]string{"GetImageRef", "PullImage"}, nil},
{[]string{"GetImageRef", "PullImage"}, nil},
}},
// missing image, error PullNever
{containerImage: "missing_image",
policy: v1.PullNever,
inspectErr: nil,
pullerErr: nil,
expected: []pullerExpects{
{[]string{"GetImageRef"}, ErrImageNeverPull},
{[]string{"GetImageRef"}, ErrImageNeverPull},
{[]string{"GetImageRef"}, ErrImageNeverPull},
}},
// missing image, unable to inspect
{containerImage: "missing_image",
policy: v1.PullIfNotPresent,
inspectErr: errors.New("unknown inspectError"),
pullerErr: nil,
expected: []pullerExpects{
{[]string{"GetImageRef"}, ErrImageInspect},
{[]string{"GetImageRef"}, ErrImageInspect},
{[]string{"GetImageRef"}, ErrImageInspect},
}},
// missing image, unable to fetch
{containerImage: "typo_image",
policy: v1.PullIfNotPresent,
inspectErr: nil,
pullerErr: errors.New("404"),
expected: []pullerExpects{
{[]string{"GetImageRef", "PullImage"}, ErrImagePull},
{[]string{"GetImageRef", "PullImage"}, ErrImagePull},
{[]string{"GetImageRef"}, ErrImagePullBackOff},
{[]string{"GetImageRef", "PullImage"}, ErrImagePull},
{[]string{"GetImageRef"}, ErrImagePullBackOff},
{[]string{"GetImageRef"}, ErrImagePullBackOff},
}},
}
}
func pullerTestEnv(c pullerTestCase, serialized bool) (puller ImageManager, fakeClock *clock.FakeClock, fakeRuntime *ctest.FakeRuntime, container *v1.Container) {
container = &v1.Container{
Name: "container_name",
Image: c.containerImage,
ImagePullPolicy: c.policy,
}
backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
fakeClock = clock.NewFakeClock(time.Now())
backOff.Clock = fakeClock
fakeRuntime = &ctest.FakeRuntime{}
fakeRecorder := &record.FakeRecorder{}
fakeRuntime.ImageList = []Image{{ID: "docker.io/library/present_image:latest"}}
fakeRuntime.Err = c.pullerErr
fakeRuntime.InspectErr = c.inspectErr
puller = NewImageManager(fakeRecorder, fakeRuntime, backOff, serialized, 0, 0)
return
}
func TestParallelPuller(t *testing.T) {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test_pod",
Namespace: "test-ns",
UID: "bar",
ResourceVersion: "42",
SelfLink: "/api/v1/pods/foo",
}}
cases := pullerTestCases()
for i, c := range cases {
puller, fakeClock, fakeRuntime, container := pullerTestEnv(c, false)
for tick, expected := range c.expected {
fakeRuntime.CalledFunctions = nil
fakeClock.Step(time.Second)
_, _, err := puller.EnsureImageExists(pod, container, nil)
assert.NoError(t, fakeRuntime.AssertCalls(expected.calls), "in test %d tick=%d", i, tick)
assert.Equal(t, expected.err, err, "in test %d tick=%d", i, tick)
}
}
}
func TestSerializedPuller(t *testing.T) {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test_pod",
Namespace: "test-ns",
UID: "bar",
ResourceVersion: "42",
SelfLink: "/api/v1/pods/foo",
}}
cases := pullerTestCases()
for i, c := range cases {
puller, fakeClock, fakeRuntime, container := pullerTestEnv(c, true)
for tick, expected := range c.expected {
fakeRuntime.CalledFunctions = nil
fakeClock.Step(time.Second)
_, _, err := puller.EnsureImageExists(pod, container, nil)
assert.NoError(t, fakeRuntime.AssertCalls(expected.calls), "in test %d tick=%d", i, tick)
assert.Equal(t, expected.err, err, "in test %d tick=%d", i, tick)
}
}
}
func TestApplyDefaultImageTag(t *testing.T) {
for _, testCase := range []struct {
Input string
Output string
}{
{Input: "root", Output: "docker.io/library/root:latest"},
{Input: "root:tag", Output: "root:tag"},
{Input: "root@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", Output: "root@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
} {
image, err := applyDefaultImageTag(testCase.Input)
if err != nil {
t.Errorf("applyDefaultImageTag(%s) failed: %v", testCase.Input, err)
} else if image != testCase.Output {
t.Errorf("Expected image reference: %q, got %q", testCase.Output, image)
}
}
}

vendor/k8s.io/kubernetes/pkg/kubelet/images/puller.go generated vendored Normal file

@@ -0,0 +1,92 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package images
import (
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
type pullResult struct {
imageRef string
err error
}
type imagePuller interface {
pullImage(kubecontainer.ImageSpec, []v1.Secret, chan<- pullResult)
}
var _, _ imagePuller = &parallelImagePuller{}, &serialImagePuller{}
type parallelImagePuller struct {
imageService kubecontainer.ImageService
}
func newParallelImagePuller(imageService kubecontainer.ImageService) imagePuller {
return &parallelImagePuller{imageService}
}
func (pip *parallelImagePuller) pullImage(spec kubecontainer.ImageSpec, pullSecrets []v1.Secret, pullChan chan<- pullResult) {
go func() {
imageRef, err := pip.imageService.PullImage(spec, pullSecrets)
pullChan <- pullResult{
imageRef: imageRef,
err: err,
}
}()
}
// Maximum number of image pull requests that can be queued.
const maxImagePullRequests = 10
type serialImagePuller struct {
imageService kubecontainer.ImageService
pullRequests chan *imagePullRequest
}
func newSerialImagePuller(imageService kubecontainer.ImageService) imagePuller {
imagePuller := &serialImagePuller{imageService, make(chan *imagePullRequest, maxImagePullRequests)}
go wait.Until(imagePuller.processImagePullRequests, time.Second, wait.NeverStop)
return imagePuller
}
type imagePullRequest struct {
spec kubecontainer.ImageSpec
pullSecrets []v1.Secret
pullChan chan<- pullResult
}
func (sip *serialImagePuller) pullImage(spec kubecontainer.ImageSpec, pullSecrets []v1.Secret, pullChan chan<- pullResult) {
sip.pullRequests <- &imagePullRequest{
spec: spec,
pullSecrets: pullSecrets,
pullChan: pullChan,
}
}
func (sip *serialImagePuller) processImagePullRequests() {
for pullRequest := range sip.pullRequests {
imageRef, err := sip.imageService.PullImage(pullRequest.spec, pullRequest.pullSecrets)
pullRequest.pullChan <- pullResult{
imageRef: imageRef,
err: err,
}
}
}
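Both pullers satisfy the same imagePuller contract: the caller hands over a channel and receives exactly one pullResult on it, so the calling pattern is identical and only the scheduling differs (one goroutine per request vs. a single worker draining a bounded queue). A hypothetical in-package sketch (demoPull is not part of the vendored file):

func demoPull(service kubecontainer.ImageService, serialized bool) (string, error) {
	var puller imagePuller
	if serialized {
		puller = newSerialImagePuller(service) // single worker, bounded queue
	} else {
		puller = newParallelImagePuller(service) // goroutine per request
	}
	resultChan := make(chan pullResult)
	puller.pullImage(kubecontainer.ImageSpec{Image: "docker.io/library/busybox:latest"}, nil, resultChan)
	res := <-resultChan // blocks until the pull completes or fails
	return res.imageRef, res.err
}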

vendor/k8s.io/kubernetes/pkg/kubelet/images/types.go generated vendored Normal file

@@ -0,0 +1,55 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package images
import (
"errors"
"k8s.io/api/core/v1"
)
var (
// Container image pull failed and the kubelet is backing off the image pull.
ErrImagePullBackOff = errors.New("ImagePullBackOff")
// Unable to inspect the image.
ErrImageInspect = errors.New("ImageInspectError")
// General image pull error.
ErrImagePull = errors.New("ErrImagePull")
// Required image is absent on the host and PullPolicy is NeverPullImage.
ErrImageNeverPull = errors.New("ErrImageNeverPull")
// An HTTP error was received when pulling the image from the registry.
RegistryUnavailable = errors.New("RegistryUnavailable")
// Unable to parse the image name.
ErrInvalidImageName = errors.New("InvalidImageName")
)
// ImageManager provides an interface to manage the lifecycle of images.
// Implementations of this interface are expected to deal with pulling (downloading),
// managing, and deleting container images.
// Implementations are expected to abstract the underlying runtimes.
// Implementations are expected to be thread safe.
type ImageManager interface {
// EnsureImageExists ensures that image specified in `container` exists.
EnsureImageExists(pod *v1.Pod, container *v1.Container, pullSecrets []v1.Secret) (string, string, error)
// TODO(ronl): consolidating image managing and deleting operation in this interface
}