vendor files

Serguei Bezverkhi
2018-01-09 13:57:14 -05:00
parent 558bc6c02a
commit 7b24313bd6
16547 changed files with 4527373 additions and 0 deletions

vendor/k8s.io/kubernetes/pkg/controller/daemon/BUILD generated vendored Normal file

@@ -0,0 +1,112 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"daemon_controller.go",
"doc.go",
"update.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/daemon",
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/daemon/util:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/types:go_default_library",
"//pkg/util/labels:go_default_library",
"//pkg/util/metrics:go_default_library",
"//plugin/pkg/scheduler/algorithm:go_default_library",
"//plugin/pkg/scheduler/algorithm/predicates:go_default_library",
"//plugin/pkg/scheduler/schedulercache:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/informers/apps/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/informers/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/listers/apps/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/integer:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"daemon_controller_test.go",
"update_test.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/daemon",
library = ":go_default_library",
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/api/testapi:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/kubelet/types:go_default_library",
"//pkg/securitycontext:go_default_library",
"//pkg/util/labels:go_default_library",
"//plugin/pkg/scheduler/algorithm:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/controller/daemon/util:all-srcs",
],
tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/pkg/controller/daemon/OWNERS generated vendored Executable file

@@ -0,0 +1,8 @@
approvers:
- mikedanese
reviewers:
- janetkuo
- lukaszo
- mikedanese
- tnozicka
- k82cn

vendor/k8s.io/kubernetes/pkg/controller/daemon/daemon_controller.go generated vendored Normal file
File diff suppressed because it is too large

vendor/k8s.io/kubernetes/pkg/controller/daemon/daemon_controller_test.go generated vendored Normal file
File diff suppressed because it is too large

vendor/k8s.io/kubernetes/pkg/controller/daemon/doc.go generated vendored Normal file

@@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package daemon contains logic for watching and synchronizing
// daemons.
package daemon // import "k8s.io/kubernetes/pkg/controller/daemon"

vendor/k8s.io/kubernetes/pkg/controller/daemon/update.go generated vendored Normal file

@@ -0,0 +1,436 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package daemon
import (
"bytes"
"fmt"
"sort"
"github.com/golang/glog"
apps "k8s.io/api/apps/v1beta1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
intstrutil "k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/apimachinery/pkg/util/rand"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/daemon/util"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
)
// rollingUpdate deletes old daemon set pods, making sure that no more than
// ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable pods are unavailable.
func (dsc *DaemonSetsController) rollingUpdate(ds *extensions.DaemonSet, hash string) error {
nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
if err != nil {
return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
}
_, oldPods := dsc.getAllDaemonSetPods(ds, nodeToDaemonPods, hash)
maxUnavailable, numUnavailable, err := dsc.getUnavailableNumbers(ds, nodeToDaemonPods)
if err != nil {
return fmt.Errorf("Couldn't get unavailable numbers: %v", err)
}
oldAvailablePods, oldUnavailablePods := util.SplitByAvailablePods(ds.Spec.MinReadySeconds, oldPods)
// for oldPods, delete all pods that are not running
var oldPodsToDelete []string
glog.V(4).Infof("Marking all unavailable old pods for deletion")
for _, pod := range oldUnavailablePods {
// Skip terminating pods. We won't delete them again
if pod.DeletionTimestamp != nil {
continue
}
glog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name)
oldPodsToDelete = append(oldPodsToDelete, pod.Name)
}
glog.V(4).Infof("Marking old pods for deletion")
for _, pod := range oldAvailablePods {
if numUnavailable >= maxUnavailable {
glog.V(4).Infof("Number of unavailable DaemonSet pods: %d, is equal to or exceeds allowed maximum: %d", numUnavailable, maxUnavailable)
break
}
glog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name)
oldPodsToDelete = append(oldPodsToDelete, pod.Name)
numUnavailable++
}
return dsc.syncNodes(ds, oldPodsToDelete, []string{}, hash)
}
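// For illustration of the deletion budget above: with maxUnavailable=2 and
// one old pod already unavailable, at most one additional available old pod
// is deleted in this sync; the rest wait for a later sync, after the
// replacement pods become available.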
// constructHistory finds all histories controlled by the given DaemonSet, and
// updates the current history's revision number, or creates the current history if needed.
// It also deduplicates the current history, and adds missing unique labels to existing histories.
func (dsc *DaemonSetsController) constructHistory(ds *extensions.DaemonSet) (cur *apps.ControllerRevision, old []*apps.ControllerRevision, err error) {
var histories []*apps.ControllerRevision
var currentHistories []*apps.ControllerRevision
histories, err = dsc.controlledHistories(ds)
if err != nil {
return nil, nil, err
}
for _, history := range histories {
// Add the unique label if it's not already added to the history.
// We use the history name instead of computing a hash, so that we don't need to worry about hash collisions.
if _, ok := history.Labels[extensions.DefaultDaemonSetUniqueLabelKey]; !ok {
toUpdate := history.DeepCopy()
toUpdate.Labels[extensions.DefaultDaemonSetUniqueLabelKey] = toUpdate.Name
history, err = dsc.kubeClient.AppsV1beta1().ControllerRevisions(ds.Namespace).Update(toUpdate)
if err != nil {
return nil, nil, err
}
}
// Compare histories with ds to separate cur and old history
found := false
found, err = Match(ds, history)
if err != nil {
return nil, nil, err
}
if found {
currentHistories = append(currentHistories, history)
} else {
old = append(old, history)
}
}
currRevision := maxRevision(old) + 1
switch len(currentHistories) {
case 0:
// Create a new history if the current one isn't found
cur, err = dsc.snapshot(ds, currRevision)
if err != nil {
return nil, nil, err
}
default:
cur, err = dsc.dedupCurHistories(ds, currentHistories)
if err != nil {
return nil, nil, err
}
// Update revision number if necessary
if cur.Revision < currRevision {
toUpdate := cur.DeepCopy()
toUpdate.Revision = currRevision
_, err = dsc.kubeClient.AppsV1beta1().ControllerRevisions(ds.Namespace).Update(toUpdate)
if err != nil {
return nil, nil, err
}
}
}
return cur, old, err
}
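// For illustration: if the old histories have revisions {1, 2} and none
// matches the current template, constructHistory snapshots a new
// ControllerRevision with Revision 3 (maxRevision(old) + 1).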
func (dsc *DaemonSetsController) cleanupHistory(ds *extensions.DaemonSet, old []*apps.ControllerRevision) error {
nodesToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
if err != nil {
return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
}
toKeep := int(*ds.Spec.RevisionHistoryLimit)
toKill := len(old) - toKeep
if toKill <= 0 {
return nil
}
// Find all hashes of live pods
liveHashes := make(map[string]bool)
for _, pods := range nodesToDaemonPods {
for _, pod := range pods {
if hash := pod.Labels[extensions.DefaultDaemonSetUniqueLabelKey]; len(hash) > 0 {
liveHashes[hash] = true
}
}
}
// Find all live histories with the above hashes
liveHistory := make(map[string]bool)
for _, history := range old {
if hash := history.Labels[extensions.DefaultDaemonSetUniqueLabelKey]; liveHashes[hash] {
liveHistory[history.Name] = true
}
}
// Clean up old histories from lowest to highest revision (from oldest to newest)
sort.Sort(historiesByRevision(old))
for _, history := range old {
if toKill <= 0 {
break
}
if liveHistory[history.Name] {
continue
}
// Clean up
err := dsc.kubeClient.AppsV1beta1().ControllerRevisions(ds.Namespace).Delete(history.Name, nil)
if err != nil {
return err
}
toKill--
}
return nil
}
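// For illustration: with *ds.Spec.RevisionHistoryLimit == 10 and 12 old
// histories, toKill is 2, so the two lowest-revision histories whose hashes
// no longer match any live pod are deleted.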
// maxRevision returns the max revision number of the given list of histories
func maxRevision(histories []*apps.ControllerRevision) int64 {
max := int64(0)
for _, history := range histories {
if history.Revision > max {
max = history.Revision
}
}
return max
}
func (dsc *DaemonSetsController) dedupCurHistories(ds *extensions.DaemonSet, curHistories []*apps.ControllerRevision) (*apps.ControllerRevision, error) {
if len(curHistories) == 1 {
return curHistories[0], nil
}
var maxRevision int64
var keepCur *apps.ControllerRevision
for _, cur := range curHistories {
if cur.Revision >= maxRevision {
keepCur = cur
maxRevision = cur.Revision
}
}
// Clean up duplicates and relabel pods
for _, cur := range curHistories {
if cur.Name == keepCur.Name {
continue
}
// Relabel pods before dedup
pods, err := dsc.getDaemonPods(ds)
if err != nil {
return nil, err
}
for _, pod := range pods {
if pod.Labels[extensions.DefaultDaemonSetUniqueLabelKey] != keepCur.Labels[extensions.DefaultDaemonSetUniqueLabelKey] {
toUpdate := pod.DeepCopy()
if toUpdate.Labels == nil {
toUpdate.Labels = make(map[string]string)
}
toUpdate.Labels[extensions.DefaultDaemonSetUniqueLabelKey] = keepCur.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
_, err = dsc.kubeClient.CoreV1().Pods(ds.Namespace).Update(toUpdate)
if err != nil {
return nil, err
}
}
}
// Remove duplicates
err = dsc.kubeClient.AppsV1beta1().ControllerRevisions(ds.Namespace).Delete(cur.Name, nil)
if err != nil {
return nil, err
}
}
return keepCur, nil
}
// controlledHistories returns all ControllerRevisions controlled by the given DaemonSet.
// This also reconciles ControllerRef by adopting/orphaning.
// Note that returned histories are pointers to objects in the cache.
// If you want to modify one, you need to deep-copy it first.
func (dsc *DaemonSetsController) controlledHistories(ds *extensions.DaemonSet) ([]*apps.ControllerRevision, error) {
selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
if err != nil {
return nil, err
}
// List all histories to include those that don't match the selector anymore
// but have a ControllerRef pointing to the controller.
histories, err := dsc.historyLister.List(labels.Everything())
if err != nil {
return nil, err
}
// If any adoptions are attempted, we should first recheck for deletion with
// an uncached quorum read sometime after listing Pods (see #42639).
canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
fresh, err := dsc.kubeClient.ExtensionsV1beta1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
if fresh.UID != ds.UID {
return nil, fmt.Errorf("original DaemonSet %v/%v is gone: got uid %v, wanted %v", ds.Namespace, ds.Name, fresh.UID, ds.UID)
}
return fresh, nil
})
// Use ControllerRefManager to adopt/orphan as needed.
cm := controller.NewControllerRevisionControllerRefManager(dsc.crControl, ds, selector, controllerKind, canAdoptFunc)
return cm.ClaimControllerRevisions(histories)
}
// Match checks whether the given DaemonSet's template matches the template stored in the given history.
func Match(ds *extensions.DaemonSet, history *apps.ControllerRevision) (bool, error) {
patch, err := getPatch(ds)
if err != nil {
return false, err
}
return bytes.Equal(patch, history.Data.Raw), nil
}
// getPatch returns a strategic merge patch that can be applied to restore a DaemonSet to a
// previous version. If the returned error is nil the patch is valid. The current state that we save is just the
// PodSpecTemplate. We can modify this later to encompass more state (or less) and remain compatible with previously
// recorded patches.
func getPatch(ds *extensions.DaemonSet) ([]byte, error) {
dsBytes, err := json.Marshal(ds)
if err != nil {
return nil, err
}
var raw map[string]interface{}
err = json.Unmarshal(dsBytes, &raw)
if err != nil {
return nil, err
}
objCopy := make(map[string]interface{})
specCopy := make(map[string]interface{})
// Create a patch of the DaemonSet that replaces spec.template
spec := raw["spec"].(map[string]interface{})
template := spec["template"].(map[string]interface{})
specCopy["template"] = template
template["$patch"] = "replace"
objCopy["spec"] = specCopy
patch, err := json.Marshal(objCopy)
return patch, err
}
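// For illustration, the patch produced above has the shape
//
//	{"spec":{"template":{...,"$patch":"replace"}}}
//
// so applying it as a strategic merge patch replaces spec.template wholesale
// rather than merging fields into it.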
func (dsc *DaemonSetsController) snapshot(ds *extensions.DaemonSet, revision int64) (*apps.ControllerRevision, error) {
patch, err := getPatch(ds)
if err != nil {
return nil, err
}
hash := fmt.Sprint(controller.ComputeHash(&ds.Spec.Template, ds.Status.CollisionCount))
name := ds.Name + "-" + rand.SafeEncodeString(hash)
history := &apps.ControllerRevision{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: ds.Namespace,
Labels: labelsutil.CloneAndAddLabel(ds.Spec.Template.Labels, extensions.DefaultDaemonSetUniqueLabelKey, hash),
Annotations: ds.Annotations,
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(ds, controllerKind)},
},
Data: runtime.RawExtension{Raw: patch},
Revision: revision,
}
history, err = dsc.kubeClient.AppsV1beta1().ControllerRevisions(ds.Namespace).Create(history)
if errors.IsAlreadyExists(err) {
// TODO: Is it okay to get from historyLister?
existedHistory, getErr := dsc.kubeClient.AppsV1beta1().ControllerRevisions(ds.Namespace).Get(name, metav1.GetOptions{})
if getErr != nil {
return nil, getErr
}
// Check if we already created it
done, err := Match(ds, existedHistory)
if err != nil {
return nil, err
}
if done {
return existedHistory, nil
}
// Handle name collisions between different histories
// TODO: Is it okay to get from dsLister?
currDS, getErr := dsc.kubeClient.ExtensionsV1beta1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{})
if getErr != nil {
return nil, getErr
}
if currDS.Status.CollisionCount == nil {
currDS.Status.CollisionCount = new(int32)
}
*currDS.Status.CollisionCount++
_, updateErr := dsc.kubeClient.ExtensionsV1beta1().DaemonSets(ds.Namespace).UpdateStatus(currDS)
if updateErr != nil {
return nil, updateErr
}
glog.V(2).Infof("Found a hash collision for DaemonSet %q - bumping collisionCount to %d to resolve it", ds.Name, *currDS.Status.CollisionCount)
return nil, err
}
return history, err
}
func (dsc *DaemonSetsController) getAllDaemonSetPods(ds *extensions.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod, hash string) ([]*v1.Pod, []*v1.Pod) {
var newPods []*v1.Pod
var oldPods []*v1.Pod
for _, pods := range nodeToDaemonPods {
for _, pod := range pods {
if util.IsPodUpdated(ds.Spec.TemplateGeneration, pod, hash) {
newPods = append(newPods, pod)
} else {
oldPods = append(oldPods, pod)
}
}
}
return newPods, oldPods
}
func (dsc *DaemonSetsController) getUnavailableNumbers(ds *extensions.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod) (int, int, error) {
glog.V(4).Infof("Getting unavailable numbers")
// TODO: get nodeList once in syncDaemonSet and pass it to other functions
nodeList, err := dsc.nodeLister.List(labels.Everything())
if err != nil {
return -1, -1, fmt.Errorf("couldn't get list of nodes during rolling update of daemon set %#v: %v", ds, err)
}
var numUnavailable, desiredNumberScheduled int
for i := range nodeList {
node := nodeList[i]
wantToRun, _, _, err := dsc.nodeShouldRunDaemonPod(node, ds)
if err != nil {
return -1, -1, err
}
if !wantToRun {
continue
}
desiredNumberScheduled++
daemonPods, exists := nodeToDaemonPods[node.Name]
if !exists {
numUnavailable++
continue
}
available := false
for _, pod := range daemonPods {
// For the purposes of update we ensure that the Pod is both available and not terminating
if podutil.IsPodAvailable(pod, ds.Spec.MinReadySeconds, metav1.Now()) && pod.DeletionTimestamp == nil {
available = true
break
}
}
if !available {
numUnavailable++
}
}
maxUnavailable, err := intstrutil.GetValueFromIntOrPercent(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, desiredNumberScheduled, true)
if err != nil {
return -1, -1, fmt.Errorf("Invalid value for MaxUnavailable: %v", err)
}
glog.V(4).Infof(" DaemonSet %s/%s, maxUnavailable: %d, numUnavailable: %d", ds.Namespace, ds.Name, maxUnavailable, numUnavailable)
return maxUnavailable, numUnavailable, nil
}
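// Worked example: with two nodes that want to run a daemon pod, both pods
// available, and MaxUnavailable set to "50%", GetValueFromIntOrPercent
// resolves maxUnavailable to 1 (50% of 2, rounded up) and numUnavailable is
// 0; see the "MaxUnavailable in percents" case in update_test.go.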
type historiesByRevision []*apps.ControllerRevision
func (h historiesByRevision) Len() int { return len(h) }
func (h historiesByRevision) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h historiesByRevision) Less(i, j int) bool {
return h[i].Revision < h[j].Revision
}

vendor/k8s.io/kubernetes/pkg/controller/daemon/update_test.go generated vendored Normal file

@@ -0,0 +1,310 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package daemon
import (
"testing"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
func TestDaemonSetUpdatesPods(t *testing.T) {
ds := newDaemonSet("foo")
manager, podControl, _, err := newTestController(ds)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
maxUnavailable := 2
addNodes(manager.nodeStore, 0, 5, nil)
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
markPodsReady(podControl.podStore)
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType
intStr := intstr.FromInt(maxUnavailable)
ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
ds.Spec.TemplateGeneration++
manager.dsStore.Update(ds)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
markPodsReady(podControl.podStore)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
markPodsReady(podControl.podStore)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 1, 0)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0)
markPodsReady(podControl.podStore)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
clearExpectations(t, manager, ds, podControl)
}
func TestDaemonSetUpdatesWhenNewPodIsNotReady(t *testing.T) {
ds := newDaemonSet("foo")
manager, podControl, _, err := newTestController(ds)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
maxUnavailable := 3
addNodes(manager.nodeStore, 0, 5, nil)
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
markPodsReady(podControl.podStore)
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType
intStr := intstr.FromInt(maxUnavailable)
ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
ds.Spec.TemplateGeneration++
manager.dsStore.Update(ds)
// new pods are not ready, so numUnavailable == maxUnavailable
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
clearExpectations(t, manager, ds, podControl)
}
func TestDaemonSetUpdatesAllOldPodsNotReady(t *testing.T) {
ds := newDaemonSet("foo")
manager, podControl, _, err := newTestController(ds)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
maxUnavailable := 3
addNodes(manager.nodeStore, 0, 5, nil)
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType
intStr := intstr.FromInt(maxUnavailable)
ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
ds.Spec.TemplateGeneration++
manager.dsStore.Update(ds)
// all old pods are unavailable, so they should be removed
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 5, 0)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
clearExpectations(t, manager, ds, podControl)
}
func TestDaemonSetUpdatesNoTemplateChanged(t *testing.T) {
ds := newDaemonSet("foo")
manager, podControl, _, err := newTestController(ds)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
maxUnavailable := 3
addNodes(manager.nodeStore, 0, 5, nil)
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType
intStr := intstr.FromInt(maxUnavailable)
ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
manager.dsStore.Update(ds)
// template is not changed, so no pod should be removed
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
clearExpectations(t, manager, ds, podControl)
}
func TestGetUnavailableNumbers(t *testing.T) {
cases := []struct {
name string
Manager *daemonSetsController
ds *extensions.DaemonSet
nodeToPods map[string][]*v1.Pod
maxUnavailable int
numUnavailable int
Err error
}{
{
name: "No nodes",
Manager: func() *daemonSetsController {
manager, _, _, err := newTestController()
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
return manager
}(),
ds: func() *extensions.DaemonSet {
ds := newDaemonSet("x")
intStr := intstr.FromInt(0)
ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
return ds
}(),
nodeToPods: make(map[string][]*v1.Pod),
maxUnavailable: 0,
numUnavailable: 0,
},
{
name: "Two nodes with ready pods",
Manager: func() *daemonSetsController {
manager, _, _, err := newTestController()
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
addNodes(manager.nodeStore, 0, 2, nil)
return manager
}(),
ds: func() *extensions.DaemonSet {
ds := newDaemonSet("x")
intStr := intstr.FromInt(1)
ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
return ds
}(),
nodeToPods: func() map[string][]*v1.Pod {
mapping := make(map[string][]*v1.Pod)
pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil)
markPodReady(pod0)
markPodReady(pod1)
mapping["node-0"] = []*v1.Pod{pod0}
mapping["node-1"] = []*v1.Pod{pod1}
return mapping
}(),
maxUnavailable: 1,
numUnavailable: 0,
},
{
name: "Two nodes, one node without pods",
Manager: func() *daemonSetsController {
manager, _, _, err := newTestController()
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
addNodes(manager.nodeStore, 0, 2, nil)
return manager
}(),
ds: func() *extensions.DaemonSet {
ds := newDaemonSet("x")
intStr := intstr.FromInt(0)
ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
return ds
}(),
nodeToPods: func() map[string][]*v1.Pod {
mapping := make(map[string][]*v1.Pod)
pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
markPodReady(pod0)
mapping["node-0"] = []*v1.Pod{pod0}
return mapping
}(),
maxUnavailable: 0,
numUnavailable: 1,
},
{
name: "Two nodes with pods, MaxUnavailable in percents",
Manager: func() *daemonSetsController {
manager, _, _, err := newTestController()
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
addNodes(manager.nodeStore, 0, 2, nil)
return manager
}(),
ds: func() *extensions.DaemonSet {
ds := newDaemonSet("x")
intStr := intstr.FromString("50%")
ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
return ds
}(),
nodeToPods: func() map[string][]*v1.Pod {
mapping := make(map[string][]*v1.Pod)
pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil)
markPodReady(pod0)
markPodReady(pod1)
mapping["node-0"] = []*v1.Pod{pod0}
mapping["node-1"] = []*v1.Pod{pod1}
return mapping
}(),
maxUnavailable: 1,
numUnavailable: 0,
},
{
name: "Two nodes with pods, MaxUnavailable in percents, pod terminating",
Manager: func() *daemonSetsController {
manager, _, _, err := newTestController()
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
addNodes(manager.nodeStore, 0, 2, nil)
return manager
}(),
ds: func() *extensions.DaemonSet {
ds := newDaemonSet("x")
intStr := intstr.FromString("50%")
ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
return ds
}(),
nodeToPods: func() map[string][]*v1.Pod {
mapping := make(map[string][]*v1.Pod)
pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil)
now := metav1.Now()
markPodReady(pod0)
markPodReady(pod1)
pod1.DeletionTimestamp = &now
mapping["node-0"] = []*v1.Pod{pod0}
mapping["node-1"] = []*v1.Pod{pod1}
return mapping
}(),
maxUnavailable: 1,
numUnavailable: 1,
},
}
for _, c := range cases {
c.Manager.dsStore.Add(c.ds)
maxUnavailable, numUnavailable, err := c.Manager.getUnavailableNumbers(c.ds, c.nodeToPods)
if err != nil && c.Err != nil {
if c.Err != err {
t.Errorf("Test case: %s. Expected error: %v but got: %v", c.name, c.Err, err)
}
} else if err != nil {
t.Errorf("Test case: %s. Unexpected error: %v", c.name, err)
} else if maxUnavailable != c.maxUnavailable || numUnavailable != c.numUnavailable {
t.Errorf("Test case: %s. Wrong values. maxUnavailable: %d, expected: %d, numUnavailable: %d. expected: %d", c.name, maxUnavailable, c.maxUnavailable, numUnavailable, c.numUnavailable)
}
}
}

vendor/k8s.io/kubernetes/pkg/controller/daemon/util/BUILD generated vendored Normal file

@@ -0,0 +1,51 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = ["daemonset_util.go"],
importpath = "k8s.io/kubernetes/pkg/controller/daemon/util",
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/types:go_default_library",
"//pkg/util/labels:go_default_library",
"//plugin/pkg/scheduler/algorithm:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
go_test(
name = "go_default_test",
srcs = ["daemonset_util_test.go"],
importpath = "k8s.io/kubernetes/pkg/controller/daemon/util",
library = ":go_default_library",
deps = [
"//pkg/api/testapi:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
],
)

vendor/k8s.io/kubernetes/pkg/controller/daemon/util/daemonset_util.go generated vendored Normal file

@@ -0,0 +1,117 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
)
// CreatePodTemplate returns a copy of the provided template with an additional
// label containing the templateGeneration (for backward compatibility) and the
// hash of the provided template, and sets the default daemon tolerations.
func CreatePodTemplate(template v1.PodTemplateSpec, generation int64, hash string) v1.PodTemplateSpec {
newTemplate := *template.DeepCopy()
// DaemonSet pods shouldn't be deleted by NodeController in case of node problems.
// Add infinite toleration for taint notReady:NoExecute here
// to survive taint-based eviction enforced by NodeController
// when node turns not ready.
v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
Key: algorithm.TaintNodeNotReady,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoExecute,
})
// DaemonSet pods shouldn't be deleted by NodeController in case of node problems.
// Add infinite toleration for taint unreachable:NoExecute here
// to survive taint-based eviction enforced by NodeController
// when node turns unreachable.
v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
Key: algorithm.TaintNodeUnreachable,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoExecute,
})
// According to the TaintNodesByCondition feature, all DaemonSet pods should tolerate
// the MemoryPressure and DiskPressure taints, and critical pods should tolerate
// the OutOfDisk taint.
v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
Key: algorithm.TaintNodeDiskPressure,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
})
v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
Key: algorithm.TaintNodeMemoryPressure,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
})
// TODO(#48843) OutOfDisk taints will be removed in 1.10
if utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation) &&
kubelettypes.IsCritical(newTemplate.Namespace, newTemplate.Annotations) {
v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
Key: algorithm.TaintNodeOutOfDisk,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoExecute,
})
}
templateGenerationStr := fmt.Sprint(generation)
newTemplate.ObjectMeta.Labels = labelsutil.CloneAndAddLabel(
template.ObjectMeta.Labels,
extensions.DaemonSetTemplateGenerationKey,
templateGenerationStr,
)
// TODO: do we need to validate if the DaemonSet is RollingUpdate or not?
if len(hash) > 0 {
newTemplate.ObjectMeta.Labels[extensions.DefaultDaemonSetUniqueLabelKey] = hash
}
return newTemplate
}
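// For illustration: given generation=2 and a non-empty hash such as
// "6c479c5998" (hypothetical), the returned template carries the label
// extensions.DaemonSetTemplateGenerationKey="2" plus
// extensions.DefaultDaemonSetUniqueLabelKey="6c479c5998", in addition to the
// tolerations added above.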
// IsPodUpdated checks if the pod contains a label value that matches either the templateGeneration or the hash
func IsPodUpdated(dsTemplateGeneration int64, pod *v1.Pod, hash string) bool {
// Compare with the hash to see if the pod is updated; we need to maintain backward compatibility of templateGeneration
templateMatches := pod.Labels[extensions.DaemonSetTemplateGenerationKey] == fmt.Sprint(dsTemplateGeneration)
hashMatches := len(hash) > 0 && pod.Labels[extensions.DefaultDaemonSetUniqueLabelKey] == hash
return hashMatches || templateMatches
}
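// Note: the OR above keeps backward compatibility: a pod labeled only with a
// matching template generation (e.g. one created before hashes were recorded)
// still counts as updated, even when hash is empty.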
// SplitByAvailablePods splits the provided daemon set pods by availability
func SplitByAvailablePods(minReadySeconds int32, pods []*v1.Pod) ([]*v1.Pod, []*v1.Pod) {
unavailablePods := []*v1.Pod{}
availablePods := []*v1.Pod{}
for _, pod := range pods {
if podutil.IsPodAvailable(pod, minReadySeconds, metav1.Now()) {
availablePods = append(availablePods, pod)
} else {
unavailablePods = append(unavailablePods, pod)
}
}
return availablePods, unavailablePods
}

vendor/k8s.io/kubernetes/pkg/controller/daemon/util/daemonset_util_test.go generated vendored Normal file

@@ -0,0 +1,164 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"testing"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/testapi"
)
func newPod(podName string, nodeName string, label map[string]string) *v1.Pod {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{APIVersion: testapi.Extensions.GroupVersion().String()},
ObjectMeta: metav1.ObjectMeta{
Labels: label,
Namespace: metav1.NamespaceDefault,
},
Spec: v1.PodSpec{
NodeName: nodeName,
Containers: []v1.Container{
{
Image: "foo/bar",
},
},
},
}
pod.Name = podName
return pod
}
func TestIsPodUpdated(t *testing.T) {
templateGeneration := int64(12345)
hash := "55555"
labels := map[string]string{extensions.DaemonSetTemplateGenerationKey: fmt.Sprint(templateGeneration), extensions.DefaultDaemonSetUniqueLabelKey: hash}
labelsNoHash := map[string]string{extensions.DaemonSetTemplateGenerationKey: fmt.Sprint(templateGeneration)}
tests := []struct {
test string
templateGeneration int64
pod *v1.Pod
hash string
isUpdated bool
}{
{
"templateGeneration and hash both match",
templateGeneration,
newPod("pod1", "node1", labels),
hash,
true,
},
{
"templateGeneration matches, hash doesn't",
templateGeneration,
newPod("pod1", "node1", labels),
hash + "123",
true,
},
{
"templateGeneration matches, no hash label, has hash",
templateGeneration,
newPod("pod1", "node1", labelsNoHash),
hash,
true,
},
{
"templateGeneration matches, no hash label, no hash",
templateGeneration,
newPod("pod1", "node1", labelsNoHash),
"",
true,
},
{
"templateGeneration matches, has hash label, no hash",
templateGeneration,
newPod("pod1", "node1", labels),
"",
true,
},
{
"templateGeneration doesn't match, hash does",
templateGeneration + 1,
newPod("pod1", "node1", labels),
hash,
true,
},
{
"templateGeneration and hash don't match",
templateGeneration + 1,
newPod("pod1", "node1", labels),
hash + "123",
false,
},
{
"empty labels, no hash",
templateGeneration,
newPod("pod1", "node1", map[string]string{}),
"",
false,
},
{
"empty labels",
templateGeneration,
newPod("pod1", "node1", map[string]string{}),
hash,
false,
},
{
"no labels",
templateGeneration,
newPod("pod1", "node1", nil),
hash,
false,
},
}
for _, test := range tests {
updated := IsPodUpdated(test.templateGeneration, test.pod, test.hash)
if updated != test.isUpdated {
t.Errorf("%s: IsPodUpdated returned wrong value. Expected %t, got %t", test.test, test.isUpdated, updated)
}
}
}
func TestCreatePodTemplate(t *testing.T) {
tests := []struct {
templateGeneration int64
hash string
expectUniqueLabel bool
}{
{int64(1), "", false},
{int64(2), "3242341807", true},
}
for _, test := range tests {
podTemplateSpec := v1.PodTemplateSpec{}
newPodTemplate := CreatePodTemplate(podTemplateSpec, test.templateGeneration, test.hash)
val, exists := newPodTemplate.ObjectMeta.Labels[extensions.DaemonSetTemplateGenerationKey]
if !exists || val != fmt.Sprint(test.templateGeneration) {
t.Errorf("Expected podTemplateSpec to have generation label value: %d, got: %s", test.templateGeneration, val)
}
val, exists = newPodTemplate.ObjectMeta.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
if test.expectUniqueLabel && (!exists || val != test.hash) {
t.Errorf("Expected podTemplateSpec to have hash label value: %s, got: %s", test.hash, val)
}
if !test.expectUniqueLabel && exists {
t.Errorf("Expected podTemplateSpec to have no hash label, got: %s", val)
}
}
}
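
A minimal, self-contained sketch (not part of this commit) of the MaxUnavailable arithmetic used by getUnavailableNumbers above, assuming only k8s.io/apimachinery on the import path. intstr.GetValueFromIntOrPercent resolves integer values directly and percentage values against the count of nodes that should run a daemon pod, rounding up:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	cases := []struct {
		maxUnavailable intstr.IntOrString
		scheduled      int // nodes that should run a daemon pod
	}{
		{intstr.FromInt(2), 5},        // plain integer: used as-is
		{intstr.FromString("50%"), 2}, // 50% of 2 -> 1
		{intstr.FromString("50%"), 5}, // 2.5 rounds up to 3
	}
	for _, c := range cases {
		// The final argument requests rounding up, as getUnavailableNumbers does.
		n, err := intstr.GetValueFromIntOrPercent(&c.maxUnavailable, c.scheduled, true)
		if err != nil {
			panic(err)
		}
		fmt.Printf("maxUnavailable=%s, scheduled=%d -> %d\n", c.maxUnavailable.String(), c.scheduled, n)
	}
}

The rounding-up choice is why a "50%" strategy on an odd node count allows slightly more than half the pods to be unavailable at once.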