vendor files

This commit is contained in:
Serguei Bezverkhi
2018-01-09 13:57:14 -05:00
parent 558bc6c02a
commit 7b24313bd6
16547 changed files with 4527373 additions and 0 deletions

View File

@ -0,0 +1,98 @@
# Bazel build definitions for the statefulset controller package.
# NOTE(review): the filegroups below are tagged "automanaged" (gazelle-style);
# prefer regenerating these rules over hand-editing where possible.
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
# Production sources of the statefulset controller.
go_library(
name = "go_default_library",
srcs = [
"stateful_pod_control.go",
"stateful_set.go",
"stateful_set_control.go",
"stateful_set_status_updater.go",
"stateful_set_utils.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/statefulset",
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/history:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers/apps/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/apps/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)
# Unit tests for the package; "library" embeds the production sources so
# tests compile in the same package.
go_test(
name = "go_default_test",
srcs = [
"stateful_pod_control_test.go",
"stateful_set_control_test.go",
"stateful_set_status_updater_test.go",
"stateful_set_test.go",
"stateful_set_utils_test.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/statefulset",
library = ":go_default_library",
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/apps/install:go_default_library",
"//pkg/apis/core/install:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/history:go_default_library",
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/informers/apps/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/listers/apps/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)
# Filegroups used by the tree-wide build; maintained by automation.
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

13
vendor/k8s.io/kubernetes/pkg/controller/statefulset/OWNERS generated vendored Executable file
View File

@ -0,0 +1,13 @@
approvers:
- enisoc
- foxish
- janetkuo
- kow3ns
- smarterclayton
reviewers:
- enisoc
- foxish
- janetkuo
- kow3ns
- smarterclayton
- tnozicka

View File

@ -0,0 +1,201 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package statefulset
import (
"fmt"
"strings"
apps "k8s.io/api/apps/v1beta1"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
errorutils "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientset "k8s.io/client-go/kubernetes"
appslisters "k8s.io/client-go/listers/apps/v1beta1"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/retry"
)
// StatefulPodControlInterface defines the interface that StatefulSetController uses to create, update, and delete Pods,
// and to update the Status of a StatefulSet. It follows the design paradigms used for PodControl, but its
// implementation provides for PVC creation, ordered Pod creation, ordered Pod termination, and Pod identity enforcement.
// Like controller.PodControlInterface, it is implemented as an interface to provide for testing fakes.
type StatefulPodControlInterface interface {
	// CreateStatefulPod creates a Pod in a StatefulSet. Any PVCs necessary for the Pod are created prior to creating
	// the Pod. If the returned error is nil the Pod and its PVCs have been created.
	CreateStatefulPod(set *apps.StatefulSet, pod *v1.Pod) error
	// UpdateStatefulPod updates a Pod in a StatefulSet. If the Pod already has the correct identity and stable
	// storage this method is a no-op. If the Pod must be mutated to conform to the Set, it is mutated and updated.
	// pod is an in-out parameter, and any updates made to the pod are reflected as mutations to this parameter. If
	// the update is successful, the returned error is nil.
	UpdateStatefulPod(set *apps.StatefulSet, pod *v1.Pod) error
	// DeleteStatefulPod deletes a Pod in a StatefulSet. The Pod's PVCs are not deleted. If the delete is successful,
	// the returned error is nil.
	DeleteStatefulPod(set *apps.StatefulSet, pod *v1.Pod) error
}
// NewRealStatefulPodControl returns a StatefulPodControlInterface backed by the given clientset
// and listers. Events produced by its operations are emitted through recorder.
func NewRealStatefulPodControl(
	client clientset.Interface,
	setLister appslisters.StatefulSetLister,
	podLister corelisters.PodLister,
	pvcLister corelisters.PersistentVolumeClaimLister,
	recorder record.EventRecorder,
) StatefulPodControlInterface {
	return &realStatefulPodControl{
		client:    client,
		setLister: setLister,
		podLister: podLister,
		pvcLister: pvcLister,
		recorder:  recorder,
	}
}
// realStatefulPodControl implements StatefulPodControlInterface using a clientset.Interface to communicate with the
// API server. The struct is package private as the internal details are irrelevant to importing packages.
type realStatefulPodControl struct {
	// client is used for all create/update/delete calls against the API server.
	client clientset.Interface
	// setLister reads StatefulSets from a shared informer cache.
	setLister appslisters.StatefulSetLister
	// podLister reads Pods from a shared informer cache (used to refresh a Pod on update conflicts).
	podLister corelisters.PodLister
	// pvcLister reads PersistentVolumeClaims from a shared informer cache.
	pvcLister corelisters.PersistentVolumeClaimLister
	// recorder emits events describing the outcome of Pod/PVC operations.
	recorder record.EventRecorder
}
// CreateStatefulPod creates pod in set's namespace, first ensuring that any PersistentVolumeClaims
// the Pod requires exist. The outcome is recorded as an event, except when the Pod already exists,
// in which case the AlreadyExists error is returned to the caller without emitting an event.
func (spc *realStatefulPodControl) CreateStatefulPod(set *apps.StatefulSet, pod *v1.Pod) error {
	// The Pod's PVCs must be in place before the Pod itself is created.
	if pvcErr := spc.createPersistentVolumeClaims(set, pod); pvcErr != nil {
		spc.recordPodEvent("create", set, pod, pvcErr)
		return pvcErr
	}
	_, createErr := spc.client.CoreV1().Pods(set.Namespace).Create(pod)
	// Suppress event generation (but still surface the error) when the Pod already exists.
	if apierrors.IsAlreadyExists(createErr) {
		return createErr
	}
	spc.recordPodEvent("create", set, pod, createErr)
	return createErr
}
// UpdateStatefulPod makes pod conform to set's identity and storage requirements, committing the
// change with retry.RetryOnConflict. If the Pod is already consistent the method is a no-op and no
// event is recorded. pod is mutated in place; on an update failure the latest observed copy is
// re-read from the Pod lister before the next retry.
func (spc *realStatefulPodControl) UpdateStatefulPod(set *apps.StatefulSet, pod *v1.Pod) error {
	attemptedUpdate := false
	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		// assume the Pod is consistent
		consistent := true
		// if the Pod does not conform to its identity, update the identity and dirty the Pod
		if !identityMatches(set, pod) {
			updateIdentity(set, pod)
			consistent = false
		}
		// if the Pod does not conform to the StatefulSet's storage requirements, update the Pod's PVC's,
		// dirty the Pod, and create any missing PVCs
		if !storageMatches(set, pod) {
			updateStorage(set, pod)
			consistent = false
			if err := spc.createPersistentVolumeClaims(set, pod); err != nil {
				spc.recordPodEvent("update", set, pod, err)
				return err
			}
		}
		// if the Pod is not dirty, do nothing
		if consistent {
			return nil
		}
		attemptedUpdate = true
		// commit the update, retrying on conflicts
		_, updateErr := spc.client.CoreV1().Pods(set.Namespace).Update(pod)
		if updateErr == nil {
			return nil
		}
		// The update failed: refresh pod from the lister so the next retry starts from the latest
		// observed state.
		if updated, err := spc.podLister.Pods(set.Namespace).Get(pod.Name); err == nil {
			// make a copy so we don't mutate the shared cache
			pod = updated.DeepCopy()
		} else {
			utilruntime.HandleError(fmt.Errorf("error getting updated Pod %s/%s from lister: %v", set.Namespace, pod.Name, err))
		}
		return updateErr
	})
	// Only record an event when an API update was actually attempted; pure no-ops stay silent.
	if attemptedUpdate {
		spc.recordPodEvent("update", set, pod, err)
	}
	return err
}
// DeleteStatefulPod deletes pod from set's namespace (its PVCs are left in place) and records an
// event describing the outcome.
func (spc *realStatefulPodControl) DeleteStatefulPod(set *apps.StatefulSet, pod *v1.Pod) error {
	deleteErr := spc.client.CoreV1().Pods(set.Namespace).Delete(pod.Name, nil)
	spc.recordPodEvent("delete", set, pod, deleteErr)
	return deleteErr
}
// recordPodEvent records an event for verb applied to a Pod in a StatefulSet. A nil err produces a
// v1.EventTypeNormal event; a non-nil err produces a v1.EventTypeWarning event including the error text.
func (spc *realStatefulPodControl) recordPodEvent(verb string, set *apps.StatefulSet, pod *v1.Pod, err error) {
	lower := strings.ToLower(verb)
	title := strings.Title(verb)
	if err != nil {
		spc.recorder.Event(set, v1.EventTypeWarning,
			fmt.Sprintf("Failed%s", title),
			fmt.Sprintf("%s Pod %s in StatefulSet %s failed error: %s",
				lower, pod.Name, set.Name, err))
		return
	}
	spc.recorder.Event(set, v1.EventTypeNormal,
		fmt.Sprintf("Successful%s", title),
		fmt.Sprintf("%s Pod %s in StatefulSet %s successful",
			lower, pod.Name, set.Name))
}
// recordClaimEvent records an event for verb applied to the PersistentVolumeClaim of a Pod in a
// StatefulSet. A nil err produces a v1.EventTypeNormal event; a non-nil err produces a
// v1.EventTypeWarning event carrying the error text.
func (spc *realStatefulPodControl) recordClaimEvent(verb string, set *apps.StatefulSet, pod *v1.Pod, claim *v1.PersistentVolumeClaim, err error) {
	lower := strings.ToLower(verb)
	title := strings.Title(verb)
	if err != nil {
		spc.recorder.Event(set, v1.EventTypeWarning,
			fmt.Sprintf("Failed%s", title),
			fmt.Sprintf("%s Claim %s for Pod %s in StatefulSet %s failed error: %s",
				lower, claim.Name, pod.Name, set.Name, err))
		return
	}
	spc.recorder.Event(set, v1.EventTypeNormal,
		fmt.Sprintf("Successful%s", title),
		fmt.Sprintf("%s Claim %s Pod %s in StatefulSet %s success",
			lower, claim.Name, pod.Name, set.Name))
}
// createPersistentVolumeClaims creates all of the required PersistentVolumeClaims for pod, which must be a member of
// set. If all of the claims for Pod are successfully created, the returned error is nil. If creation fails, this method
// may be called again until no error is returned, indicating the PersistentVolumeClaims for pod are consistent with
// set's Spec.
func (spc *realStatefulPodControl) createPersistentVolumeClaims(set *apps.StatefulSet, pod *v1.Pod) error {
	var errs []error
	for _, claim := range getPersistentVolumeClaims(set, pod) {
		// Consult the lister first so an API create is only attempted for claims not already cached.
		_, err := spc.pvcLister.PersistentVolumeClaims(claim.Namespace).Get(claim.Name)
		switch {
		case apierrors.IsNotFound(err):
			_, err := spc.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(&claim)
			if err != nil {
				// Error strings are lowercase per Go convention (they are wrapped into an aggregate).
				errs = append(errs, fmt.Errorf("failed to create PVC %s: %s", claim.Name, err))
			}
			// An AlreadyExists race (e.g. informer cache lag) is benign: skip the event for it.
			if err == nil || !apierrors.IsAlreadyExists(err) {
				spc.recordClaimEvent("create", set, pod, &claim, err)
			}
		case err != nil:
			errs = append(errs, fmt.Errorf("failed to retrieve PVC %s: %s", claim.Name, err))
			spc.recordClaimEvent("create", set, pod, &claim, err)
		}
		// TODO: Check resource requirements and accessmodes, update if necessary
	}
	return errorutils.NewAggregate(errs)
}
var _ StatefulPodControlInterface = &realStatefulPodControl{}

View File

@ -0,0 +1,448 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package statefulset
import (
"errors"
"strings"
"testing"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes/fake"
corelisters "k8s.io/client-go/listers/core/v1"
_ "k8s.io/kubernetes/pkg/apis/apps/install"
_ "k8s.io/kubernetes/pkg/apis/core/install"
)
// TestStatefulPodControlCreatesPods verifies the happy path: with an empty PVC lister and all API
// creates succeeding, CreateStatefulPod returns nil and records exactly two normal events.
func TestStatefulPodControlCreatesPods(t *testing.T) {
	recorder := record.NewFakeRecorder(10)
	set := newStatefulSet(3)
	pod := newStatefulSetPod(set, 0)
	fakeClient := &fake.Clientset{}
	pvcIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
	pvcLister := corelisters.NewPersistentVolumeClaimLister(pvcIndexer)
	control := NewRealStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder)
	// Any PVC get against the API reports NotFound, forcing the create path.
	fakeClient.AddReactor("get", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
		return true, nil, apierrors.NewNotFound(action.GetResource().GroupResource(), action.GetResource().Resource)
	})
	// PVC and Pod creation both succeed, echoing back the submitted object.
	fakeClient.AddReactor("create", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
		create := action.(core.CreateAction)
		return true, create.GetObject(), nil
	})
	fakeClient.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
		create := action.(core.CreateAction)
		return true, create.GetObject(), nil
	})
	if err := control.CreateStatefulPod(set, pod); err != nil {
		t.Errorf("StatefulPodControl failed to create Pod error: %s", err)
	}
	events := collectEvents(recorder.Events)
	if eventCount := len(events); eventCount != 2 {
		t.Errorf("Expected 2 events for successful create found %d", eventCount)
	}
	for i := range events {
		if !strings.Contains(events[i], v1.EventTypeNormal) {
			t.Errorf("Found unexpected non-normal event %s", events[i])
		}
	}
}
// TestStatefulPodControlCreatePodExists covers the case where the Pod's PVCs are already present in
// the lister and the Pod create returns AlreadyExists: the error is surfaced to the caller and no
// events are recorded.
func TestStatefulPodControlCreatePodExists(t *testing.T) {
	recorder := record.NewFakeRecorder(10)
	set := newStatefulSet(3)
	pod := newStatefulSetPod(set, 0)
	fakeClient := &fake.Clientset{}
	pvcs := getPersistentVolumeClaims(set, pod)
	pvcIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
	// Seed the lister with every claim the Pod needs so no PVC creation is attempted.
	for k := range pvcs {
		pvc := pvcs[k]
		pvcIndexer.Add(&pvc)
	}
	pvcLister := corelisters.NewPersistentVolumeClaimLister(pvcIndexer)
	control := NewRealStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder)
	fakeClient.AddReactor("create", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
		create := action.(core.CreateAction)
		return true, create.GetObject(), nil
	})
	// The Pod create reports that the Pod already exists.
	fakeClient.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
		return true, pod, apierrors.NewAlreadyExists(action.GetResource().GroupResource(), pod.Name)
	})
	if err := control.CreateStatefulPod(set, pod); !apierrors.IsAlreadyExists(err) {
		t.Errorf("Failed to create Pod error: %s", err)
	}
	events := collectEvents(recorder.Events)
	if eventCount := len(events); eventCount != 0 {
		t.Errorf("Pod and PVC exist: got %d events, but want 0", eventCount)
		for i := range events {
			t.Log(events[i])
		}
	}
}
// TestStatefulPodControlCreatePodPvcCreateFailure verifies that a PVC create failure aborts Pod
// creation, surfaces an error, and records two warning events (claim + pod).
func TestStatefulPodControlCreatePodPvcCreateFailure(t *testing.T) {
	recorder := record.NewFakeRecorder(10)
	set := newStatefulSet(3)
	pod := newStatefulSetPod(set, 0)
	fakeClient := &fake.Clientset{}
	pvcIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
	pvcLister := corelisters.NewPersistentVolumeClaimLister(pvcIndexer)
	control := NewRealStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder)
	// Every PVC create fails with an internal error.
	fakeClient.AddReactor("create", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
		return true, nil, apierrors.NewInternalError(errors.New("API server down"))
	})
	// Pod creation would succeed, but should never be reached.
	fakeClient.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
		create := action.(core.CreateAction)
		return true, create.GetObject(), nil
	})
	if err := control.CreateStatefulPod(set, pod); err == nil {
		t.Error("Failed to produce error on PVC creation failure")
	}
	events := collectEvents(recorder.Events)
	if eventCount := len(events); eventCount != 2 {
		t.Errorf("PVC create failure: got %d events, but want 2", eventCount)
	}
	for i := range events {
		if !strings.Contains(events[i], v1.EventTypeWarning) {
			t.Errorf("Found unexpected non-warning event %s", events[i])
		}
	}
}
// fakeIndexer embeds a cache.Indexer and overrides GetByKey to always fail with getError,
// letting tests simulate a lister/cache read failure.
type fakeIndexer struct {
	cache.Indexer
	// getError is the error returned by every GetByKey call.
	getError error
}

// GetByKey ignores key and always returns getError with no object found.
func (f *fakeIndexer) GetByKey(key string) (interface{}, bool, error) {
	return nil, false, f.getError
}
// TestStatefulPodControlCreatePodPvcGetFailure simulates a PVC lister/cache read failure: the
// control must surface an error and record two warning events instead of creating the Pod.
// (Failure messages corrected: this scenario exercises the PVC *get* path, not the create path.)
func TestStatefulPodControlCreatePodPvcGetFailure(t *testing.T) {
	recorder := record.NewFakeRecorder(10)
	set := newStatefulSet(3)
	pod := newStatefulSetPod(set, 0)
	fakeClient := &fake.Clientset{}
	// Every lister Get fails regardless of key.
	pvcIndexer := &fakeIndexer{getError: errors.New("API server down")}
	pvcLister := corelisters.NewPersistentVolumeClaimLister(pvcIndexer)
	control := NewRealStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder)
	fakeClient.AddReactor("create", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
		return true, nil, apierrors.NewInternalError(errors.New("API server down"))
	})
	fakeClient.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
		create := action.(core.CreateAction)
		return true, create.GetObject(), nil
	})
	if err := control.CreateStatefulPod(set, pod); err == nil {
		t.Error("Failed to produce error on PVC get failure")
	}
	events := collectEvents(recorder.Events)
	if eventCount := len(events); eventCount != 2 {
		t.Errorf("PVC get failure: got %d events, but want 2", eventCount)
	}
	for i := range events {
		if !strings.Contains(events[i], v1.EventTypeWarning) {
			t.Errorf("Found unexpected non-warning event: %s", events[i])
		}
	}
}
// TestStatefulPodControlCreatePodFailed: PVC creation succeeds but the Pod create fails. Two events
// are expected: a normal event (for the claim) followed by a warning (for the pod).
func TestStatefulPodControlCreatePodFailed(t *testing.T) {
	recorder := record.NewFakeRecorder(10)
	set := newStatefulSet(3)
	pod := newStatefulSetPod(set, 0)
	fakeClient := &fake.Clientset{}
	pvcIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
	pvcLister := corelisters.NewPersistentVolumeClaimLister(pvcIndexer)
	control := NewRealStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder)
	fakeClient.AddReactor("create", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
		create := action.(core.CreateAction)
		return true, create.GetObject(), nil
	})
	// Pod creation fails with an internal error.
	fakeClient.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
		return true, nil, apierrors.NewInternalError(errors.New("API server down"))
	})
	if err := control.CreateStatefulPod(set, pod); err == nil {
		t.Error("Failed to produce error on Pod creation failure")
	}
	events := collectEvents(recorder.Events)
	if eventCount := len(events); eventCount != 2 {
		t.Errorf("Pod create failed: got %d events, but want 2", eventCount)
	} else if !strings.Contains(events[0], v1.EventTypeNormal) {
		t.Errorf("Found unexpected non-normal event %s", events[0])
	} else if !strings.Contains(events[1], v1.EventTypeWarning) {
		t.Errorf("Found unexpected non-warning event %s", events[1])
	}
}
// TestStatefulPodControlNoOpUpdate verifies that updating an already-consistent Pod makes no API
// calls and records no events: the catch-all reactor fails the test if anything reaches the client.
func TestStatefulPodControlNoOpUpdate(t *testing.T) {
	recorder := record.NewFakeRecorder(10)
	set := newStatefulSet(3)
	pod := newStatefulSetPod(set, 0)
	fakeClient := &fake.Clientset{}
	control := NewRealStatefulPodControl(fakeClient, nil, nil, nil, recorder)
	// Any client invocation at all is a failure for a no-op update.
	fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
		t.Error("no-op update should not make any client invocation")
		return true, nil, apierrors.NewInternalError(errors.New("If we are here we have a problem"))
	})
	if err := control.UpdateStatefulPod(set, pod); err != nil {
		t.Errorf("Error returned on no-op update error: %s", err)
	}
	events := collectEvents(recorder.Events)
	if eventCount := len(events); eventCount != 0 {
		t.Errorf("no-op update: got %d events, but want 0", eventCount)
	}
}
// TestStatefulPodControlUpdatesIdentity renames the Pod so it no longer matches its StatefulSet
// identity, then checks that UpdateStatefulPod restores the identity via a single successful
// update (one normal event) and that the object sent to the API matches the set's identity.
func TestStatefulPodControlUpdatesIdentity(t *testing.T) {
	recorder := record.NewFakeRecorder(10)
	set := newStatefulSet(3)
	pod := newStatefulSetPod(set, 0)
	fakeClient := fake.NewSimpleClientset(set, pod)
	control := NewRealStatefulPodControl(fakeClient, nil, nil, nil, recorder)
	var updated *v1.Pod
	// Capture the Pod object submitted to the API on update.
	fakeClient.PrependReactor("update", "pods", func(action core.Action) (bool, runtime.Object, error) {
		update := action.(core.UpdateAction)
		updated = update.GetObject().(*v1.Pod)
		return true, update.GetObject(), nil
	})
	// Break the Pod's identity before updating.
	pod.Name = "goo-0"
	if err := control.UpdateStatefulPod(set, pod); err != nil {
		t.Errorf("Successful update returned an error: %s", err)
	}
	events := collectEvents(recorder.Events)
	if eventCount := len(events); eventCount != 1 {
		t.Errorf("Pod update successful:got %d events,but want 1", eventCount)
	} else if !strings.Contains(events[0], v1.EventTypeNormal) {
		t.Errorf("Found unexpected non-normal event %s", events[0])
	}
	if !identityMatches(set, updated) {
		t.Error("Name update failed identity does not match")
	}
}
// TestStatefulPodControlUpdateIdentityFailure verifies behavior when the identity-restoring update
// fails: the error is surfaced with one warning event, and the in-out pod parameter (refreshed from
// the lister copy named "goo-0") ends up NOT matching the set's identity.
func TestStatefulPodControlUpdateIdentityFailure(t *testing.T) {
	recorder := record.NewFakeRecorder(10)
	set := newStatefulSet(3)
	pod := newStatefulSetPod(set, 0)
	fakeClient := &fake.Clientset{}
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
	// The lister holds a Pod whose name breaks the set's identity; the control re-reads it on failure.
	gooPod := newStatefulSetPod(set, 0)
	gooPod.Name = "goo-0"
	indexer.Add(gooPod)
	podLister := corelisters.NewPodLister(indexer)
	control := NewRealStatefulPodControl(fakeClient, nil, podLister, nil, recorder)
	// The update fails and also re-breaks the Pod's name (simulating no server-side change).
	fakeClient.AddReactor("update", "pods", func(action core.Action) (bool, runtime.Object, error) {
		pod.Name = "goo-0"
		return true, nil, apierrors.NewInternalError(errors.New("API server down"))
	})
	pod.Name = "goo-0"
	if err := control.UpdateStatefulPod(set, pod); err == nil {
		t.Error("Failed update does not generate an error")
	}
	events := collectEvents(recorder.Events)
	if eventCount := len(events); eventCount != 1 {
		t.Errorf("Pod update failed: got %d events, but want 1", eventCount)
	} else if !strings.Contains(events[0], v1.EventTypeWarning) {
		t.Errorf("Found unexpected non-warning event %s", events[0])
	}
	if identityMatches(set, pod) {
		t.Error("Failed update mutated Pod identity")
	}
}
// TestStatefulPodControlUpdatesPodStorage strips the Pod's PVC-backed volumes so its storage no
// longer matches the set, then checks that UpdateStatefulPod recreates the missing claims and
// updates the Pod (two normal events) such that the submitted object satisfies storageMatches.
func TestStatefulPodControlUpdatesPodStorage(t *testing.T) {
	recorder := record.NewFakeRecorder(10)
	set := newStatefulSet(3)
	pod := newStatefulSetPod(set, 0)
	fakeClient := &fake.Clientset{}
	pvcIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
	pvcLister := corelisters.NewPersistentVolumeClaimLister(pvcIndexer)
	control := NewRealStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder)
	pvcs := getPersistentVolumeClaims(set, pod)
	// Keep only the volumes that are NOT backed by the set's claims, breaking storage consistency.
	volumes := make([]v1.Volume, 0, len(pod.Spec.Volumes))
	for i := range pod.Spec.Volumes {
		if _, contains := pvcs[pod.Spec.Volumes[i].Name]; !contains {
			volumes = append(volumes, pod.Spec.Volumes[i])
		}
	}
	pod.Spec.Volumes = volumes
	fakeClient.AddReactor("update", "pods", func(action core.Action) (bool, runtime.Object, error) {
		update := action.(core.UpdateAction)
		return true, update.GetObject(), nil
	})
	fakeClient.AddReactor("create", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
		update := action.(core.UpdateAction)
		return true, update.GetObject(), nil
	})
	var updated *v1.Pod
	// Capture the Pod object submitted on update (prepended so it runs before the reactor above).
	fakeClient.PrependReactor("update", "pods", func(action core.Action) (bool, runtime.Object, error) {
		update := action.(core.UpdateAction)
		updated = update.GetObject().(*v1.Pod)
		return true, update.GetObject(), nil
	})
	if err := control.UpdateStatefulPod(set, pod); err != nil {
		t.Errorf("Successful update returned an error: %s", err)
	}
	events := collectEvents(recorder.Events)
	if eventCount := len(events); eventCount != 2 {
		t.Errorf("Pod storage update successful: got %d events, but want 2", eventCount)
	}
	for i := range events {
		if !strings.Contains(events[i], v1.EventTypeNormal) {
			t.Errorf("Found unexpected non-normal event %s", events[i])
		}
	}
	if !storageMatches(set, updated) {
		t.Error("Name update failed identity does not match")
	}
}
// TestStatefulPodControlUpdatePodStorageFailure breaks the Pod's storage consistency and makes PVC
// creation fail: the update must return an error and record two warning events.
// (Failure message corrected: the loop checks for warning events, so the diagnostic now says
// "non-warning" instead of "non-normal".)
func TestStatefulPodControlUpdatePodStorageFailure(t *testing.T) {
	recorder := record.NewFakeRecorder(10)
	set := newStatefulSet(3)
	pod := newStatefulSetPod(set, 0)
	fakeClient := &fake.Clientset{}
	pvcIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
	pvcLister := corelisters.NewPersistentVolumeClaimLister(pvcIndexer)
	control := NewRealStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder)
	pvcs := getPersistentVolumeClaims(set, pod)
	// Keep only the volumes NOT backed by the set's claims, breaking storage consistency.
	volumes := make([]v1.Volume, 0, len(pod.Spec.Volumes))
	for i := range pod.Spec.Volumes {
		if _, contains := pvcs[pod.Spec.Volumes[i].Name]; !contains {
			volumes = append(volumes, pod.Spec.Volumes[i])
		}
	}
	pod.Spec.Volumes = volumes
	fakeClient.AddReactor("update", "pods", func(action core.Action) (bool, runtime.Object, error) {
		update := action.(core.UpdateAction)
		return true, update.GetObject(), nil
	})
	// PVC creation fails, so the Pod update is never committed.
	fakeClient.AddReactor("create", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
		return true, nil, apierrors.NewInternalError(errors.New("API server down"))
	})
	if err := control.UpdateStatefulPod(set, pod); err == nil {
		t.Error("Failed Pod storage update did not return an error")
	}
	events := collectEvents(recorder.Events)
	if eventCount := len(events); eventCount != 2 {
		t.Errorf("Pod storage update failed: got %d events, but want 2", eventCount)
	}
	for i := range events {
		if !strings.Contains(events[i], v1.EventTypeWarning) {
			t.Errorf("Found unexpected non-warning event %s", events[i])
		}
	}
}
// TestStatefulPodControlUpdatePodConflictSuccess verifies conflict retry: the first update returns
// a Conflict error, the control refreshes the Pod from the lister and retries, and the second
// update succeeds with a single normal event.
func TestStatefulPodControlUpdatePodConflictSuccess(t *testing.T) {
	recorder := record.NewFakeRecorder(10)
	set := newStatefulSet(3)
	pod := newStatefulSetPod(set, 0)
	fakeClient := &fake.Clientset{}
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
	// The lister serves the conflicting copy that the control re-reads after the first failure.
	gooPod := newStatefulSetPod(set, 0)
	gooPod.Name = "goo-0"
	indexer.Add(gooPod)
	podLister := corelisters.NewPodLister(indexer)
	control := NewRealStatefulPodControl(fakeClient, nil, podLister, nil, recorder)
	conflict := false
	// Fail with Conflict exactly once, then succeed.
	fakeClient.AddReactor("update", "pods", func(action core.Action) (bool, runtime.Object, error) {
		update := action.(core.UpdateAction)
		if !conflict {
			conflict = true
			return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), pod.Name, errors.New("conflict"))
		} else {
			return true, update.GetObject(), nil
		}
	})
	pod.Name = "goo-0"
	if err := control.UpdateStatefulPod(set, pod); err != nil {
		t.Errorf("Successful update returned an error: %s", err)
	}
	events := collectEvents(recorder.Events)
	if eventCount := len(events); eventCount != 1 {
		t.Errorf("Pod update successful: got %d, but want 1", eventCount)
	} else if !strings.Contains(events[0], v1.EventTypeNormal) {
		t.Errorf("Found unexpected non-normal event %s", events[0])
	}
	if !identityMatches(set, pod) {
		t.Error("Name update failed identity does not match")
	}
}
// TestStatefulPodControlDeletesStatefulPod verifies that a successful delete returns nil and
// records exactly one normal event.
func TestStatefulPodControlDeletesStatefulPod(t *testing.T) {
	recorder := record.NewFakeRecorder(10)
	set := newStatefulSet(3)
	pod := newStatefulSetPod(set, 0)
	client := &fake.Clientset{}
	control := NewRealStatefulPodControl(client, nil, nil, nil, recorder)
	// The delete call succeeds.
	client.AddReactor("delete", "pods", func(action core.Action) (bool, runtime.Object, error) {
		return true, nil, nil
	})
	if err := control.DeleteStatefulPod(set, pod); err != nil {
		t.Errorf("Error returned on successful delete: %s", err)
	}
	recorded := collectEvents(recorder.Events)
	switch n := len(recorded); {
	case n != 1:
		t.Errorf("delete successful: got %d events, but want 1", n)
	case !strings.Contains(recorded[0], v1.EventTypeNormal):
		t.Errorf("Found unexpected non-normal event %s", recorded[0])
	}
}
// TestStatefulPodControlDeleteFailure verifies that a failed delete surfaces the error and records
// exactly one warning event.
func TestStatefulPodControlDeleteFailure(t *testing.T) {
	recorder := record.NewFakeRecorder(10)
	set := newStatefulSet(3)
	pod := newStatefulSetPod(set, 0)
	client := &fake.Clientset{}
	control := NewRealStatefulPodControl(client, nil, nil, nil, recorder)
	// The delete call fails with an internal error.
	client.AddReactor("delete", "pods", func(action core.Action) (bool, runtime.Object, error) {
		return true, nil, apierrors.NewInternalError(errors.New("API server down"))
	})
	if err := control.DeleteStatefulPod(set, pod); err == nil {
		t.Error("Failed to return error on failed delete")
	}
	recorded := collectEvents(recorder.Events)
	switch n := len(recorded); {
	case n != 1:
		t.Errorf("delete failed: got %d events, but want 1", n)
	case !strings.Contains(recorded[0], v1.EventTypeWarning):
		t.Errorf("Found unexpected non-warning event %s", recorded[0])
	}
}
// collectEvents drains every event currently buffered in source without blocking and returns them
// in the order received. An empty (non-nil) slice is returned when no events are pending.
func collectEvents(source <-chan string) []string {
	events := []string{}
	for {
		select {
		case event := <-source:
			events = append(events, event)
		default:
			return events
		}
	}
}

View File

@ -0,0 +1,462 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package statefulset
import (
"fmt"
"reflect"
"time"
apps "k8s.io/api/apps/v1beta1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
appsinformers "k8s.io/client-go/informers/apps/v1beta1"
coreinformers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
appslisters "k8s.io/client-go/listers/apps/v1beta1"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/history"
"github.com/golang/glog"
)
const (
// period to relist statefulsets and verify pets
statefulSetResyncPeriod = 30 * time.Second
)
// controllerKind contains the schema.GroupVersionKind for this controller type.
var controllerKind = apps.SchemeGroupVersion.WithKind("StatefulSet")
// StatefulSetController controls statefulsets.
type StatefulSetController struct {
	// client interface
	kubeClient clientset.Interface
	// control returns an interface capable of syncing a stateful set.
	// Abstracted out for testing.
	control StatefulSetControlInterface
	// podControl is used for patching pods.
	podControl controller.PodControlInterface
	// podLister is able to list/get pods from a shared informer's store
	podLister corelisters.PodLister
	// podListerSynced returns true if the pod shared informer has synced at least once
	podListerSynced cache.InformerSynced
	// setLister is able to list/get stateful sets from a shared informer's store
	setLister appslisters.StatefulSetLister
	// setListerSynced returns true if the stateful set shared informer has synced at least once
	setListerSynced cache.InformerSynced
	// pvcListerSynced returns true if the pvc shared informer has synced at least once
	pvcListerSynced cache.InformerSynced
	// revListerSynced returns true if the rev (ControllerRevision) shared informer has synced at least once
	revListerSynced cache.InformerSynced
	// queue is the rate-limited work queue of StatefulSets that need to be synced.
	queue workqueue.RateLimitingInterface
}
// NewStatefulSetController creates a new statefulset controller.
// It wires the pod, statefulset, pvc, and controller-revision informers into
// the controller and registers the event handlers. No goroutines are started
// here; call Run to start processing.
func NewStatefulSetController(
	podInformer coreinformers.PodInformer,
	setInformer appsinformers.StatefulSetInformer,
	pvcInformer coreinformers.PersistentVolumeClaimInformer,
	revInformer appsinformers.ControllerRevisionInformer,
	kubeClient clientset.Interface,
) *StatefulSetController {
	// Events emitted by the controller are both logged and recorded to the
	// API server under the "statefulset-controller" component.
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "statefulset-controller"})
	ssc := &StatefulSetController{
		kubeClient: kubeClient,
		control: NewDefaultStatefulSetControl(
			NewRealStatefulPodControl(
				kubeClient,
				setInformer.Lister(),
				podInformer.Lister(),
				pvcInformer.Lister(),
				recorder),
			NewRealStatefulSetStatusUpdater(kubeClient, setInformer.Lister()),
			history.NewHistory(kubeClient, revInformer.Lister()),
		),
		pvcListerSynced: pvcInformer.Informer().HasSynced,
		queue:           workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "statefulset"),
		podControl:      controller.RealPodControl{KubeClient: kubeClient, Recorder: recorder},
		revListerSynced: revInformer.Informer().HasSynced,
	}
	// Pod events are mapped back to the owning (or potentially adopting)
	// StatefulSet, which is then enqueued for sync.
	podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		// lookup the statefulset and enqueue
		AddFunc: ssc.addPod,
		// lookup current and old statefulset if labels changed
		UpdateFunc: ssc.updatePod,
		// lookup statefulset accounting for deletion tombstones
		DeleteFunc: ssc.deletePod,
	})
	ssc.podLister = podInformer.Lister()
	ssc.podListerSynced = podInformer.Informer().HasSynced
	// StatefulSet events enqueue the set itself; the resync period re-enqueues
	// every set periodically so drifted state is re-verified without a change
	// event.
	setInformer.Informer().AddEventHandlerWithResyncPeriod(
		cache.ResourceEventHandlerFuncs{
			AddFunc: ssc.enqueueStatefulSet,
			UpdateFunc: func(old, cur interface{}) {
				oldPS := old.(*apps.StatefulSet)
				curPS := cur.(*apps.StatefulSet)
				if oldPS.Status.Replicas != curPS.Status.Replicas {
					glog.V(4).Infof("Observed updated replica count for StatefulSet: %v, %d->%d", curPS.Name, oldPS.Status.Replicas, curPS.Status.Replicas)
				}
				ssc.enqueueStatefulSet(cur)
			},
			DeleteFunc: ssc.enqueueStatefulSet,
		},
		statefulSetResyncPeriod,
	)
	ssc.setLister = setInformer.Lister()
	ssc.setListerSynced = setInformer.Informer().HasSynced
	// TODO: Watch volumes
	return ssc
}
// Run runs the statefulset controller. It blocks until stopCh is closed:
// it first waits for every informer cache to sync, then starts `workers`
// goroutines that each process queue items until shutdown.
func (ssc *StatefulSetController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	// Shutting down the queue makes the workers' queue.Get return quit,
	// terminating their loops.
	defer ssc.queue.ShutDown()
	glog.Infof("Starting stateful set controller")
	defer glog.Infof("Shutting down statefulset controller")
	// Do not process anything until every lister is populated; acting on a
	// partially-synced cache could delete or create the wrong Pods.
	if !controller.WaitForCacheSync("stateful set", stopCh, ssc.podListerSynced, ssc.setListerSynced, ssc.pvcListerSynced, ssc.revListerSynced) {
		return
	}
	for i := 0; i < workers; i++ {
		go wait.Until(ssc.worker, time.Second, stopCh)
	}
	<-stopCh
}
// addPod is the informer add handler for Pods: it locates the StatefulSet
// responsible for the new Pod (via its ControllerRef, or by label matching
// for orphans) and enqueues that set for sync.
func (ssc *StatefulSetController) addPod(obj interface{}) {
	pod := obj.(*v1.Pod)

	if pod.DeletionTimestamp != nil {
		// on a restart of the controller manager, it's possible a new pod shows up in a state that
		// is already pending deletion. Prevent the pod from being a creation observation.
		ssc.deletePod(pod)
		return
	}

	// A Pod with a ControllerRef belongs to exactly one controller; only that
	// set needs to be woken up.
	if controllerRef := metav1.GetControllerOf(pod); controllerRef != nil {
		if set := ssc.resolveControllerRef(pod.Namespace, controllerRef); set != nil {
			glog.V(4).Infof("Pod %s created, labels: %+v", pod.Name, pod.Labels)
			ssc.enqueueStatefulSet(set)
		}
		return
	}

	// Orphan Pod: enqueue every StatefulSet whose selector matches so one of
	// them can attempt adoption.
	matching := ssc.getStatefulSetsForPod(pod)
	if len(matching) == 0 {
		return
	}
	glog.V(4).Infof("Orphan Pod %s created, labels: %+v", pod.Name, pod.Labels)
	for _, set := range matching {
		ssc.enqueueStatefulSet(set)
	}
}
// updatePod adds the statefulset for the current and old pods to the sync queue.
// If the Pod's ControllerRef changed, the previous owner is also enqueued so it
// can observe the lost child; orphans whose labels or ref changed wake up every
// matching set so one may attempt adoption.
func (ssc *StatefulSetController) updatePod(old, cur interface{}) {
	curPod := cur.(*v1.Pod)
	oldPod := old.(*v1.Pod)
	if curPod.ResourceVersion == oldPod.ResourceVersion {
		// Periodic resync will send update events for all known pods.
		// Two different versions of the same pod will always have different RVs.
		return
	}
	labelChanged := !reflect.DeepEqual(curPod.Labels, oldPod.Labels)
	curControllerRef := metav1.GetControllerOf(curPod)
	oldControllerRef := metav1.GetControllerOf(oldPod)
	controllerRefChanged := !reflect.DeepEqual(curControllerRef, oldControllerRef)
	if controllerRefChanged && oldControllerRef != nil {
		// The ControllerRef was changed. Sync the old controller, if any.
		if set := ssc.resolveControllerRef(oldPod.Namespace, oldControllerRef); set != nil {
			ssc.enqueueStatefulSet(set)
		}
	}
	// If it has a ControllerRef, that's all that matters.
	if curControllerRef != nil {
		set := ssc.resolveControllerRef(curPod.Namespace, curControllerRef)
		if set == nil {
			return
		}
		glog.V(4).Infof("Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta)
		ssc.enqueueStatefulSet(set)
		return
	}
	// Otherwise, it's an orphan. If anything changed, sync matching controllers
	// to see if anyone wants to adopt it now.
	if labelChanged || controllerRefChanged {
		sets := ssc.getStatefulSetsForPod(curPod)
		if len(sets) == 0 {
			return
		}
		glog.V(4).Infof("Orphan Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta)
		for _, set := range sets {
			ssc.enqueueStatefulSet(set)
		}
	}
}
// deletePod enqueues the statefulset for the pod accounting for deletion tombstones.
func (ssc *StatefulSetController) deletePod(obj interface{}) {
	// When a delete is dropped, the relist will notice a pod in the store not
	// in the list, leading to the insertion of a tombstone object which contains
	// the deleted key/value. Note that this value might be stale. If the pod
	// changed labels the new StatefulSet will not be woken up till the periodic resync.
	var pod *v1.Pod
	switch deleted := obj.(type) {
	case *v1.Pod:
		pod = deleted
	case cache.DeletedFinalStateUnknown:
		var isPod bool
		pod, isPod = deleted.Obj.(*v1.Pod)
		if !isPod {
			utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a pod %+v", obj))
			return
		}
	default:
		utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %+v", obj))
		return
	}

	controllerRef := metav1.GetControllerOf(pod)
	if controllerRef == nil {
		// No controller should care about orphans being deleted.
		return
	}
	set := ssc.resolveControllerRef(pod.Namespace, controllerRef)
	if set == nil {
		return
	}
	glog.V(4).Infof("Pod %s/%s deleted through %v.", pod.Namespace, pod.Name, utilruntime.GetCaller())
	ssc.enqueueStatefulSet(set)
}
// getPodsForStatefulSet returns the Pods that a given StatefulSet should manage.
// It also reconciles ControllerRef by adopting/orphaning.
//
// NOTE: Returned Pods are pointers to objects from the cache.
// If you need to modify one, you need to copy it first.
func (ssc *StatefulSetController) getPodsForStatefulSet(set *apps.StatefulSet, selector labels.Selector) ([]*v1.Pod, error) {
	// List all pods to include the pods that don't match the selector anymore but
	// has a ControllerRef pointing to this StatefulSet.
	pods, err := ssc.podLister.Pods(set.Namespace).List(labels.Everything())
	if err != nil {
		return nil, err
	}
	filter := func(pod *v1.Pod) bool {
		// Only claim if it matches our StatefulSet name. Otherwise release/ignore.
		return isMemberOf(set, pod)
	}
	// If any adoptions are attempted, we should first recheck for deletion with
	// an uncached quorum read sometime after listing Pods (see #42639).
	canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
		// Read directly from the API server (not the lister) so a concurrent
		// deletion of the set cannot be missed because of cache staleness.
		fresh, err := ssc.kubeClient.AppsV1beta1().StatefulSets(set.Namespace).Get(set.Name, metav1.GetOptions{})
		if err != nil {
			return nil, err
		}
		// A UID mismatch means the set was deleted and recreated under the
		// same name; the original owner is gone, so adoption must not proceed.
		if fresh.UID != set.UID {
			return nil, fmt.Errorf("original StatefulSet %v/%v is gone: got uid %v, wanted %v", set.Namespace, set.Name, fresh.UID, set.UID)
		}
		return fresh, nil
	})
	cm := controller.NewPodControllerRefManager(ssc.podControl, set, selector, controllerKind, canAdoptFunc)
	return cm.ClaimPods(pods, filter)
}
// adoptOrphanRevisions adopts any orphaned ControllerRevisions matched by set's Selector.
func (ssc *StatefulSetController) adoptOrphanRevisions(set *apps.StatefulSet) error {
	revisions, err := ssc.control.ListRevisions(set)
	if err != nil {
		return err
	}

	// Look for at least one revision that has no controlling owner.
	orphaned := false
	for i := range revisions {
		if metav1.GetControllerOf(revisions[i]) == nil {
			orphaned = true
			break
		}
	}
	if !orphaned {
		return nil
	}

	// Before adopting, re-read the set uncached so we never adopt on behalf of
	// a set that was deleted (and possibly recreated) concurrently.
	fresh, err := ssc.kubeClient.AppsV1beta1().StatefulSets(set.Namespace).Get(set.Name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	if fresh.UID != set.UID {
		return fmt.Errorf("original StatefulSet %v/%v is gone: got uid %v, wanted %v", set.Namespace, set.Name, fresh.UID, set.UID)
	}
	return ssc.control.AdoptOrphanRevisions(set, revisions)
}
// getStatefulSetsForPod returns a list of StatefulSets that potentially match
// a given pod.
func (ssc *StatefulSetController) getStatefulSetsForPod(pod *v1.Pod) []*apps.StatefulSet {
	matching, err := ssc.setLister.GetPodStatefulSets(pod)
	if err != nil {
		return nil
	}
	if len(matching) <= 1 {
		return matching
	}
	// More than one set selects this Pod. ControllerRef keeps the controllers
	// from fighting over it, but overlapping selectors still constitute user
	// error worth surfacing.
	utilruntime.HandleError(
		fmt.Errorf(
			"user error: more than one StatefulSet is selecting pods with labels: %+v",
			pod.Labels))
	return matching
}
// resolveControllerRef returns the controller referenced by a ControllerRef,
// or nil if the ControllerRef could not be resolved to a matching controller
// of the correct Kind.
func (ssc *StatefulSetController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *apps.StatefulSet {
	// We can't look up by UID, so look up by Name and then verify UID.
	// Don't even try to look up by Name if it's the wrong Kind.
	if controllerRef.Kind != controllerKind.Kind {
		return nil
	}
	set, err := ssc.setLister.StatefulSets(namespace).Get(controllerRef.Name)
	switch {
	case err != nil:
		// The name no longer resolves (or the lister errored).
		return nil
	case set.UID != controllerRef.UID:
		// Same name, different object: the controller the ref points to is gone.
		return nil
	default:
		return set
	}
}
// enqueueStatefulSet enqueues the given statefulset in the work queue.
// obj may be a *apps.StatefulSet or a deletion tombstone wrapping one; if no
// namespace/name key can be derived from it, the error is surfaced through
// HandleError and the object is dropped.
func (ssc *StatefulSetController) enqueueStatefulSet(obj interface{}) {
	key, err := controller.KeyFunc(obj)
	if err != nil {
		// Message fixed ("Cound't" typo) and lowercased for consistency with
		// the other error strings in this file.
		utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err))
		return
	}
	ssc.queue.Add(key)
}
// processNextWorkItem dequeues items, processes them, and marks them done. It enforces that the syncHandler is never
// invoked concurrently with the same key. It returns false only when the queue
// has been shut down.
func (ssc *StatefulSetController) processNextWorkItem() bool {
	key, quit := ssc.queue.Get()
	if quit {
		return false
	}
	defer ssc.queue.Done(key)

	err := ssc.sync(key.(string))
	if err == nil {
		// Success: clear any rate-limiting history for this key.
		ssc.queue.Forget(key)
		return true
	}
	// Failure: report and requeue with backoff.
	utilruntime.HandleError(fmt.Errorf("Error syncing StatefulSet %v, requeuing: %v", key.(string), err))
	ssc.queue.AddRateLimited(key)
	return true
}
// worker runs a worker goroutine that invokes processNextWorkItem until the controller's queue is closed.
func (ssc *StatefulSetController) worker() {
	for {
		if !ssc.processNextWorkItem() {
			return
		}
	}
}
// sync syncs the given statefulset.
// key is a namespace/name string from the work queue; a returned error causes
// processNextWorkItem to requeue the key with rate limiting.
func (ssc *StatefulSetController) sync(key string) error {
	startTime := time.Now()
	defer func() {
		// time.Since is the idiomatic form of time.Now().Sub(startTime).
		glog.V(4).Infof("Finished syncing statefulset %q (%v)", key, time.Since(startTime))
	}()

	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return err
	}
	set, err := ssc.setLister.StatefulSets(namespace).Get(name)
	if errors.IsNotFound(err) {
		// The set was deleted after being enqueued; nothing left to do.
		glog.Infof("StatefulSet has been deleted %v", key)
		return nil
	}
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("unable to retrieve StatefulSet %v from store: %v", key, err))
		return err
	}

	selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("error converting StatefulSet %v selector: %v", key, err))
		// This is a non-transient error, so don't retry.
		return nil
	}

	if err := ssc.adoptOrphanRevisions(set); err != nil {
		return err
	}

	pods, err := ssc.getPodsForStatefulSet(set, selector)
	if err != nil {
		return err
	}

	return ssc.syncStatefulSet(set, pods)
}
// syncStatefulSet syncs a tuple of (statefulset, []*v1.Pod).
// It delegates all reconciliation work to the StatefulSetControlInterface;
// errors are propagated to the caller for requeuing.
func (ssc *StatefulSetController) syncStatefulSet(set *apps.StatefulSet, pods []*v1.Pod) error {
	glog.V(4).Infof("Syncing StatefulSet %v/%v with %d pods", set.Namespace, set.Name, len(pods))
	// TODO: investigate where we mutate the set during the update as it is not obvious.
	// A deep copy is passed so the shared-informer cache object is never mutated.
	if err := ssc.control.UpdateStatefulSet(set.DeepCopy(), pods); err != nil {
		return err
	}
	glog.V(4).Infof("Successfully synced StatefulSet %s/%s", set.Namespace, set.Name)
	return nil
}

View File

@ -0,0 +1,549 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package statefulset
import (
"math"
"sort"
"github.com/golang/glog"
apps "k8s.io/api/apps/v1beta1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/controller/history"
)
// StatefulSetControlInterface implements the control logic for updating StatefulSets and their children Pods. It is implemented
// as an interface to allow for extensions that provide different semantics. Currently, there is only one implementation.
type StatefulSetControlInterface interface {
	// UpdateStatefulSet implements the control logic for Pod creation, update, and deletion, and
	// persistent volume creation, update, and deletion.
	// If an implementation returns a non-nil error, the invocation will be retried using a rate-limited strategy.
	// Implementors should sink any errors that they do not wish to trigger a retry, and they may feel free to
	// exit exceptionally at any point provided they wish the update to be re-run at a later point in time.
	UpdateStatefulSet(set *apps.StatefulSet, pods []*v1.Pod) error
	// ListRevisions returns an array of the ControllerRevisions that represent the revisions of set. If the returned
	// error is nil, the returned slice of ControllerRevisions is valid.
	ListRevisions(set *apps.StatefulSet) ([]*apps.ControllerRevision, error)
	// AdoptOrphanRevisions adopts any orphaned ControllerRevisions that match set's Selector. If all adoptions are
	// successful the returned error is nil.
	AdoptOrphanRevisions(set *apps.StatefulSet, revisions []*apps.ControllerRevision) error
}
// NewDefaultStatefulSetControl returns the canonical StatefulSetControlInterface
// implementation, which follows the documented StatefulSet semantics.
// podControl creates, updates, and deletes Pods and creates
// PersistentVolumeClaims; statusUpdater persists StatefulSet status updates;
// controllerHistory manages ControllerRevisions. Use real implementations
// (e.g. from NewRealStatefulPodControl) for any scenario other than testing.
func NewDefaultStatefulSetControl(
	podControl StatefulPodControlInterface,
	statusUpdater StatefulSetStatusUpdaterInterface,
	controllerHistory history.Interface) StatefulSetControlInterface {
	return &defaultStatefulSetControl{
		podControl:        podControl,
		statusUpdater:     statusUpdater,
		controllerHistory: controllerHistory,
	}
}
// defaultStatefulSetControl is the default implementation of
// StatefulSetControlInterface.
type defaultStatefulSetControl struct {
	// podControl creates, updates, and deletes Pods and creates PVCs.
	podControl StatefulPodControlInterface
	// statusUpdater persists updates to a StatefulSet's status.
	statusUpdater StatefulSetStatusUpdaterInterface
	// controllerHistory creates, lists, updates, and deletes ControllerRevisions.
	controllerHistory history.Interface
}
// UpdateStatefulSet executes the core logic loop for a stateful set, applying the predictable and
// consistent monotonic update strategy by default - scale up proceeds in ordinal order, no new pod
// is created while any pod is unhealthy, and pods are terminated in descending order. The burst
// strategy allows these constraints to be relaxed - pods will be created and deleted eagerly and
// in no particular order. Clients using the burst strategy should be careful to ensure they
// understand the consistency implications of having unpredictable numbers of pods available.
func (ssc *defaultStatefulSetControl) UpdateStatefulSet(set *apps.StatefulSet, pods []*v1.Pod) error {
	// list all revisions and sort them
	revisions, err := ssc.ListRevisions(set)
	if err != nil {
		return err
	}
	history.SortControllerRevisions(revisions)
	// get the current, and update revisions; collisionCount is threaded through
	// so revision-name collision avoidance carries over into the new status
	currentRevision, updateRevision, collisionCount, err := ssc.getStatefulSetRevisions(set, revisions)
	if err != nil {
		return err
	}
	// perform the main update function and get the status
	status, err := ssc.updateStatefulSet(set, currentRevision, updateRevision, collisionCount, pods)
	if err != nil {
		return err
	}
	// update the set's status
	err = ssc.updateStatefulSetStatus(set, status)
	if err != nil {
		return err
	}
	glog.V(4).Infof("StatefulSet %s/%s pod status replicas=%d ready=%d current=%d updated=%d",
		set.Namespace,
		set.Name,
		status.Replicas,
		status.ReadyReplicas,
		status.CurrentReplicas,
		status.UpdatedReplicas)
	glog.V(4).Infof("StatefulSet %s/%s revisions current=%s update=%s",
		set.Namespace,
		set.Name,
		status.CurrentRevision,
		status.UpdateRevision)
	// maintain the set's revision history limit
	return ssc.truncateHistory(set, pods, revisions, currentRevision, updateRevision)
}
// ListRevisions returns every ControllerRevision whose labels match set's
// selector. When the returned error is nil, the slice is valid.
func (ssc *defaultStatefulSetControl) ListRevisions(set *apps.StatefulSet) ([]*apps.ControllerRevision, error) {
	selector, convErr := metav1.LabelSelectorAsSelector(set.Spec.Selector)
	if convErr != nil {
		return nil, convErr
	}
	return ssc.controllerHistory.ListControllerRevisions(set, selector)
}
// AdoptOrphanRevisions attaches set as the controlling owner of each revision
// in revisions, replacing each slice element with its adopted form. It stops
// at the first adoption failure; nil means every adoption succeeded.
func (ssc *defaultStatefulSetControl) AdoptOrphanRevisions(
	set *apps.StatefulSet,
	revisions []*apps.ControllerRevision) error {
	for i := 0; i < len(revisions); i++ {
		adopted, adoptErr := ssc.controllerHistory.AdoptControllerRevision(set, controllerKind, revisions[i])
		if adoptErr != nil {
			return adoptErr
		}
		// Publish the adopted object back into the caller's slice.
		revisions[i] = adopted
	}
	return nil
}
// truncateHistory truncates any non-live ControllerRevisions in revisions from set's history. The UpdateRevision and
// CurrentRevision in set's Status are considered to be live. Any revisions associated with the Pods in pods are also
// considered to be live. Non-live revisions are deleted, starting with the revision with the lowest Revision, until
// only RevisionHistoryLimit revisions remain. If the returned error is nil the operation was successful. This method
// expects that revisions is sorted when supplied.
func (ssc *defaultStatefulSetControl) truncateHistory(
	set *apps.StatefulSet,
	pods []*v1.Pod,
	revisions []*apps.ControllerRevision,
	current *apps.ControllerRevision,
	update *apps.ControllerRevision) error {
	// nonLiveHistory collects revisions eligible for deletion. (Renamed from
	// "history", which shadowed the imported history package.)
	nonLiveHistory := make([]*apps.ControllerRevision, 0, len(revisions))
	// mark all live revisions
	live := map[string]bool{current.Name: true, update.Name: true}
	for i := range pods {
		live[getPodRevision(pods[i])] = true
	}
	// collect the non-live revisions; revisions is sorted, so these remain
	// ordered with the lowest Revision first
	for i := range revisions {
		if !live[revisions[i].Name] {
			nonLiveHistory = append(nonLiveHistory, revisions[i])
		}
	}
	historyLen := len(nonLiveHistory)
	// NOTE(review): assumes set.Spec.RevisionHistoryLimit is non-nil (API
	// defaulting is expected to have set it); a nil limit would panic here.
	historyLimit := int(*set.Spec.RevisionHistoryLimit)
	if historyLen <= historyLimit {
		return nil
	}
	// delete any non-live history to maintain the revision limit, dropping the
	// oldest (lowest Revision) entries first
	nonLiveHistory = nonLiveHistory[:(historyLen - historyLimit)]
	for i := 0; i < len(nonLiveHistory); i++ {
		if err := ssc.controllerHistory.DeleteControllerRevision(nonLiveHistory[i]); err != nil {
			return err
		}
	}
	return nil
}
// getStatefulSetRevisions returns the current and update ControllerRevisions for set. It also
// returns a collision count that records the number of name collisions set saw when creating
// new ControllerRevisions. This count is incremented on every name collision and is used in
// building the ControllerRevision names for name collision avoidance. This method may create
// a new revision, or modify the Revision of an existing revision if an update to set is detected.
// This method expects that revisions is sorted when supplied.
func (ssc *defaultStatefulSetControl) getStatefulSetRevisions(
	set *apps.StatefulSet,
	revisions []*apps.ControllerRevision) (*apps.ControllerRevision, *apps.ControllerRevision, int32, error) {
	var currentRevision, updateRevision *apps.ControllerRevision
	revisionCount := len(revisions)
	history.SortControllerRevisions(revisions)
	// Use a local copy of set.Status.CollisionCount to avoid modifying set.Status directly.
	// This copy is returned so the value gets carried over to set.Status in updateStatefulSet.
	var collisionCount int32
	if set.Status.CollisionCount != nil {
		collisionCount = *set.Status.CollisionCount
	}
	// create a new revision from the current set
	updateRevision, err := newRevision(set, nextRevision(revisions), &collisionCount)
	if err != nil {
		return nil, nil, collisionCount, err
	}
	// find any equivalent revisions
	equalRevisions := history.FindEqualRevisions(revisions, updateRevision)
	equalCount := len(equalRevisions)
	if equalCount > 0 && history.EqualRevision(revisions[revisionCount-1], equalRevisions[equalCount-1]) {
		// if the equivalent revision is immediately prior the update revision has not changed
		updateRevision = revisions[revisionCount-1]
	} else if equalCount > 0 {
		// if the equivalent revision is not immediately prior we will roll back by incrementing the
		// Revision of the equivalent revision
		updateRevision, err = ssc.controllerHistory.UpdateControllerRevision(
			equalRevisions[equalCount-1],
			updateRevision.Revision)
		if err != nil {
			return nil, nil, collisionCount, err
		}
	} else {
		//if there is no equivalent revision we create a new one
		updateRevision, err = ssc.controllerHistory.CreateControllerRevision(set, updateRevision, &collisionCount)
		if err != nil {
			return nil, nil, collisionCount, err
		}
	}
	// attempt to find the revision that corresponds to the current revision
	for i := range revisions {
		if revisions[i].Name == set.Status.CurrentRevision {
			currentRevision = revisions[i]
		}
	}
	// if the current revision is nil we initialize the history by setting it to the update revision
	if currentRevision == nil {
		currentRevision = updateRevision
	}
	return currentRevision, updateRevision, collisionCount, nil
}
// updateStatefulSet performs the update function for a StatefulSet. This method creates, updates, and deletes Pods in
// the set in order to conform the system to the target state for the set. The target state always contains
// set.Spec.Replicas Pods with a Ready Condition. If the UpdateStrategy.Type for the set is
// RollingUpdateStatefulSetStrategyType then all Pods in the set must be at set.Status.CurrentRevision.
// If the UpdateStrategy.Type for the set is OnDeleteStatefulSetStrategyType, the target state implies nothing about
// the revisions of Pods in the set. If the UpdateStrategy.Type for the set is PartitionStatefulSetStrategyType, then
// all Pods with ordinal less than UpdateStrategy.Partition.Ordinal must be at Status.CurrentRevision and all other
// Pods must be at Status.UpdateRevision. If the returned error is nil, the returned StatefulSetStatus is valid and the
// update must be recorded. If the error is not nil, the method should be retried until successful.
func (ssc *defaultStatefulSetControl) updateStatefulSet(
	set *apps.StatefulSet,
	currentRevision *apps.ControllerRevision,
	updateRevision *apps.ControllerRevision,
	collisionCount int32,
	pods []*v1.Pod) (*apps.StatefulSetStatus, error) {
	// get the current and update revisions of the set.
	currentSet, err := ApplyRevision(set, currentRevision)
	if err != nil {
		return nil, err
	}
	updateSet, err := ApplyRevision(set, updateRevision)
	if err != nil {
		return nil, err
	}
	// set the generation, and revisions in the returned status
	status := apps.StatefulSetStatus{}
	status.ObservedGeneration = new(int64)
	*status.ObservedGeneration = set.Generation
	status.CurrentRevision = currentRevision.Name
	status.UpdateRevision = updateRevision.Name
	status.CollisionCount = new(int32)
	*status.CollisionCount = collisionCount
	replicaCount := int(*set.Spec.Replicas)
	// slice that will contain all Pods such that 0 <= getOrdinal(pod) < set.Spec.Replicas
	replicas := make([]*v1.Pod, replicaCount)
	// slice that will contain all Pods such that set.Spec.Replicas <= getOrdinal(pod)
	condemned := make([]*v1.Pod, 0, len(pods))
	unhealthy := 0
	firstUnhealthyOrdinal := math.MaxInt32
	var firstUnhealthyPod *v1.Pod
	// First we partition pods into two lists: valid replicas and condemned Pods
	for i := range pods {
		status.Replicas++
		// count the number of running and ready replicas
		if isRunningAndReady(pods[i]) {
			status.ReadyReplicas++
		}
		// count the number of current and update replicas
		if isCreated(pods[i]) && !isTerminating(pods[i]) {
			if getPodRevision(pods[i]) == currentRevision.Name {
				status.CurrentReplicas++
			} else if getPodRevision(pods[i]) == updateRevision.Name {
				status.UpdatedReplicas++
			}
		}
		if ord := getOrdinal(pods[i]); 0 <= ord && ord < replicaCount {
			// if the ordinal of the pod is within the range of the current number of replicas,
			// insert it at the indirection of its ordinal
			replicas[ord] = pods[i]
		} else if ord >= replicaCount {
			// if the ordinal is greater than the number of replicas add it to the condemned list
			condemned = append(condemned, pods[i])
		}
		// If the ordinal could not be parsed (ord < 0), ignore the Pod.
	}
	// for any empty indices in the sequence [0,set.Spec.Replicas) create a new Pod at the correct revision
	for ord := 0; ord < replicaCount; ord++ {
		if replicas[ord] == nil {
			replicas[ord] = newVersionedStatefulSetPod(
				currentSet,
				updateSet,
				currentRevision.Name,
				updateRevision.Name, ord)
		}
	}
	// sort the condemned Pods by their ordinals
	sort.Sort(ascendingOrdinal(condemned))
	// find the first unhealthy Pod
	for i := range replicas {
		if !isHealthy(replicas[i]) {
			unhealthy++
			if ord := getOrdinal(replicas[i]); ord < firstUnhealthyOrdinal {
				firstUnhealthyOrdinal = ord
				firstUnhealthyPod = replicas[i]
			}
		}
	}
	for i := range condemned {
		if !isHealthy(condemned[i]) {
			unhealthy++
			if ord := getOrdinal(condemned[i]); ord < firstUnhealthyOrdinal {
				firstUnhealthyOrdinal = ord
				firstUnhealthyPod = condemned[i]
			}
		}
	}
	if unhealthy > 0 {
		glog.V(4).Infof("StatefulSet %s/%s has %d unhealthy Pods starting with %s",
			set.Namespace,
			set.Name,
			unhealthy,
			firstUnhealthyPod.Name)
	}
	// If the StatefulSet is being deleted, don't do anything other than updating
	// status.
	if set.DeletionTimestamp != nil {
		return &status, nil
	}
	monotonic := !allowsBurst(set)
	// Examine each replica with respect to its ordinal
	for i := range replicas {
		// delete and recreate failed pods
		if isFailed(replicas[i]) {
			glog.V(4).Infof("StatefulSet %s/%s is recreating failed Pod %s",
				set.Namespace,
				set.Name,
				replicas[i].Name)
			if err := ssc.podControl.DeleteStatefulPod(set, replicas[i]); err != nil {
				return &status, err
			}
			if getPodRevision(replicas[i]) == currentRevision.Name {
				status.CurrentReplicas--
			} else if getPodRevision(replicas[i]) == updateRevision.Name {
				status.UpdatedReplicas--
			}
			status.Replicas--
			replicas[i] = newVersionedStatefulSetPod(
				currentSet,
				updateSet,
				currentRevision.Name,
				updateRevision.Name,
				i)
		}
		// If we find a Pod that has not been created we create the Pod
		if !isCreated(replicas[i]) {
			if err := ssc.podControl.CreateStatefulPod(set, replicas[i]); err != nil {
				return &status, err
			}
			status.Replicas++
			if getPodRevision(replicas[i]) == currentRevision.Name {
				status.CurrentReplicas++
			} else if getPodRevision(replicas[i]) == updateRevision.Name {
				status.UpdatedReplicas++
			}
			// if the set does not allow bursting, return immediately
			if monotonic {
				return &status, nil
			}
			// pod created, no more work possible for this round
			continue
		}
		// If we find a Pod that is currently terminating, we must wait until graceful deletion
		// completes before we continue to make progress.
		if isTerminating(replicas[i]) && monotonic {
			glog.V(4).Infof(
				"StatefulSet %s/%s is waiting for Pod %s to Terminate",
				set.Namespace,
				set.Name,
				replicas[i].Name)
			return &status, nil
		}
		// If we have a Pod that has been created but is not running and ready we can not make progress.
		// We must ensure that, for each Pod we create, all of its predecessors, with respect to its
		// ordinal, are Running and Ready.
		if !isRunningAndReady(replicas[i]) && monotonic {
			glog.V(4).Infof(
				"StatefulSet %s/%s is waiting for Pod %s to be Running and Ready",
				set.Namespace,
				set.Name,
				replicas[i].Name)
			return &status, nil
		}
		// Enforce the StatefulSet invariants
		if identityMatches(set, replicas[i]) && storageMatches(set, replicas[i]) {
			continue
		}
		// Make a deep copy so we don't mutate the shared cache
		replica := replicas[i].DeepCopy()
		if err := ssc.podControl.UpdateStatefulPod(updateSet, replica); err != nil {
			return &status, err
		}
	}
	// At this point, all of the current Replicas are Running and Ready, we can consider termination.
	// We will wait for all predecessors to be Running and Ready prior to attempting a deletion.
	// We will terminate Pods in a monotonically decreasing order over [len(pods),set.Spec.Replicas).
	// Note that we do not resurrect Pods in this interval. Also note that scaling will take precedence over
	// updates.
	for target := len(condemned) - 1; target >= 0; target-- {
		// wait for terminating pods to expire
		if isTerminating(condemned[target]) {
			glog.V(4).Infof(
				"StatefulSet %s/%s is waiting for Pod %s to Terminate prior to scale down",
				set.Namespace,
				set.Name,
				condemned[target].Name)
			// block if we are in monotonic mode
			if monotonic {
				return &status, nil
			}
			continue
		}
		// if we are in monotonic mode and the condemned target is not the first unhealthy Pod block
		if !isRunningAndReady(condemned[target]) && monotonic && condemned[target] != firstUnhealthyPod {
			glog.V(4).Infof(
				"StatefulSet %s/%s is waiting for Pod %s to be Running and Ready prior to scale down",
				set.Namespace,
				set.Name,
				firstUnhealthyPod.Name)
			return &status, nil
		}
		// typo fixed: message previously read "scale dowm"
		glog.V(4).Infof("StatefulSet %s/%s terminating Pod %s for scale down",
			set.Namespace,
			set.Name,
			condemned[target].Name)
		if err := ssc.podControl.DeleteStatefulPod(set, condemned[target]); err != nil {
			return &status, err
		}
		if getPodRevision(condemned[target]) == currentRevision.Name {
			status.CurrentReplicas--
		} else if getPodRevision(condemned[target]) == updateRevision.Name {
			status.UpdatedReplicas--
		}
		if monotonic {
			return &status, nil
		}
	}
	// for the OnDelete strategy we short circuit. Pods will be updated when they are manually deleted.
	if set.Spec.UpdateStrategy.Type == apps.OnDeleteStatefulSetStrategyType {
		return &status, nil
	}
	// we compute the minimum ordinal of the target sequence for a destructive update based on the strategy.
	updateMin := 0
	if set.Spec.UpdateStrategy.RollingUpdate != nil {
		updateMin = int(*set.Spec.UpdateStrategy.RollingUpdate.Partition)
	}
	// we terminate the Pod with the largest ordinal that does not match the update revision.
	for target := len(replicas) - 1; target >= updateMin; target-- {
		// delete the Pod if it is not already terminating and does not match the update revision.
		if getPodRevision(replicas[target]) != updateRevision.Name && !isTerminating(replicas[target]) {
			glog.V(4).Infof("StatefulSet %s/%s terminating Pod %s for update",
				set.Namespace,
				set.Name,
				replicas[target].Name)
			err := ssc.podControl.DeleteStatefulPod(set, replicas[target])
			status.CurrentReplicas--
			return &status, err
		}
		// wait for unhealthy Pods on update
		if !isHealthy(replicas[target]) {
			glog.V(4).Infof(
				"StatefulSet %s/%s is waiting for Pod %s to update",
				set.Namespace,
				set.Name,
				replicas[target].Name)
			return &status, nil
		}
	}
	return &status, nil
}
// updateStatefulSetStatus updates set's Status to be equal to status. If status indicates a complete update, it is
// mutated to indicate completion. If status is semantically equivalent to set's Status no update is performed. If the
// returned error is nil, the update is successful.
func (ssc *defaultStatefulSetControl) updateStatefulSetStatus(
	set *apps.StatefulSet,
	status *apps.StatefulSetStatus) error {
	// Complete any in-progress rolling update if necessary.
	completeRollingUpdate(set, status)
	// Skip the API round trip when nothing semantically changed.
	if !inconsistentStatus(set, status) {
		return nil
	}
	// Work on a deep copy so the shared informer cache is never mutated.
	set = set.DeepCopy()
	return ssc.statusUpdater.UpdateStatefulSetStatus(set, status)
}
var _ StatefulSetControlInterface = &defaultStatefulSetControl{}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,71 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package statefulset
import (
"fmt"
apps "k8s.io/api/apps/v1beta1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientset "k8s.io/client-go/kubernetes"
appslisters "k8s.io/client-go/listers/apps/v1beta1"
"k8s.io/client-go/util/retry"
)
// StatefulSetStatusUpdaterInterface is an interface used to update the StatefulSetStatus associated with a StatefulSet.
// For any use other than testing, clients should create an instance using NewRealStatefulSetStatusUpdater.
type StatefulSetStatusUpdaterInterface interface {
	// UpdateStatefulSetStatus sets the set's Status to status. Implementations are required to retry on conflicts,
	// but fail on other errors. If the returned error is nil set's Status has been successfully set to status.
	// Note: the real implementation in this package assigns *status into set.Status before each attempt,
	// so the caller's set is mutated even on failure.
	UpdateStatefulSetStatus(set *apps.StatefulSet, status *apps.StatefulSetStatus) error
}
// NewRealStatefulSetStatusUpdater returns a StatefulSetStatusUpdaterInterface that updates the Status of a StatefulSet,
// using the supplied client and setLister.
func NewRealStatefulSetStatusUpdater(
	client clientset.Interface,
	setLister appslisters.StatefulSetLister) StatefulSetStatusUpdaterInterface {
	return &realStatefulSetStatusUpdater{
		client:    client,
		setLister: setLister,
	}
}
// realStatefulSetStatusUpdater is the production StatefulSetStatusUpdaterInterface: it writes
// status through the API server and refreshes its copy from the lister on conflict.
type realStatefulSetStatusUpdater struct {
	// client talks to the API server to persist status updates.
	client clientset.Interface
	// setLister serves cached StatefulSets used to refresh set between conflict retries.
	setLister appslisters.StatefulSetLister
}
// UpdateStatefulSetStatus sets set's Status to status, retrying on resource-version
// conflicts and refreshing set from the lister between attempts.
func (ssu *realStatefulSetStatusUpdater) UpdateStatefulSetStatus(
	set *apps.StatefulSet,
	status *apps.StatefulSetStatus) error {
	// don't wait due to limited number of clients, but backoff after the default number of steps
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		set.Status = *status
		_, err := ssu.client.AppsV1beta1().StatefulSets(set.Namespace).UpdateStatus(set)
		if err == nil {
			return nil
		}
		// The update failed; start the next attempt from the freshest cached object.
		if updated, getErr := ssu.setLister.StatefulSets(set.Namespace).Get(set.Name); getErr == nil {
			// make a copy so we don't mutate the shared cache
			set = updated.DeepCopy()
		} else {
			utilruntime.HandleError(fmt.Errorf("error getting updated StatefulSet %s/%s from lister: %v", set.Namespace, set.Name, getErr))
		}
		return err
	})
}
var _ StatefulSetStatusUpdaterInterface = &realStatefulSetStatusUpdater{}

View File

@ -0,0 +1,141 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package statefulset
import (
"errors"
"testing"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
apps "k8s.io/api/apps/v1beta1"
"k8s.io/client-go/kubernetes/fake"
appslisters "k8s.io/client-go/listers/apps/v1beta1"
)
// TestStatefulSetUpdaterUpdatesSetStatus verifies that a successful status update
// applies the supplied status and does not corrupt the replica count on the way through.
func TestStatefulSetUpdaterUpdatesSetStatus(t *testing.T) {
	set := newStatefulSet(3)
	observedGeneration := int64(1)
	status := apps.StatefulSetStatus{ObservedGeneration: &observedGeneration, Replicas: 2}
	fakeClient := &fake.Clientset{}
	updater := NewRealStatefulSetStatusUpdater(fakeClient, nil)
	fakeClient.AddReactor("update", "statefulsets", func(action core.Action) (bool, runtime.Object, error) {
		// Echo the object back, simulating a successful API update.
		return true, action.(core.UpdateAction).GetObject(), nil
	})
	if err := updater.UpdateStatefulSetStatus(set, &status); err != nil {
		t.Errorf("Error returned on successful status update: %s", err)
	}
	if set.Status.Replicas != 2 {
		t.Errorf("UpdateStatefulSetStatus mutated the sets replicas %d", set.Status.Replicas)
	}
}
// TestStatefulSetStatusUpdaterUpdatesObservedGeneration verifies that the
// ObservedGeneration carried in status reaches the API server unchanged.
func TestStatefulSetStatusUpdaterUpdatesObservedGeneration(t *testing.T) {
	set := newStatefulSet(3)
	observedGeneration := int64(3)
	status := apps.StatefulSetStatus{ObservedGeneration: &observedGeneration, Replicas: 2}
	fakeClient := &fake.Clientset{}
	updater := NewRealStatefulSetStatusUpdater(fakeClient, nil)
	fakeClient.AddReactor("update", "statefulsets", func(action core.Action) (bool, runtime.Object, error) {
		// Inspect the object as it would hit the API server.
		sts := action.(core.UpdateAction).GetObject().(*apps.StatefulSet)
		if sts.Status.ObservedGeneration == nil || *sts.Status.ObservedGeneration != int64(3) {
			t.Errorf("expected observedGeneration to be synced with generation for statefulset %q", sts.Name)
		}
		return true, sts, nil
	})
	if err := updater.UpdateStatefulSetStatus(set, &status); err != nil {
		t.Errorf("Error returned on successful status update: %s", err)
	}
}
// TestStatefulSetStatusUpdaterUpdateReplicasFailure verifies that a non-conflict
// API error is surfaced to the caller rather than swallowed by the retry loop.
func TestStatefulSetStatusUpdaterUpdateReplicasFailure(t *testing.T) {
	set := newStatefulSet(3)
	observedGeneration := int64(3)
	status := apps.StatefulSetStatus{ObservedGeneration: &observedGeneration, Replicas: 2}
	fakeClient := &fake.Clientset{}
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
	indexer.Add(set)
	updater := NewRealStatefulSetStatusUpdater(fakeClient, appslisters.NewStatefulSetLister(indexer))
	fakeClient.AddReactor("update", "statefulsets", func(action core.Action) (bool, runtime.Object, error) {
		// Fail every attempt with an internal error; RetryOnConflict must not retry these.
		return true, nil, apierrors.NewInternalError(errors.New("API server down"))
	})
	if err := updater.UpdateStatefulSetStatus(set, &status); err == nil {
		t.Error("Failed update did not return error")
	}
}
// TestStatefulSetStatusUpdaterUpdateReplicasConflict verifies that a single
// resource-version conflict is retried and the update eventually succeeds.
func TestStatefulSetStatusUpdaterUpdateReplicasConflict(t *testing.T) {
	set := newStatefulSet(3)
	observedGeneration := int64(3)
	status := apps.StatefulSetStatus{ObservedGeneration: &observedGeneration, Replicas: 2}
	conflict := false
	fakeClient := &fake.Clientset{}
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
	indexer.Add(set)
	updater := NewRealStatefulSetStatusUpdater(fakeClient, appslisters.NewStatefulSetLister(indexer))
	fakeClient.AddReactor("update", "statefulsets", func(action core.Action) (bool, runtime.Object, error) {
		update := action.(core.UpdateAction)
		// First attempt conflicts; all subsequent attempts succeed.
		if conflict {
			return true, update.GetObject(), nil
		}
		conflict = true
		return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), set.Name, errors.New("Object already exists"))
	})
	if err := updater.UpdateStatefulSetStatus(set, &status); err != nil {
		t.Errorf("UpdateStatefulSetStatus returned an error: %s", err)
	}
	if set.Status.Replicas != 2 {
		t.Errorf("UpdateStatefulSetStatus mutated the sets replicas %d", set.Status.Replicas)
	}
}
// TestStatefulSetStatusUpdaterUpdateReplicasConflictFailure verifies that an
// unbroken stream of conflicts eventually exhausts the retry budget and errors out.
func TestStatefulSetStatusUpdaterUpdateReplicasConflictFailure(t *testing.T) {
	set := newStatefulSet(3)
	observedGeneration := int64(3)
	status := apps.StatefulSetStatus{ObservedGeneration: &observedGeneration, Replicas: 2}
	fakeClient := &fake.Clientset{}
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
	indexer.Add(set)
	updater := NewRealStatefulSetStatusUpdater(fakeClient, appslisters.NewStatefulSetLister(indexer))
	fakeClient.AddReactor("update", "statefulsets", func(action core.Action) (bool, runtime.Object, error) {
		// Conflict on every attempt so RetryOnConflict never succeeds.
		update := action.(core.UpdateAction)
		return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), set.Name, errors.New("Object already exists"))
	})
	if err := updater.UpdateStatefulSetStatus(set, &status); err == nil {
		t.Error("UpdateStatefulSetStatus failed to return an error on get failure")
	}
}

View File

@ -0,0 +1,708 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package statefulset
import (
"sort"
"testing"
apps "k8s.io/api/apps/v1beta1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/history"
)
func alwaysReady() bool { return true }
// TestStatefulSetControllerCreates verifies that scaling a fresh StatefulSet up
// drives Status.Replicas to the requested replica count.
func TestStatefulSetControllerCreates(t *testing.T) {
	set := newStatefulSet(3)
	ssc, spc := newFakeStatefulSetController(set)
	if err := scaleUpStatefulSetController(set, ssc, spc); err != nil {
		t.Errorf("Failed to turn up StatefulSet : %s", err)
	}
	// Re-read the set from the indexer to observe the status the controller wrote.
	if obj, _, err := spc.setsIndexer.Get(set); err != nil {
		t.Error(err)
	} else {
		set = obj.(*apps.StatefulSet)
	}
	if set.Status.Replicas != 3 {
		t.Errorf("set.Status.Replicas = %v; want 3", set.Status.Replicas)
	}
}
// TestStatefulSetControllerDeletes verifies that scaling to 3 and then down to 0
// removes all replicas and reports Status.Replicas == 0.
func TestStatefulSetControllerDeletes(t *testing.T) {
	set := newStatefulSet(3)
	ssc, spc := newFakeStatefulSetController(set)
	if err := scaleUpStatefulSetController(set, ssc, spc); err != nil {
		t.Errorf("Failed to turn up StatefulSet : %s", err)
	}
	// Refresh set from the indexer after scale up.
	if obj, _, err := spc.setsIndexer.Get(set); err != nil {
		t.Error(err)
	} else {
		set = obj.(*apps.StatefulSet)
	}
	if set.Status.Replicas != 3 {
		t.Errorf("set.Status.Replicas = %v; want 3", set.Status.Replicas)
	}
	// Now scale all the way down.
	*set.Spec.Replicas = 0
	if err := scaleDownStatefulSetController(set, ssc, spc); err != nil {
		t.Errorf("Failed to turn down StatefulSet : %s", err)
	}
	if obj, _, err := spc.setsIndexer.Get(set); err != nil {
		t.Error(err)
	} else {
		set = obj.(*apps.StatefulSet)
	}
	if set.Status.Replicas != 0 {
		t.Errorf("set.Status.Replicas = %v; want 0", set.Status.Replicas)
	}
}
// TestStatefulSetControllerRespectsTermination verifies that the controller does
// not delete further Pods while extra Pods (ordinals 3 and 4) are still terminating,
// and that scale down proceeds once they are gone.
func TestStatefulSetControllerRespectsTermination(t *testing.T) {
	set := newStatefulSet(3)
	ssc, spc := newFakeStatefulSetController(set)
	if err := scaleUpStatefulSetController(set, ssc, spc); err != nil {
		t.Errorf("Failed to turn up StatefulSet : %s", err)
	}
	if obj, _, err := spc.setsIndexer.Get(set); err != nil {
		t.Error(err)
	} else {
		set = obj.(*apps.StatefulSet)
	}
	if set.Status.Replicas != 3 {
		t.Errorf("set.Status.Replicas = %v; want 3", set.Status.Replicas)
	}
	// Inject two extra Pods that are already terminating.
	pods, err := spc.addTerminatingPod(set, 3)
	if err != nil {
		t.Error(err)
	}
	pods, err = spc.addTerminatingPod(set, 4)
	if err != nil {
		t.Error(err)
	}
	ssc.syncStatefulSet(set, pods)
	selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector)
	if err != nil {
		t.Error(err)
	}
	pods, err = spc.podsLister.Pods(set.Namespace).List(selector)
	if err != nil {
		t.Error(err)
	}
	// All five Pods must still exist: the terminating ones were not force-deleted.
	if len(pods) != 5 {
		t.Error("StatefulSet does not respect termination")
	}
	sort.Sort(ascendingOrdinal(pods))
	spc.DeleteStatefulPod(set, pods[3])
	spc.DeleteStatefulPod(set, pods[4])
	*set.Spec.Replicas = 0
	if err := scaleDownStatefulSetController(set, ssc, spc); err != nil {
		t.Errorf("Failed to turn down StatefulSet : %s", err)
	}
	if obj, _, err := spc.setsIndexer.Get(set); err != nil {
		t.Error(err)
	} else {
		set = obj.(*apps.StatefulSet)
	}
	if set.Status.Replicas != 0 {
		t.Errorf("set.Status.Replicas = %v; want 0", set.Status.Replicas)
	}
}
// TestStatefulSetControllerBlocksScaling verifies that scale up is blocked while a
// Pod is terminated/unhealthy, and resumes after the terminated Pod is removed.
func TestStatefulSetControllerBlocksScaling(t *testing.T) {
	set := newStatefulSet(3)
	ssc, spc := newFakeStatefulSetController(set)
	if err := scaleUpStatefulSetController(set, ssc, spc); err != nil {
		t.Errorf("Failed to turn up StatefulSet : %s", err)
	}
	if obj, _, err := spc.setsIndexer.Get(set); err != nil {
		t.Error(err)
	} else {
		set = obj.(*apps.StatefulSet)
	}
	if set.Status.Replicas != 3 {
		t.Errorf("set.Status.Replicas = %v; want 3", set.Status.Replicas)
	}
	// Ask for 5 replicas while the Pod at ordinal 0 is terminated.
	*set.Spec.Replicas = 5
	fakeResourceVersion(set)
	spc.setsIndexer.Update(set)
	pods, err := spc.setPodTerminated(set, 0)
	if err != nil {
		t.Error("Failed to set pod terminated at ordinal 0")
	}
	ssc.enqueueStatefulSet(set)
	fakeWorker(ssc)
	selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector)
	if err != nil {
		t.Error(err)
	}
	pods, err = spc.podsLister.Pods(set.Namespace).List(selector)
	if err != nil {
		t.Error(err)
	}
	// Still 3 Pods: no new replicas were created while ordinal 0 was terminated.
	if len(pods) != 3 {
		t.Error("StatefulSet does not block scaling")
	}
	sort.Sort(ascendingOrdinal(pods))
	spc.DeleteStatefulPod(set, pods[0])
	ssc.enqueueStatefulSet(set)
	fakeWorker(ssc)
	pods, err = spc.podsLister.Pods(set.Namespace).List(selector)
	if err != nil {
		t.Error(err)
	}
	// One sync after deletion: ordinal 0 is recreated, so the count holds at 3.
	if len(pods) != 3 {
		t.Error("StatefulSet does not resume when terminated Pod is removed")
	}
}
// TestStatefulSetControllerDeletionTimestamp verifies that a StatefulSet marked
// for deletion is not reconciled: no Pods are created for it.
func TestStatefulSetControllerDeletionTimestamp(t *testing.T) {
	set := newStatefulSet(3)
	set.DeletionTimestamp = new(metav1.Time)
	ssc, spc := newFakeStatefulSetController(set)

	spc.setsIndexer.Add(set)

	// Force a sync. It should not try to create any Pods.
	ssc.enqueueStatefulSet(set)
	fakeWorker(ssc)

	selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector)
	if err != nil {
		t.Fatal(err)
	}
	pods, err := spc.podsLister.Pods(set.Namespace).List(selector)
	if err != nil {
		t.Fatal(err)
	}
	if got, want := len(pods), 0; got != want {
		t.Errorf("len(pods) = %v, want %v", got, want)
	}
}
// TestStatefulSetControllerDeletionTimestampRace verifies the race where the API
// server reports the set as deleted but the local cache does not yet: the live
// recheck must win and no Pods may be created or adopted.
func TestStatefulSetControllerDeletionTimestampRace(t *testing.T) {
	set := newStatefulSet(3)
	// The bare client says it IS deleted.
	set.DeletionTimestamp = new(metav1.Time)
	ssc, spc := newFakeStatefulSetController(set)

	// The lister (cache) says it's NOT deleted.
	set2 := *set
	set2.DeletionTimestamp = nil
	spc.setsIndexer.Add(&set2)

	// The recheck occurs in the presence of a matching orphan.
	pod := newStatefulSetPod(set, 1)
	pod.OwnerReferences = nil
	spc.podsIndexer.Add(pod)

	// Force a sync. It should not try to create any Pods.
	ssc.enqueueStatefulSet(set)
	fakeWorker(ssc)

	selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector)
	if err != nil {
		t.Fatal(err)
	}
	pods, err := spc.podsLister.Pods(set.Namespace).List(selector)
	if err != nil {
		t.Fatal(err)
	}
	// Only the pre-existing orphan remains; it was neither adopted nor joined by new Pods.
	if got, want := len(pods), 1; got != want {
		t.Errorf("len(pods) = %v, want %v", got, want)
	}
}
// TestStatefulSetControllerAddPod verifies that an add event for an owned Pod
// enqueues exactly the owning StatefulSet's key.
func TestStatefulSetControllerAddPod(t *testing.T) {
	ssc, spc := newFakeStatefulSetController()
	set1 := newStatefulSet(3)
	set2 := newStatefulSet(3)
	pod1 := newStatefulSetPod(set1, 0)
	pod2 := newStatefulSetPod(set2, 0)
	spc.setsIndexer.Add(set1)
	spc.setsIndexer.Add(set2)

	ssc.addPod(pod1)
	key, done := ssc.queue.Get()
	if key == nil || done {
		t.Error("failed to enqueue StatefulSet")
	} else if key, ok := key.(string); !ok {
		t.Error("key is not a string")
	} else if expectedKey, _ := controller.KeyFunc(set1); expectedKey != key {
		t.Errorf("expected StatefulSet key %s found %s", expectedKey, key)
	}
	ssc.queue.Done(key)

	ssc.addPod(pod2)
	key, done = ssc.queue.Get()
	if key == nil || done {
		t.Error("failed to enqueue StatefulSet")
	} else if key, ok := key.(string); !ok {
		t.Error("key is not a string")
	} else if expectedKey, _ := controller.KeyFunc(set2); expectedKey != key {
		t.Errorf("expected StatefulSet key %s found %s", expectedKey, key)
	}
	ssc.queue.Done(key)
}
// TestStatefulSetControllerAddPodOrphan verifies that an orphan Pod enqueues
// every StatefulSet whose selector matches it (set1 and set2, not set3).
func TestStatefulSetControllerAddPodOrphan(t *testing.T) {
	ssc, spc := newFakeStatefulSetController()
	set1 := newStatefulSet(3)
	set2 := newStatefulSet(3)
	set2.Name = "foo2"
	set3 := newStatefulSet(3)
	set3.Name = "foo3"
	// set3 has a selector that does not match the Pod's labels.
	set3.Spec.Selector.MatchLabels = map[string]string{"foo3": "bar"}
	pod := newStatefulSetPod(set1, 0)
	spc.setsIndexer.Add(set1)
	spc.setsIndexer.Add(set2)
	spc.setsIndexer.Add(set3)

	// Make pod an orphan. Expect matching sets to be queued.
	pod.OwnerReferences = nil
	ssc.addPod(pod)
	if got, want := ssc.queue.Len(), 2; got != want {
		t.Errorf("queue.Len() = %v, want %v", got, want)
	}
}
// TestStatefulSetControllerAddPodNoSet verifies that an add event for a Pod whose
// owning StatefulSet is not in the cache enqueues nothing.
func TestStatefulSetControllerAddPodNoSet(t *testing.T) {
	ssc, _ := newFakeStatefulSetController()
	pod := newStatefulSetPod(newStatefulSet(3), 0)
	ssc.addPod(pod)
	// Shut down so Get returns immediately instead of blocking on an empty queue.
	ssc.queue.ShutDown()
	if key, _ := ssc.queue.Get(); key != nil {
		t.Errorf("StatefulSet enqueued key for Pod with no Set %s", key)
	}
}
// TestStatefulSetControllerUpdatePod verifies that an update event (with a new
// resource version) enqueues the Pod's owning StatefulSet.
func TestStatefulSetControllerUpdatePod(t *testing.T) {
	ssc, spc := newFakeStatefulSetController()
	set1 := newStatefulSet(3)
	set2 := newStatefulSet(3)
	set2.Name = "foo2"
	pod1 := newStatefulSetPod(set1, 0)
	pod2 := newStatefulSetPod(set2, 0)
	spc.setsIndexer.Add(set1)
	spc.setsIndexer.Add(set2)

	// Bump pod1's resource version so the update is not treated as a resync no-op.
	prev := *pod1
	fakeResourceVersion(pod1)
	ssc.updatePod(&prev, pod1)
	key, done := ssc.queue.Get()
	if key == nil || done {
		t.Error("failed to enqueue StatefulSet")
	} else if key, ok := key.(string); !ok {
		t.Error("key is not a string")
	} else if expectedKey, _ := controller.KeyFunc(set1); expectedKey != key {
		t.Errorf("expected StatefulSet key %s found %s", expectedKey, key)
	}

	prev = *pod2
	fakeResourceVersion(pod2)
	ssc.updatePod(&prev, pod2)
	key, done = ssc.queue.Get()
	if key == nil || done {
		t.Error("failed to enqueue StatefulSet")
	} else if key, ok := key.(string); !ok {
		t.Error("key is not a string")
	} else if expectedKey, _ := controller.KeyFunc(set2); expectedKey != key {
		t.Errorf("expected StatefulSet key %s found %s", expectedKey, key)
	}
}
// TestStatefulSetControllerUpdatePodWithNoSet verifies that an update event for a
// Pod with no cached owning StatefulSet enqueues nothing.
func TestStatefulSetControllerUpdatePodWithNoSet(t *testing.T) {
	ssc, _ := newFakeStatefulSetController()
	pod := newStatefulSetPod(newStatefulSet(3), 0)
	prev := *pod
	fakeResourceVersion(pod)
	ssc.updatePod(&prev, pod)
	// Shut down so Get returns immediately instead of blocking on an empty queue.
	ssc.queue.ShutDown()
	if key, _ := ssc.queue.Get(); key != nil {
		t.Errorf("StatefulSet enqueued key for Pod with no Set %s", key)
	}
}
// TestStatefulSetControllerUpdatePodWithSameVersion verifies that an update event
// whose old and new objects share a resource version is ignored.
func TestStatefulSetControllerUpdatePodWithSameVersion(t *testing.T) {
	ssc, spc := newFakeStatefulSetController()
	set := newStatefulSet(3)
	pod := newStatefulSetPod(set, 0)
	spc.setsIndexer.Add(set)
	// Same object on both sides of the update -> identical resource version.
	ssc.updatePod(pod, pod)
	ssc.queue.ShutDown()
	if key, _ := ssc.queue.Get(); key != nil {
		t.Errorf("StatefulSet enqueued key for Pod with no Set %s", key)
	}
}
// TestStatefulSetControllerUpdatePodOrphanWithNewLabels verifies that when an
// orphan Pod's labels change, every StatefulSet matching either the old or new
// labels is enqueued.
func TestStatefulSetControllerUpdatePodOrphanWithNewLabels(t *testing.T) {
	ssc, spc := newFakeStatefulSetController()
	set := newStatefulSet(3)
	pod := newStatefulSetPod(set, 0)
	pod.OwnerReferences = nil
	set2 := newStatefulSet(3)
	set2.Name = "foo2"
	spc.setsIndexer.Add(set)
	spc.setsIndexer.Add(set2)
	// clone is the "old" Pod, carrying different labels than the "new" pod.
	clone := *pod
	clone.Labels = map[string]string{"foo2": "bar2"}
	fakeResourceVersion(&clone)
	ssc.updatePod(&clone, pod)
	if got, want := ssc.queue.Len(), 2; got != want {
		t.Errorf("queue.Len() = %v, want %v", got, want)
	}
}
// TestStatefulSetControllerUpdatePodChangeControllerRef verifies that when a Pod's
// controller owner reference changes, both the old and new owners are enqueued.
func TestStatefulSetControllerUpdatePodChangeControllerRef(t *testing.T) {
	ssc, spc := newFakeStatefulSetController()
	set := newStatefulSet(3)
	set2 := newStatefulSet(3)
	set2.Name = "foo2"
	pod := newStatefulSetPod(set, 0)
	pod2 := newStatefulSetPod(set2, 0)
	spc.setsIndexer.Add(set)
	spc.setsIndexer.Add(set2)
	// clone is the "old" Pod, owned by set2; the "new" pod is owned by set.
	clone := *pod
	clone.OwnerReferences = pod2.OwnerReferences
	fakeResourceVersion(&clone)
	ssc.updatePod(&clone, pod)
	if got, want := ssc.queue.Len(), 2; got != want {
		t.Errorf("queue.Len() = %v, want %v", got, want)
	}
}
// TestStatefulSetControllerUpdatePodRelease verifies that when a Pod loses its
// owner reference, its former owner and any matching sets are enqueued.
func TestStatefulSetControllerUpdatePodRelease(t *testing.T) {
	ssc, spc := newFakeStatefulSetController()
	set := newStatefulSet(3)
	set2 := newStatefulSet(3)
	set2.Name = "foo2"
	pod := newStatefulSetPod(set, 0)
	spc.setsIndexer.Add(set)
	spc.setsIndexer.Add(set2)
	// clone is the "new" Pod: same labels, but no owner references.
	clone := *pod
	clone.OwnerReferences = nil
	fakeResourceVersion(&clone)
	ssc.updatePod(pod, &clone)
	if got, want := ssc.queue.Len(), 2; got != want {
		t.Errorf("queue.Len() = %v, want %v", got, want)
	}
}
// TestStatefulSetControllerDeletePod verifies that a delete event for an owned
// Pod enqueues exactly the owning StatefulSet's key.
func TestStatefulSetControllerDeletePod(t *testing.T) {
	ssc, spc := newFakeStatefulSetController()
	set1 := newStatefulSet(3)
	set2 := newStatefulSet(3)
	set2.Name = "foo2"
	pod1 := newStatefulSetPod(set1, 0)
	pod2 := newStatefulSetPod(set2, 0)
	spc.setsIndexer.Add(set1)
	spc.setsIndexer.Add(set2)

	ssc.deletePod(pod1)
	key, done := ssc.queue.Get()
	if key == nil || done {
		t.Error("failed to enqueue StatefulSet")
	} else if key, ok := key.(string); !ok {
		t.Error("key is not a string")
	} else if expectedKey, _ := controller.KeyFunc(set1); expectedKey != key {
		t.Errorf("expected StatefulSet key %s found %s", expectedKey, key)
	}

	ssc.deletePod(pod2)
	key, done = ssc.queue.Get()
	if key == nil || done {
		t.Error("failed to enqueue StatefulSet")
	} else if key, ok := key.(string); !ok {
		t.Error("key is not a string")
	} else if expectedKey, _ := controller.KeyFunc(set2); expectedKey != key {
		t.Errorf("expected StatefulSet key %s found %s", expectedKey, key)
	}
}
// TestStatefulSetControllerDeletePodOrphan verifies that deleting an orphan Pod
// (no owner references) enqueues nothing.
func TestStatefulSetControllerDeletePodOrphan(t *testing.T) {
	ssc, spc := newFakeStatefulSetController()
	set1 := newStatefulSet(3)
	set2 := newStatefulSet(3)
	set2.Name = "foo2"
	pod1 := newStatefulSetPod(set1, 0)
	spc.setsIndexer.Add(set1)
	spc.setsIndexer.Add(set2)

	pod1.OwnerReferences = nil
	ssc.deletePod(pod1)
	if got, want := ssc.queue.Len(), 0; got != want {
		t.Errorf("queue.Len() = %v, want %v", got, want)
	}
}
// TestStatefulSetControllerDeletePodTombstone verifies that a delete event carrying
// a cache.DeletedFinalStateUnknown tombstone still enqueues the owning set.
func TestStatefulSetControllerDeletePodTombstone(t *testing.T) {
	ssc, spc := newFakeStatefulSetController()
	set := newStatefulSet(3)
	pod := newStatefulSetPod(set, 0)
	spc.setsIndexer.Add(set)
	// Wrap the Pod in a tombstone, as delivered when the watch missed the delete.
	tombstoneKey, _ := controller.KeyFunc(pod)
	tombstone := cache.DeletedFinalStateUnknown{Key: tombstoneKey, Obj: pod}
	ssc.deletePod(tombstone)
	key, done := ssc.queue.Get()
	if key == nil || done {
		t.Error("failed to enqueue StatefulSet")
	} else if key, ok := key.(string); !ok {
		t.Error("key is not a string")
	} else if expectedKey, _ := controller.KeyFunc(set); expectedKey != key {
		t.Errorf("expected StatefulSet key %s found %s", expectedKey, key)
	}
}
// TestStatefulSetControllerGetStatefulSetsForPod verifies that getStatefulSetsForPod
// returns every StatefulSet whose selector matches the Pod's labels.
func TestStatefulSetControllerGetStatefulSetsForPod(t *testing.T) {
	ssc, spc := newFakeStatefulSetController()
	set1 := newStatefulSet(3)
	set2 := newStatefulSet(3)
	set2.Name = "foo2"
	pod := newStatefulSetPod(set1, 0)
	spc.setsIndexer.Add(set1)
	spc.setsIndexer.Add(set2)
	spc.podsIndexer.Add(pod)

	sets := ssc.getStatefulSetsForPod(pod)
	// Both sets share the same selector, so both match.
	if got, want := len(sets), 2; got != want {
		t.Errorf("len(sets) = %v, want %v", got, want)
	}
}
// TestGetPodsForStatefulSetAdopt verifies that getPodsForStatefulSet adopts a
// matching orphan (pod2) and ignores orphans with wrong labels (pod3) or a name
// that does not follow the set's naming convention (pod4).
func TestGetPodsForStatefulSetAdopt(t *testing.T) {
	set := newStatefulSet(5)
	ssc, spc := newFakeStatefulSetController(set)
	pod1 := newStatefulSetPod(set, 1)
	// pod2 is an orphan with matching labels and name.
	pod2 := newStatefulSetPod(set, 2)
	pod2.OwnerReferences = nil
	// pod3 has wrong labels.
	pod3 := newStatefulSetPod(set, 3)
	pod3.OwnerReferences = nil
	pod3.Labels = nil
	// pod4 has wrong name.
	pod4 := newStatefulSetPod(set, 4)
	pod4.OwnerReferences = nil
	pod4.Name = "x" + pod4.Name

	spc.podsIndexer.Add(pod1)
	spc.podsIndexer.Add(pod2)
	spc.podsIndexer.Add(pod3)
	spc.podsIndexer.Add(pod4)

	selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector)
	if err != nil {
		t.Fatal(err)
	}
	pods, err := ssc.getPodsForStatefulSet(set, selector)
	if err != nil {
		t.Fatalf("getPodsForStatefulSet() error: %v", err)
	}
	got := sets.NewString()
	for _, pod := range pods {
		got.Insert(pod.Name)
	}
	// pod2 should be claimed, pod3 and pod4 ignored
	want := sets.NewString(pod1.Name, pod2.Name)
	if !got.Equal(want) {
		t.Errorf("getPodsForStatefulSet() = %v, want %v", got, want)
	}
}
// TestGetPodsForStatefulSetRelease verifies that getPodsForStatefulSet releases
// owned Pods whose name (pod2) or labels (pod3) no longer match the set, and
// ignores orphans that do not match at all (pod4).
func TestGetPodsForStatefulSetRelease(t *testing.T) {
	set := newStatefulSet(3)
	ssc, spc := newFakeStatefulSetController(set)
	pod1 := newStatefulSetPod(set, 1)
	// pod2 is owned but has wrong name.
	pod2 := newStatefulSetPod(set, 2)
	pod2.Name = "x" + pod2.Name
	// pod3 is owned but has wrong labels.
	pod3 := newStatefulSetPod(set, 3)
	pod3.Labels = nil
	// pod4 is an orphan that doesn't match.
	pod4 := newStatefulSetPod(set, 4)
	pod4.OwnerReferences = nil
	pod4.Labels = nil

	spc.podsIndexer.Add(pod1)
	spc.podsIndexer.Add(pod2)
	spc.podsIndexer.Add(pod3)
	// BUG FIX: pod4 was previously constructed but never added to the indexer,
	// so the "orphan that doesn't match" case was never actually exercised.
	spc.podsIndexer.Add(pod4)

	selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector)
	if err != nil {
		t.Fatal(err)
	}
	pods, err := ssc.getPodsForStatefulSet(set, selector)
	if err != nil {
		t.Fatalf("getPodsForStatefulSet() error: %v", err)
	}
	got := sets.NewString()
	for _, pod := range pods {
		got.Insert(pod.Name)
	}
	// Expect only pod1 (pod2 and pod3 should be released, pod4 ignored).
	want := sets.NewString(pod1.Name)
	if !got.Equal(want) {
		t.Errorf("getPodsForStatefulSet() = %v, want %v", got, want)
	}
}
// newFakeStatefulSetController builds a StatefulSetController wired entirely to
// fakes: a fake clientset seeded with initialObjects, fake pod control, a fake
// status updater, and fake revision history, with cache syncs stubbed to ready.
func newFakeStatefulSetController(initialObjects ...runtime.Object) (*StatefulSetController, *fakeStatefulPodControl) {
	client := fake.NewSimpleClientset(initialObjects...)
	informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
	fpc := newFakeStatefulPodControl(informerFactory.Core().V1().Pods(), informerFactory.Apps().V1beta1().StatefulSets())
	ssu := newFakeStatefulSetStatusUpdater(informerFactory.Apps().V1beta1().StatefulSets())
	ssc := NewStatefulSetController(
		informerFactory.Core().V1().Pods(),
		informerFactory.Apps().V1beta1().StatefulSets(),
		informerFactory.Core().V1().PersistentVolumeClaims(),
		informerFactory.Apps().V1beta1().ControllerRevisions(),
		client,
	)
	ssh := history.NewFakeHistory(informerFactory.Apps().V1beta1().ControllerRevisions())
	// Report caches as synced so tests never block waiting for informers.
	ssc.podListerSynced = alwaysReady
	ssc.setListerSynced = alwaysReady
	// Swap in a control loop backed entirely by the fakes above.
	ssc.control = NewDefaultStatefulSetControl(fpc, ssu, ssh)
	return ssc, fpc
}
// fakeWorker drains one key from the controller's queue and syncs it, standing in
// for the controller's background worker goroutine.
func fakeWorker(ssc *StatefulSetController) {
	obj, done := ssc.queue.Get()
	if done {
		return
	}
	ssc.sync(obj.(string))
	ssc.queue.Done(obj)
}
// getPodAtOrdinal sorts pods into ordinal order and returns the Pod at the given
// ordinal position, or nil when the ordinal is out of range.
// Note: the sort mutates the caller's slice.
func getPodAtOrdinal(pods []*v1.Pod, ordinal int) *v1.Pod {
	if ordinal < 0 || ordinal >= len(pods) {
		return nil
	}
	sort.Sort(ascendingOrdinal(pods))
	return pods[ordinal]
}
// scaleUpStatefulSetController drives the controller through a full scale up:
// for each new replica it simulates the Pod progressing Pending -> Running ->
// Ready, syncing the controller after each transition and asserting the
// monotonic invariants throughout.
func scaleUpStatefulSetController(set *apps.StatefulSet, ssc *StatefulSetController, spc *fakeStatefulPodControl) error {
	spc.setsIndexer.Add(set)
	ssc.enqueueStatefulSet(set)
	fakeWorker(ssc)
	selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector)
	if err != nil {
		return err
	}
	for set.Status.ReadyReplicas < *set.Spec.Replicas {
		pods, err := spc.podsLister.Pods(set.Namespace).List(selector)
		if err != nil {
			return err
		}
		// The newest Pod is the one with the highest ordinal.
		ord := len(pods) - 1
		pod := getPodAtOrdinal(pods, ord)
		if pods, err = spc.setPodPending(set, ord); err != nil {
			return err
		}
		pod = getPodAtOrdinal(pods, ord)
		ssc.addPod(pod)
		fakeWorker(ssc)
		// Transition the Pod to Running and deliver the update event.
		pod = getPodAtOrdinal(pods, ord)
		prev := *pod
		if pods, err = spc.setPodRunning(set, ord); err != nil {
			return err
		}
		pod = getPodAtOrdinal(pods, ord)
		ssc.updatePod(&prev, pod)
		fakeWorker(ssc)
		// Transition the Pod to Ready and deliver the update event.
		pod = getPodAtOrdinal(pods, ord)
		prev = *pod
		if pods, err = spc.setPodReady(set, ord); err != nil {
			return err
		}
		pod = getPodAtOrdinal(pods, ord)
		ssc.updatePod(&prev, pod)
		fakeWorker(ssc)
		if err := assertMonotonicInvariants(set, spc); err != nil {
			return err
		}
		// Refresh set so the loop condition sees the controller's latest status.
		if obj, _, err := spc.setsIndexer.Get(set); err != nil {
			return err
		} else {
			set = obj.(*apps.StatefulSet)
		}
	}
	return assertMonotonicInvariants(set, spc)
}
// scaleDownStatefulSetController drives the controller through a full scale down:
// each replica, starting from the highest ordinal, is marked terminating, the
// update is delivered, the Pod is deleted, and the delete event is delivered,
// until Status.Replicas reaches Spec.Replicas.
func scaleDownStatefulSetController(set *apps.StatefulSet, ssc *StatefulSetController, spc *fakeStatefulPodControl) error {
	selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector)
	if err != nil {
		return err
	}
	pods, err := spc.podsLister.Pods(set.Namespace).List(selector)
	if err != nil {
		return err
	}
	// Start with the highest-ordinal Pod.
	ord := len(pods) - 1
	pod := getPodAtOrdinal(pods, ord)
	prev := *pod
	fakeResourceVersion(set)
	spc.setsIndexer.Add(set)
	ssc.enqueueStatefulSet(set)
	fakeWorker(ssc)
	pods, err = spc.addTerminatingPod(set, ord)
	if err != nil {
		return err
	}
	pod = getPodAtOrdinal(pods, ord)
	ssc.updatePod(&prev, pod)
	fakeWorker(ssc)
	spc.DeleteStatefulPod(set, pod)
	ssc.deletePod(pod)
	fakeWorker(ssc)
	for set.Status.Replicas > *set.Spec.Replicas {
		pods, err = spc.podsLister.Pods(set.Namespace).List(selector)
		if err != nil {
			return err
		}
		// NOTE(review): ord here is len(pods), one past the last listed Pod;
		// addTerminatingPod appears to create the Pod at that ordinal so the
		// subsequent lookup succeeds — TODO confirm against fakeStatefulPodControl.
		ord := len(pods)
		pods, err = spc.addTerminatingPod(set, ord)
		if err != nil {
			return err
		}
		pod = getPodAtOrdinal(pods, ord)
		ssc.updatePod(&prev, pod)
		fakeWorker(ssc)
		spc.DeleteStatefulPod(set, pod)
		ssc.deletePod(pod)
		fakeWorker(ssc)
		// Refresh set so the loop condition sees the controller's latest status.
		if obj, _, err := spc.setsIndexer.Get(set); err != nil {
			return err
		} else {
			set = obj.(*apps.StatefulSet)
		}
	}
	return assertMonotonicInvariants(set, spc)
}

View File

@ -0,0 +1,404 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package statefulset
import (
"bytes"
"encoding/json"
"fmt"
"regexp"
"strconv"
apps "k8s.io/api/apps/v1beta1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/client-go/kubernetes/scheme"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/history"
)
// maxUpdateRetries is the maximum number of retries used for update conflict resolution prior to failure
const maxUpdateRetries = 10

// updateConflictError is the error used to indicate that the maximum number of retries against the API server have
// been attempted and we need to back off
var updateConflictError = fmt.Errorf("aborting update after %d attempts", maxUpdateRetries)

// patchCodec serializes objects in the apps group version when building patches.
var patchCodec = scheme.Codecs.LegacyCodec(apps.SchemeGroupVersion)
// overlappingStatefulSets sorts a list of StatefulSets by creation timestamp, using their names as a tie breaker.
// Generally used to tie break between StatefulSets that have overlapping selectors.
type overlappingStatefulSets []*apps.StatefulSet

// Len returns the number of StatefulSets in the collection.
func (o overlappingStatefulSets) Len() int { return len(o) }

// Swap exchanges the elements at indices i and j.
func (o overlappingStatefulSets) Swap(i, j int) { o[i], o[j] = o[j], o[i] }

// Less orders by CreationTimestamp ascending, breaking ties by Name.
func (o overlappingStatefulSets) Less(i, j int) bool {
	a, b := o[i], o[j]
	if a.CreationTimestamp.Equal(&b.CreationTimestamp) {
		return a.Name < b.Name
	}
	return a.CreationTimestamp.Before(&b.CreationTimestamp)
}
// statefulPodRegex is a regular expression that extracts the parent StatefulSet and ordinal from the Name of a Pod
var statefulPodRegex = regexp.MustCompile("(.*)-([0-9]+)$")

// getParentNameAndOrdinal gets the name of pod's parent StatefulSet and pod's ordinal as extracted from its Name. If
// the Pod was not created by a StatefulSet, its parent is considered to be empty string, and its ordinal is considered
// to be -1.
func getParentNameAndOrdinal(pod *v1.Pod) (string, int) {
	subMatches := statefulPodRegex.FindStringSubmatch(pod.Name)
	if len(subMatches) < 3 {
		// The Pod name does not follow the <parent>-<ordinal> convention.
		return "", -1
	}
	ordinal := -1
	if i, err := strconv.ParseInt(subMatches[2], 10, 32); err == nil {
		ordinal = int(i)
	}
	return subMatches[1], ordinal
}
// getParentName gets the name of pod's parent StatefulSet. If pod has no parent, the empty string is returned.
func getParentName(pod *v1.Pod) string {
	name, _ := getParentNameAndOrdinal(pod)
	return name
}
// getOrdinal gets pod's ordinal. If pod has no ordinal, -1 is returned.
func getOrdinal(pod *v1.Pod) int {
	_, idx := getParentNameAndOrdinal(pod)
	return idx
}
// getPodName gets the name of set's child Pod with an ordinal index of ordinal (i.e. "<setName>-<ordinal>").
func getPodName(set *apps.StatefulSet, ordinal int) string {
	return fmt.Sprintf("%s-%d", set.Name, ordinal)
}
// getPersistentVolumeClaimName gets the name of the PersistentVolumeClaim for a Pod with an ordinal index of
// ordinal. claim must be a PersistentVolumeClaim from set's VolumeClaimTemplates.
func getPersistentVolumeClaimName(set *apps.StatefulSet, claim *v1.PersistentVolumeClaim, ordinal int) string {
	// NOTE: This name format is used by the heuristics for zone spreading in ChooseZoneForVolume
	return fmt.Sprintf("%s-%s-%d", claim.Name, set.Name, ordinal)
}
// isMemberOf tests if pod is a member of set, i.e. whether pod's Name was generated from set's Name.
func isMemberOf(set *apps.StatefulSet, pod *v1.Pod) bool {
	return set.Name == getParentName(pod)
}
// identityMatches returns true if pod has a valid identity and network identity for a member of set: a
// non-negative ordinal, the name and namespace derived from set, and the pod-name label set to its own name.
func identityMatches(set *apps.StatefulSet, pod *v1.Pod) bool {
	parent, ordinal := getParentNameAndOrdinal(pod)
	if ordinal < 0 || parent != set.Name {
		return false
	}
	return pod.Name == getPodName(set, ordinal) &&
		pod.Namespace == set.Namespace &&
		pod.Labels[apps.StatefulSetPodNameLabel] == pod.Name
}
// storageMatches returns true if, for every one of set's VolumeClaimTemplates, pod mounts a Volume backed by
// the PersistentVolumeClaim named for this pod's ordinal (see getPersistentVolumeClaimName).
func storageMatches(set *apps.StatefulSet, pod *v1.Pod) bool {
	ordinal := getOrdinal(pod)
	if ordinal < 0 {
		return false
	}
	// Index the pod's Volumes by name for O(1) lookup per template.
	byName := make(map[string]v1.Volume, len(pod.Spec.Volumes))
	for _, vol := range pod.Spec.Volumes {
		byName[vol.Name] = vol
	}
	for i := range set.Spec.VolumeClaimTemplates {
		claim := &set.Spec.VolumeClaimTemplates[i]
		vol, ok := byName[claim.Name]
		if !ok {
			return false
		}
		pvc := vol.VolumeSource.PersistentVolumeClaim
		if pvc == nil || pvc.ClaimName != getPersistentVolumeClaimName(set, claim, ordinal) {
			return false
		}
	}
	return true
}
// getPersistentVolumeClaims gets a map from VolumeClaimTemplate name to a PersistentVolumeClaim constructed
// for pod: each claim is a copy of the template with its Name made pod-specific (see
// getPersistentVolumeClaimName), its Namespace set to set's, and its Labels taken from set's selector.
func getPersistentVolumeClaims(set *apps.StatefulSet, pod *v1.Pod) map[string]v1.PersistentVolumeClaim {
	ordinal := getOrdinal(pod)
	templates := set.Spec.VolumeClaimTemplates
	claims := make(map[string]v1.PersistentVolumeClaim, len(templates))
	for _, claim := range templates {
		// claim is a copy, so the template in set is never mutated.
		templateName := claim.Name
		claim.Name = getPersistentVolumeClaimName(set, &claim, ordinal)
		claim.Namespace = set.Namespace
		claim.Labels = set.Spec.Selector.MatchLabels
		claims[templateName] = claim
	}
	return claims
}
// updateStorage rewrites pod's Volumes to conform with the PersistentVolumeClaims of set's templates. A Volume
// whose name collides with a template is replaced with a conforming claim-backed Volume; all other Volumes are
// preserved.
func updateStorage(set *apps.StatefulSet, pod *v1.Pod) {
	claims := getPersistentVolumeClaims(set, pod)
	volumes := make([]v1.Volume, 0, len(claims))
	for name, claim := range claims {
		volumes = append(volumes, v1.Volume{
			Name: name,
			VolumeSource: v1.VolumeSource{
				PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
					ClaimName: claim.Name,
					// TODO: Use source definition to set this value when we have one.
					ReadOnly: false,
				},
			},
		})
	}
	// Keep every pre-existing Volume whose name does not collide with a claim template.
	for _, vol := range pod.Spec.Volumes {
		if _, isClaim := claims[vol.Name]; !isClaim {
			volumes = append(volumes, vol)
		}
	}
	pod.Spec.Volumes = volumes
}
// initIdentity establishes the full identity of a newly created Pod: the mutable identity fields (via
// updateIdentity) plus the hostname and subdomain, which are immutable after creation.
func initIdentity(set *apps.StatefulSet, pod *v1.Pod) {
	updateIdentity(set, pod)
	// Set these immutable fields only on initial Pod creation, not updates.
	pod.Spec.Hostname = pod.Name
	pod.Spec.Subdomain = set.Spec.ServiceName
}

// updateIdentity updates pod's name, namespace, and StatefulSetPodNameLabel to conform to set's name and
// pod's ordinal.
func updateIdentity(set *apps.StatefulSet, pod *v1.Pod) {
	pod.Name = getPodName(set, getOrdinal(pod))
	pod.Namespace = set.Namespace
	labels := pod.Labels
	if labels == nil {
		labels = make(map[string]string)
		pod.Labels = labels
	}
	labels[apps.StatefulSetPodNameLabel] = pod.Name
}
// isRunningAndReady returns true if pod is in the PodRunning phase and carries a PodReady condition.
func isRunningAndReady(pod *v1.Pod) bool {
	if pod.Status.Phase != v1.PodRunning {
		return false
	}
	return podutil.IsPodReady(pod)
}

// isCreated returns true if pod has been created and is maintained by the API server (its phase has been set).
func isCreated(pod *v1.Pod) bool {
	return len(pod.Status.Phase) != 0
}

// isFailed returns true if pod has reached the PodFailed phase.
func isFailed(pod *v1.Pod) bool {
	return pod.Status.Phase == v1.PodFailed
}

// isTerminating returns true if pod has been marked for deletion (its DeletionTimestamp is set).
func isTerminating(pod *v1.Pod) bool {
	return pod.DeletionTimestamp != nil
}

// isHealthy returns true if pod is running and ready and has not been marked for deletion.
func isHealthy(pod *v1.Pod) bool {
	return !isTerminating(pod) && isRunningAndReady(pod)
}

// allowsBurst returns true if set's PodManagementPolicy is Parallel, i.e. Pods may be managed in parallel.
func allowsBurst(set *apps.StatefulSet) bool {
	return set.Spec.PodManagementPolicy == apps.ParallelPodManagement
}
// setPodRevision records revision on pod via the StatefulSetRevisionLabel, allocating the label map if needed.
func setPodRevision(pod *v1.Pod, revision string) {
	if pod.Labels == nil {
		pod.Labels = make(map[string]string)
	}
	pod.Labels[apps.StatefulSetRevisionLabel] = revision
}

// getPodRevision returns the revision recorded on pod via the StatefulSetRevisionLabel, or the empty string
// when the label (or the label map itself) is absent.
func getPodRevision(pod *v1.Pod) string {
	// Indexing a nil map safely yields the zero value, so no explicit nil check is required.
	return pod.Labels[apps.StatefulSetRevisionLabel]
}
// newStatefulSetPod returns a new Pod conforming to the set's Spec, with its identity (name, namespace,
// hostname, subdomain, pod-name label) and storage (claim-backed Volumes) generated from ordinal.
func newStatefulSetPod(set *apps.StatefulSet, ordinal int) *v1.Pod {
	// NOTE(review): the error from GetPodFromTemplate is discarded; presumably it cannot fail for an
	// already-validated template — confirm before relying on this with unvalidated input.
	pod, _ := controller.GetPodFromTemplate(&set.Spec.Template, set, metav1.NewControllerRef(set, controllerKind))
	pod.Name = getPodName(set, ordinal)
	initIdentity(set, pod)
	updateStorage(set, pod)
	return pod
}
// newVersionedStatefulSetPod creates a new Pod for a StatefulSet. currentSet is the representation of the set at the
// current revision. updateSet is the representation of the set at the updateRevision. currentRevision is the name of
// the current revision. updateRevision is the name of the update revision. ordinal is the ordinal of the Pod. If the
// returned error is nil, the returned Pod is valid.
func newVersionedStatefulSetPod(currentSet, updateSet *apps.StatefulSet, currentRevision, updateRevision string, ordinal int) *v1.Pod {
	// The Pod is created from the current revision when the strategy is RollingUpdate and the ordinal falls
	// below the partition (or below CurrentReplicas when no explicit partition is set). The strategy-type
	// check must gate BOTH disjuncts: && binds tighter than ||, so without the extra parentheses the second
	// disjunct could select the current revision for a non-RollingUpdate strategy.
	if currentSet.Spec.UpdateStrategy.Type == apps.RollingUpdateStatefulSetStrategyType &&
		((currentSet.Spec.UpdateStrategy.RollingUpdate == nil && ordinal < int(currentSet.Status.CurrentReplicas)) ||
			(currentSet.Spec.UpdateStrategy.RollingUpdate != nil && ordinal < int(*currentSet.Spec.UpdateStrategy.RollingUpdate.Partition))) {
		pod := newStatefulSetPod(currentSet, ordinal)
		setPodRevision(pod, currentRevision)
		return pod
	}
	// Otherwise the Pod is created at the update revision.
	pod := newStatefulSetPod(updateSet, ordinal)
	setPodRevision(pod, updateRevision)
	return pod
}
// Match checks whether the given StatefulSet's template matches the template stored in the given history.
// It compares the strategic-merge patch computed from ss (see getPatch) byte-for-byte against the patch
// recorded in history.Data.Raw.
func Match(ss *apps.StatefulSet, history *apps.ControllerRevision) (bool, error) {
	patch, err := getPatch(ss)
	if err != nil {
		return false, err
	}
	return bytes.Equal(patch, history.Data.Raw), nil
}
// getPatch returns a strategic merge patch that can be applied to restore a StatefulSet to a
// previous version. If the returned error is nil the patch is valid. The current state that we save is just the
// PodSpecTemplate. We can modify this later to encompass more state (or less) and remain compatible with previously
// recorded patches.
func getPatch(set *apps.StatefulSet) ([]byte, error) {
	str, err := runtime.Encode(patchCodec, set)
	if err != nil {
		return nil, err
	}
	var raw map[string]interface{}
	// The original ignored this error; a failed decode would then panic on the type assertions below.
	if err := json.Unmarshal([]byte(str), &raw); err != nil {
		return nil, err
	}
	// Keep only spec.template, marked with "$patch": "replace" so applying the patch swaps the template
	// wholesale rather than merging it.
	objCopy := make(map[string]interface{})
	specCopy := make(map[string]interface{})
	spec := raw["spec"].(map[string]interface{})
	template := spec["template"].(map[string]interface{})
	specCopy["template"] = template
	template["$patch"] = "replace"
	objCopy["spec"] = specCopy
	return json.Marshal(objCopy)
}
// newRevision creates a new ControllerRevision containing a patch that reapplies the target state of set.
// The Revision of the returned ControllerRevision is set to revision. If the returned error is nil, the returned
// ControllerRevision is valid. StatefulSet revisions are stored as patches that re-apply the current state of set
// to a new StatefulSet using a strategic merge patch to replace the saved state of the new StatefulSet.
func newRevision(set *apps.StatefulSet, revision int64, collisionCount *int32) (*apps.ControllerRevision, error) {
	patch, err := getPatch(set)
	if err != nil {
		return nil, err
	}
	selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector)
	if err != nil {
		return nil, err
	}
	cr, err := history.NewControllerRevision(set,
		controllerKind,
		selector,
		runtime.RawExtension{Raw: patch},
		revision,
		collisionCount)
	if err != nil {
		return nil, err
	}
	if cr.ObjectMeta.Annotations == nil {
		cr.ObjectMeta.Annotations = make(map[string]string)
	}
	// Copy the set's annotations onto the revision so they can be restored with the rest of its state.
	for key, value := range set.Annotations {
		cr.ObjectMeta.Annotations[key] = value
	}
	return cr, nil
}
// ApplyRevision returns a new StatefulSet constructed by applying the strategic-merge patch stored in revision
// to a deep copy of set. set itself is never mutated. If the returned error is nil, the returned StatefulSet
// is valid.
func ApplyRevision(set *apps.StatefulSet, revision *apps.ControllerRevision) (*apps.StatefulSet, error) {
	restored := set.DeepCopy()
	current := runtime.EncodeOrDie(patchCodec, restored)
	patched, err := strategicpatch.StrategicMergePatch([]byte(current), revision.Data.Raw, restored)
	if err != nil {
		return nil, err
	}
	if err := json.Unmarshal(patched, restored); err != nil {
		return nil, err
	}
	return restored, nil
}
// nextRevision returns the next valid revision number: 1 when revisions is empty, otherwise one greater than
// the Revision of the last element. This method assumes that revisions has been sorted by Revision.
func nextRevision(revisions []*apps.ControllerRevision) int64 {
	if len(revisions) == 0 {
		return 1
	}
	return revisions[len(revisions)-1].Revision + 1
}
// inconsistentStatus returns true if set's recorded Status does not yet reflect status: set has never had its
// status observed (set.Status.ObservedGeneration is nil), status was produced from a newer observed generation,
// or any of the replica counts or revision names differ between status and set.Status.
func inconsistentStatus(set *apps.StatefulSet, status *apps.StatefulSetStatus) bool {
	// NOTE(review): status.ObservedGeneration is dereferenced unconditionally; callers are presumed to always
	// populate it — confirm before reusing this helper with a hand-built status.
	return set.Status.ObservedGeneration == nil ||
		*status.ObservedGeneration > *set.Status.ObservedGeneration ||
		status.Replicas != set.Status.Replicas ||
		status.CurrentReplicas != set.Status.CurrentReplicas ||
		status.ReadyReplicas != set.Status.ReadyReplicas ||
		status.UpdatedReplicas != set.Status.UpdatedReplicas ||
		status.CurrentRevision != set.Status.CurrentRevision ||
		status.UpdateRevision != set.Status.UpdateRevision
}
// completeRollingUpdate completes a rolling update when all of set's replica Pods have been updated and are
// ready: status's currentRevision is set to updateRevision and its currentReplicas are set to updatedReplicas.
// It is a no-op for non-RollingUpdate strategies or while any replica is not yet updated and ready.
func completeRollingUpdate(set *apps.StatefulSet, status *apps.StatefulSetStatus) {
	if set.Spec.UpdateStrategy.Type == apps.RollingUpdateStatefulSetStrategyType &&
		status.UpdatedReplicas == status.Replicas &&
		status.ReadyReplicas == status.Replicas {
		status.CurrentReplicas = status.UpdatedReplicas
		status.CurrentRevision = status.UpdateRevision
	}
}
// ascendingOrdinal is a sort.Interface that sorts Pods by the ordinal extracted from their names. Pods that
// were not constructed by a StatefulSet have an ordinal of -1 and therefore sort to the front of the list.
type ascendingOrdinal []*v1.Pod

func (ao ascendingOrdinal) Len() int      { return len(ao) }
func (ao ascendingOrdinal) Swap(i, j int) { ao[i], ao[j] = ao[j], ao[i] }
func (ao ascendingOrdinal) Less(i, j int) bool {
	return getOrdinal(ao[i]) < getOrdinal(ao[j])
}

View File

@ -0,0 +1,375 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package statefulset
import (
"fmt"
"math/rand"
"sort"
"strconv"
"testing"
"time"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
apps "k8s.io/api/apps/v1beta1"
"k8s.io/api/core/v1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller/history"
)
// TestGetParentNameAndOrdinal verifies parsing of a StatefulSet Pod's name into its parent name and ordinal.
func TestGetParentNameAndOrdinal(t *testing.T) {
	set := newStatefulSet(3)
	pod := newStatefulSetPod(set, 1)
	parent, ordinal := getParentNameAndOrdinal(pod)
	switch {
	case parent != set.Name:
		t.Errorf("Extracted the wrong parent name expected %s found %s", set.Name, parent)
	case ordinal != 1:
		t.Errorf("Extracted the wrong ordinal expected %d found %d", 1, ordinal)
	}
	// A name with a leading ordinal is not a StatefulSet member name.
	pod.Name = "1-bar"
	parent, ordinal = getParentNameAndOrdinal(pod)
	switch {
	case parent != "":
		t.Error("Expected empty string for non-member Pod parent")
	case ordinal != -1:
		t.Error("Expected -1 for non member Pod ordinal")
	}
}
// TestIsMemberOf verifies that isMemberOf matches a Pod only to the StatefulSet its name was generated from.
func TestIsMemberOf(t *testing.T) {
	set := newStatefulSet(3)
	set2 := newStatefulSet(3)
	set2.Name = "foo2"
	pod := newStatefulSetPod(set, 1)
	if !isMemberOf(set, pod) {
		// Fixed typo in the failure message ("retruned").
		t.Error("isMemberOf returned false negative")
	}
	if isMemberOf(set2, pod) {
		t.Error("isMemberOf returned false positive")
	}
}
// TestIdentityMatches verifies that identityMatches rejects Pods whose name, namespace, or pod-name label
// deviates from the identity the set would generate.
func TestIdentityMatches(t *testing.T) {
	set := newStatefulSet(3)
	freshPod := func() *v1.Pod { return newStatefulSetPod(set, 1) }
	pod := freshPod()
	if !identityMatches(set, pod) {
		t.Error("Newly created Pod has a bad identity")
	}
	pod.Name = "foo"
	if identityMatches(set, pod) {
		t.Error("identity matches for a Pod with the wrong name")
	}
	pod = freshPod()
	pod.Namespace = ""
	if identityMatches(set, pod) {
		t.Error("identity matches for a Pod with the wrong namespace")
	}
	pod = freshPod()
	delete(pod.Labels, apps.StatefulSetPodNameLabel)
	if identityMatches(set, pod) {
		t.Error("identity matches for a Pod with the wrong statefulSetPodNameLabel")
	}
}
// TestStorageMatches verifies that storageMatches rejects Pods whose Volumes do not cover set's
// VolumeClaimTemplates with correctly named PersistentVolumeClaims.
func TestStorageMatches(t *testing.T) {
	set := newStatefulSet(3)
	pod := newStatefulSetPod(set, 1)
	if !storageMatches(set, pod) {
		// Fixed typos in the failure message ("a invalid stroage").
		t.Error("Newly created Pod has an invalid storage")
	}
	pod.Spec.Volumes = nil
	if storageMatches(set, pod) {
		t.Error("Pod with invalid Volumes has valid storage")
	}
	// A Volume without a PersistentVolumeClaim source does not satisfy a claim template.
	pod = newStatefulSetPod(set, 1)
	for i := range pod.Spec.Volumes {
		pod.Spec.Volumes[i].PersistentVolumeClaim = nil
	}
	if storageMatches(set, pod) {
		t.Error("Pod with invalid Volumes claim valid storage")
	}
	// A claim with the wrong name does not satisfy a claim template.
	pod = newStatefulSetPod(set, 1)
	for i := range pod.Spec.Volumes {
		if pod.Spec.Volumes[i].PersistentVolumeClaim != nil {
			pod.Spec.Volumes[i].PersistentVolumeClaim.ClaimName = "foo"
		}
	}
	if storageMatches(set, pod) {
		t.Error("Pod with invalid Volumes claim valid storage")
	}
	// A Pod with no parseable ordinal can never match.
	pod = newStatefulSetPod(set, 1)
	pod.Name = "bar"
	if storageMatches(set, pod) {
		t.Error("Pod with invalid ordinal has valid storage")
	}
}
// TestUpdateIdentity verifies that updateIdentity repairs a Pod whose identity has drifted from its set.
func TestUpdateIdentity(t *testing.T) {
	set := newStatefulSet(3)
	pod := newStatefulSetPod(set, 1)
	if !identityMatches(set, pod) {
		t.Error("Newly created Pod has a bad identity")
	}
	// Break the namespace, confirm the mismatch, then check updateIdentity restores it.
	pod.Namespace = ""
	if identityMatches(set, pod) {
		t.Error("identity matches for a Pod with the wrong namespace")
	}
	updateIdentity(set, pod)
	if !identityMatches(set, pod) {
		t.Error("updateIdentity failed to update the Pods namespace")
	}
	// Remove the pod-name label, then check updateIdentity restores it.
	delete(pod.Labels, apps.StatefulSetPodNameLabel)
	updateIdentity(set, pod)
	if !identityMatches(set, pod) {
		t.Error("updateIdentity failed to restore the statefulSetPodName label")
	}
}
// TestUpdateStorage verifies that updateStorage repairs a Pod's Volumes to match set's VolumeClaimTemplates.
func TestUpdateStorage(t *testing.T) {
	set := newStatefulSet(3)
	pod := newStatefulSetPod(set, 1)
	if !storageMatches(set, pod) {
		// Fixed typos in the failure message ("a invalid stroage").
		t.Error("Newly created Pod has an invalid storage")
	}
	// Dropped Volumes must be recreated.
	pod.Spec.Volumes = nil
	if storageMatches(set, pod) {
		t.Error("Pod with invalid Volumes has valid storage")
	}
	updateStorage(set, pod)
	if !storageMatches(set, pod) {
		t.Error("updateStorage failed to recreate volumes")
	}
	// Nil PersistentVolumeClaim sources must be recreated.
	pod = newStatefulSetPod(set, 1)
	for i := range pod.Spec.Volumes {
		pod.Spec.Volumes[i].PersistentVolumeClaim = nil
	}
	if storageMatches(set, pod) {
		t.Error("Pod with invalid Volumes claim valid storage")
	}
	updateStorage(set, pod)
	if !storageMatches(set, pod) {
		t.Error("updateStorage failed to recreate volume claims")
	}
	// Wrong claim names must be rewritten.
	pod = newStatefulSetPod(set, 1)
	for i := range pod.Spec.Volumes {
		if pod.Spec.Volumes[i].PersistentVolumeClaim != nil {
			pod.Spec.Volumes[i].PersistentVolumeClaim.ClaimName = "foo"
		}
	}
	if storageMatches(set, pod) {
		t.Error("Pod with invalid Volumes claim valid storage")
	}
	updateStorage(set, pod)
	if !storageMatches(set, pod) {
		t.Error("updateStorage failed to recreate volume claim names")
	}
}
// TestIsRunningAndReady verifies that isRunningAndReady requires both the PodRunning phase and a true
// PodReady condition.
func TestIsRunningAndReady(t *testing.T) {
	set := newStatefulSet(3)
	pod := newStatefulSetPod(set, 1)
	// A brand-new Pod has no phase yet.
	if isRunningAndReady(pod) {
		t.Error("isRunningAndReady does not respect Pod phase")
	}
	// Running alone is not enough without the ready condition.
	pod.Status.Phase = v1.PodRunning
	if isRunningAndReady(pod) {
		t.Error("isRunningAndReady does not respect Pod condition")
	}
	readyCond := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
	podutil.UpdatePodCondition(&pod.Status, &readyCond)
	if !isRunningAndReady(pod) {
		t.Error("Pod should be running and ready")
	}
}
// TestAscendingOrdinal verifies that ascendingOrdinal orders a shuffled slice of Pods by ordinal.
func TestAscendingOrdinal(t *testing.T) {
	set := newStatefulSet(10)
	shuffled := rand.Perm(10)
	pods := make([]*v1.Pod, 0, len(shuffled))
	for _, ordinal := range shuffled {
		pods = append(pods, newStatefulSetPod(set, ordinal))
	}
	sort.Sort(ascendingOrdinal(pods))
	if !sort.IsSorted(ascendingOrdinal(pods)) {
		t.Error("ascendingOrdinal fails to sort Pods")
	}
}
// TestOverlappingStatefulSets verifies that overlappingStatefulSets sorts StatefulSets by creation timestamp,
// falling back to name order when timestamps are equal.
func TestOverlappingStatefulSets(t *testing.T) {
	sets := make([]*apps.StatefulSet, 10)
	perm := rand.Perm(10)
	// Distinct creation timestamps exercise the primary sort key.
	for i, v := range perm {
		sets[i] = newStatefulSet(10)
		sets[i].CreationTimestamp = metav1.NewTime(sets[i].CreationTimestamp.Add(time.Duration(v) * time.Second))
	}
	sort.Sort(overlappingStatefulSets(sets))
	if !sort.IsSorted(overlappingStatefulSets(sets)) {
		// Fixed copy-pasted failure message that referred to ascendingOrdinal/Pods.
		t.Error("overlappingStatefulSets fails to sort StatefulSets by creation timestamp")
	}
	// Identical timestamps exercise the name tie breaker.
	for i, v := range perm {
		sets[i] = newStatefulSet(10)
		sets[i].Name = strconv.FormatInt(int64(v), 10)
	}
	sort.Sort(overlappingStatefulSets(sets))
	if !sort.IsSorted(overlappingStatefulSets(sets)) {
		t.Error("overlappingStatefulSets fails to sort StatefulSets by name")
	}
}
// TestNewPodControllerRef verifies that a Pod built by newStatefulSetPod carries an owning ControllerRef
// pointing back at the StatefulSet (correct API version, kind, name, UID, and Controller=true).
func TestNewPodControllerRef(t *testing.T) {
	set := newStatefulSet(1)
	pod := newStatefulSetPod(set, 0)
	controllerRef := metav1.GetControllerOf(pod)
	if controllerRef == nil {
		t.Fatalf("No ControllerRef found on new pod")
	}
	if got, want := controllerRef.APIVersion, apps.SchemeGroupVersion.String(); got != want {
		t.Errorf("controllerRef.APIVersion = %q, want %q", got, want)
	}
	if got, want := controllerRef.Kind, "StatefulSet"; got != want {
		t.Errorf("controllerRef.Kind = %q, want %q", got, want)
	}
	if got, want := controllerRef.Name, set.Name; got != want {
		t.Errorf("controllerRef.Name = %q, want %q", got, want)
	}
	if got, want := controllerRef.UID, set.UID; got != want {
		t.Errorf("controllerRef.UID = %q, want %q", got, want)
	}
	if got, want := *controllerRef.Controller, true; got != want {
		t.Errorf("controllerRef.Controller = %v, want %v", got, want)
	}
}
// TestCreateApplyRevision verifies the newRevision/ApplyRevision round trip: a revision captured from a set
// restores the set's template after it is mutated, and the set's annotations survive onto new revisions.
func TestCreateApplyRevision(t *testing.T) {
	set := newStatefulSet(1)
	set.Status.CollisionCount = new(int32)
	revision, err := newRevision(set, 1, set.Status.CollisionCount)
	if err != nil {
		t.Fatal(err)
	}
	// Mutate the template and add an annotation, then roll back via ApplyRevision.
	set.Spec.Template.Spec.Containers[0].Name = "foo"
	if set.Annotations == nil {
		set.Annotations = make(map[string]string)
	}
	key := "foo"
	expectedValue := "bar"
	set.Annotations[key] = expectedValue
	restoredSet, err := ApplyRevision(set, revision)
	if err != nil {
		t.Fatal(err)
	}
	// A revision taken from the restored set must equal the original revision (same template patch).
	restoredRevision, err := newRevision(restoredSet, 2, restoredSet.Status.CollisionCount)
	if err != nil {
		t.Fatal(err)
	}
	if !history.EqualRevision(revision, restoredRevision) {
		t.Errorf("wanted %v got %v", string(revision.Data.Raw), string(restoredRevision.Data.Raw))
	}
	// Annotations added to the set are propagated onto revisions created from it.
	value, ok := restoredRevision.Annotations[key]
	if !ok {
		t.Errorf("missing annotation %s", key)
	}
	if value != expectedValue {
		t.Errorf("for annotation %s wanted %s got %s", key, expectedValue, value)
	}
}
// newPVC returns a PersistentVolumeClaim fixture with the given name requesting one byte of storage.
func newPVC(name string) v1.PersistentVolumeClaim {
	storage := *resource.NewQuantity(1, resource.BinarySI)
	return v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.PersistentVolumeClaimSpec{
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{v1.ResourceStorage: storage},
			},
		},
	}
}
// newStatefulSetWithVolumes creates a StatefulSet fixture named name with the requested replica count. Each
// VolumeMount in petMounts is backed by a PersistentVolumeClaim template of the same name; each VolumeMount in
// podMounts is backed by a HostPath volume under /tmp.
func newStatefulSetWithVolumes(replicas int, name string, petMounts []v1.VolumeMount, podMounts []v1.VolumeMount) *apps.StatefulSet {
	// Build the combined mount list into a fresh slice. The original `append(petMounts, podMounts...)`
	// could write into the caller's backing array whenever petMounts had spare capacity.
	mounts := make([]v1.VolumeMount, 0, len(petMounts)+len(podMounts))
	mounts = append(mounts, petMounts...)
	mounts = append(mounts, podMounts...)
	claims := make([]v1.PersistentVolumeClaim, 0, len(petMounts))
	for _, m := range petMounts {
		claims = append(claims, newPVC(m.Name))
	}
	vols := make([]v1.Volume, 0, len(podMounts))
	for _, m := range podMounts {
		vols = append(vols, v1.Volume{
			Name: m.Name,
			VolumeSource: v1.VolumeSource{
				HostPath: &v1.HostPathVolumeSource{
					Path: fmt.Sprintf("/tmp/%v", m.Name),
				},
			},
		})
	}
	template := v1.PodTemplateSpec{
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:         "nginx",
					Image:        "nginx",
					VolumeMounts: mounts,
				},
			},
			Volumes: vols,
		},
	}
	template.Labels = map[string]string{"foo": "bar"}
	return &apps.StatefulSet{
		TypeMeta: metav1.TypeMeta{
			Kind:       "StatefulSet",
			APIVersion: "apps/v1beta1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: v1.NamespaceDefault,
			UID:       types.UID("test"),
		},
		Spec: apps.StatefulSetSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"foo": "bar"},
			},
			Replicas:             func() *int32 { i := int32(replicas); return &i }(),
			Template:             template,
			VolumeClaimTemplates: claims,
			ServiceName:          "governingsvc",
			UpdateStrategy:       apps.StatefulSetUpdateStrategy{Type: apps.RollingUpdateStatefulSetStrategyType},
			RevisionHistoryLimit: func() *int32 {
				limit := int32(2)
				return &limit
			}(),
		},
	}
}
// newStatefulSet returns the standard test fixture: a StatefulSet named "foo" with one claim-backed mount
// ("datadir") and one plain pod mount ("home").
func newStatefulSet(replicas int) *apps.StatefulSet {
	claimMounts := []v1.VolumeMount{
		{Name: "datadir", MountPath: "/tmp/zookeeper"},
	}
	hostMounts := []v1.VolumeMount{
		{Name: "home", MountPath: "/home"},
	}
	return newStatefulSetWithVolumes(replicas, "foo", claimMounts, hostMounts)
}