vendor update for CSI 0.3.0
5 vendor/k8s.io/kubernetes/pkg/controller/BUILD generated vendored
@@ -14,14 +14,13 @@ go_test(
     ],
     embed = [":go_default_library"],
     deps = [
-        "//pkg/api/legacyscheme:go_default_library",
         "//pkg/api/testapi:go_default_library",
         "//pkg/apis/core/install:go_default_library",
         "//pkg/controller/testutil:go_default_library",
         "//pkg/securitycontext:go_default_library",
         "//vendor/github.com/stretchr/testify/assert:go_default_library",
         "//vendor/k8s.io/api/apps/v1:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
@@ -56,6 +55,7 @@ go_library(
         "//pkg/apis/core:go_default_library",
         "//pkg/apis/core/install:go_default_library",
         "//pkg/apis/core/validation:go_default_library",
+        "//pkg/scheduler/algorithm:go_default_library",
         "//pkg/serviceaccount:go_default_library",
         "//pkg/util/hash:go_default_library",
         "//pkg/util/taints:go_default_library",
@@ -64,7 +64,6 @@ go_library(
         "//vendor/k8s.io/api/apps/v1:go_default_library",
         "//vendor/k8s.io/api/authentication/v1:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
2 vendor/k8s.io/kubernetes/pkg/controller/bootstrap/tokencleaner.go generated vendored
@@ -161,7 +161,7 @@ func (tc *TokenCleaner) processNextWorkItem() bool {
 func (tc *TokenCleaner) syncFunc(key string) error {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing secret %q (%v)", key, time.Now().Sub(startTime))
+		glog.V(4).Infof("Finished syncing secret %q (%v)", key, time.Since(startTime))
 	}()

 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
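Editorial note: this pair (repeated in several files below) swaps `time.Now().Sub(startTime)` for `time.Since(startTime)`. The two are equivalent — `time.Since(t)` is defined as `time.Now().Sub(t)` — the new form is just the idiomatic spelling. A minimal runnable sketch:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	startTime := time.Now()
	time.Sleep(10 * time.Millisecond) // stand-in for the sync work
	// time.Since(t) == time.Now().Sub(t); both yield the elapsed duration.
	fmt.Printf("finished syncing (%v)\n", time.Since(startTime))
}
```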
1 vendor/k8s.io/kubernetes/pkg/controller/certificates/OWNERS generated vendored
@@ -1,3 +1,4 @@
 reviewers:
 - deads2k
 - mikedanese
+- awly
2 vendor/k8s.io/kubernetes/pkg/controller/certificates/approver/BUILD generated vendored
@@ -28,10 +28,8 @@ go_library(
     deps = [
         "//pkg/apis/certificates/v1beta1:go_default_library",
         "//pkg/controller/certificates:go_default_library",
-        "//pkg/features:go_default_library",
         "//vendor/k8s.io/api/authorization/v1beta1:go_default_library",
         "//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
         "//vendor/k8s.io/client-go/informers/certificates/v1beta1:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes:go_default_library",
     ],
34 vendor/k8s.io/kubernetes/pkg/controller/certificates/approver/sarapprove.go generated vendored
@@ -25,12 +25,10 @@ import (

 	authorization "k8s.io/api/authorization/v1beta1"
 	capi "k8s.io/api/certificates/v1beta1"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	certificatesinformers "k8s.io/client-go/informers/certificates/v1beta1"
 	clientset "k8s.io/client-go/kubernetes"
 	k8s_certificates_v1beta1 "k8s.io/kubernetes/pkg/apis/certificates/v1beta1"
 	"k8s.io/kubernetes/pkg/controller/certificates"
-	"k8s.io/kubernetes/pkg/features"
 )

 type csrRecognizer struct {
@@ -69,13 +67,6 @@ func recognizers() []csrRecognizer {
 			successMessage: "Auto approving kubelet client certificate after SubjectAccessReview.",
 		},
 	}
-	if utilfeature.DefaultFeatureGate.Enabled(features.RotateKubeletServerCertificate) {
-		recognizers = append(recognizers, csrRecognizer{
-			recognize:      isSelfNodeServerCert,
-			permission:     authorization.ResourceAttributes{Group: "certificates.k8s.io", Resource: "certificatesigningrequests", Verb: "create", Subresource: "selfnodeserver"},
-			successMessage: "Auto approving self kubelet server certificate after SubjectAccessReview.",
-		})
-	}
 	return recognizers
 }

@@ -201,28 +192,3 @@ func isSelfNodeClientCert(csr *capi.CertificateSigningRequest, x509cr *x509.Cert
 	}
 	return true
 }
-
-var kubeletServerUsages = []capi.KeyUsage{
-	capi.UsageKeyEncipherment,
-	capi.UsageDigitalSignature,
-	capi.UsageServerAuth,
-}
-
-func isSelfNodeServerCert(csr *capi.CertificateSigningRequest, x509cr *x509.CertificateRequest) bool {
-	if !reflect.DeepEqual([]string{"system:nodes"}, x509cr.Subject.Organization) {
-		return false
-	}
-	if len(x509cr.DNSNames) == 0 || len(x509cr.IPAddresses) == 0 {
-		return false
-	}
-	if !hasExactUsages(csr, kubeletServerUsages) {
-		return false
-	}
-	if !strings.HasPrefix(x509cr.Subject.CommonName, "system:node:") {
-		return false
-	}
-	if csr.Spec.Username != x509cr.Subject.CommonName {
-		return false
-	}
-	return true
-}
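The removed recognizer gated self kubelet server certificates on an exact key-usage match via `hasExactUsages`, whose body is not part of this diff. A self-contained sketch of what an exact (order-insensitive, multiset) usage match looks like — an assumption about its behavior, not the vendored implementation:

```go
package main

import "fmt"

type KeyUsage string

// usagesMatchExactly reports whether csrUsages contains exactly the expected
// usages, regardless of order (duplicates are counted, so [a,a,b] != [a,b,c]).
func usagesMatchExactly(csrUsages, expected []KeyUsage) bool {
	if len(csrUsages) != len(expected) {
		return false
	}
	counts := make(map[KeyUsage]int, len(expected))
	for _, u := range expected {
		counts[u]++
	}
	for _, u := range csrUsages {
		counts[u]--
		if counts[u] < 0 {
			return false
		}
	}
	return true
}

func main() {
	server := []KeyUsage{"key encipherment", "digital signature", "server auth"}
	fmt.Println(usagesMatchExactly([]KeyUsage{"server auth", "digital signature", "key encipherment"}, server)) // true
	fmt.Println(usagesMatchExactly([]KeyUsage{"server auth"}, server))                                          // false
}
```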
71 vendor/k8s.io/kubernetes/pkg/controller/certificates/approver/sarapprove_test.go generated vendored
@@ -187,77 +187,6 @@ func TestHandle(t *testing.T) {
 	}
 }

-func TestSelfNodeServerCertRecognizer(t *testing.T) {
-	defaultCSR := csrBuilder{
-		cn:        "system:node:foo",
-		orgs:      []string{"system:nodes"},
-		requestor: "system:node:foo",
-		usages: []capi.KeyUsage{
-			capi.UsageKeyEncipherment,
-			capi.UsageDigitalSignature,
-			capi.UsageServerAuth,
-		},
-		dns: []string{"node"},
-		ips: []net.IP{net.ParseIP("192.168.0.1")},
-	}
-
-	testCases := []struct {
-		description     string
-		csrBuilder      csrBuilder
-		expectedOutcome bool
-	}{
-		{
-			description:     "Success - all requirements met",
-			csrBuilder:      defaultCSR,
-			expectedOutcome: true,
-		},
-		{
-			description: "No organization",
-			csrBuilder: func(b csrBuilder) csrBuilder {
-				b.orgs = []string{}
-				return b
-			}(defaultCSR),
-			expectedOutcome: false,
-		},
-		{
-			description: "Wrong organization",
-			csrBuilder: func(b csrBuilder) csrBuilder {
-				b.orgs = append(b.orgs, "new-org")
-				return b
-			}(defaultCSR),
-			expectedOutcome: false,
-		},
-		{
-			description: "Wrong usages",
-			csrBuilder: func(b csrBuilder) csrBuilder {
-				b.usages = []capi.KeyUsage{}
-				return b
-			}(defaultCSR),
-			expectedOutcome: false,
-		},
-		{
-			description: "Wrong common name",
-			csrBuilder: func(b csrBuilder) csrBuilder {
-				b.cn = "wrong-common-name"
-				return b
-			}(defaultCSR),
-			expectedOutcome: false,
-		},
-	}
-	for _, tc := range testCases {
-		t.Run(tc.description, func(t *testing.T) {
-			csr := makeFancyTestCsr(tc.csrBuilder)
-			x509cr, err := k8s_certificates_v1beta1.ParseCSR(csr)
-			if err != nil {
-				t.Errorf("unexpected err: %v", err)
-			}
-			if isSelfNodeServerCert(csr, x509cr) != tc.expectedOutcome {
-				t.Errorf("expected recognized to be %v", tc.expectedOutcome)
-			}
-		})
-	}
-}
-
 func TestRecognizers(t *testing.T) {
 	goodCases := []func(b *csrBuilder){
 		func(b *csrBuilder) {
4 vendor/k8s.io/kubernetes/pkg/controller/certificates/certificate_controller.go generated vendored
@@ -58,7 +58,7 @@ func NewCertificateController(
 	// Send events to the apiserver
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
-	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
+	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})

 	cc := &CertificateController{
 		kubeClient: kubeClient,
@@ -169,7 +169,7 @@ func (cc *CertificateController) enqueueCertificateRequest(obj interface{}) {
 func (cc *CertificateController) syncFunc(key string) error {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing certificate request %q (%v)", key, time.Now().Sub(startTime))
+		glog.V(4).Infof("Finished syncing certificate request %q (%v)", key, time.Since(startTime))
 	}()
 	csr, err := cc.csrLister.Get(key)
 	if errors.IsNotFound(err) {
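The same event-sink simplification recurs throughout this commit: the typed CoreV1 events client is handed to `EventSinkImpl` directly instead of re-wrapping the REST client. A minimal sketch of the wiring using the fake clientset (the component name is hypothetical):

```go
package main

import (
	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/kubernetes/scheme"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

func main() {
	kubeClient := fake.NewSimpleClientset()
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	// The typed events interface goes straight into the sink, replacing the
	// old v1core.New(kubeClient.CoreV1().RESTClient()).Events("") wrapper.
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "example-controller"})
	_ = recorder
}
```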
1 vendor/k8s.io/kubernetes/pkg/controller/cloud/BUILD generated vendored
@@ -17,6 +17,7 @@ go_library(
         "//pkg/api/v1/node:go_default_library",
         "//pkg/cloudprovider:go_default_library",
         "//pkg/controller:go_default_library",
+        "//pkg/controller/util/node:go_default_library",
         "//pkg/kubelet/apis:go_default_library",
         "//pkg/scheduler/algorithm:go_default_library",
         "//pkg/util/node:go_default_library",
2 vendor/k8s.io/kubernetes/pkg/controller/cloud/OWNERS generated vendored
@@ -2,7 +2,9 @@ approvers:
 - thockin
 - luxas
 - wlan0
+- andrewsykim
 reviewers:
 - thockin
 - luxas
 - wlan0
+- andrewsykim
81 vendor/k8s.io/kubernetes/pkg/controller/cloud/node_controller.go generated vendored
@@ -18,6 +18,7 @@ package cloud

 import (
 	"context"
+	"errors"
 	"fmt"
 	"time"

@@ -37,6 +38,8 @@ import (
 	clientretry "k8s.io/client-go/util/retry"
 	nodeutilv1 "k8s.io/kubernetes/pkg/api/v1/node"
 	"k8s.io/kubernetes/pkg/cloudprovider"
+	"k8s.io/kubernetes/pkg/controller"
+	nodectrlutil "k8s.io/kubernetes/pkg/controller/util/node"
 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm"
 	nodeutil "k8s.io/kubernetes/pkg/util/node"
@@ -84,7 +87,7 @@ func NewCloudNodeController(
 	eventBroadcaster.StartLogging(glog.Infof)
 	if kubeClient != nil {
 		glog.V(0).Infof("Sending events to api server.")
-		eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
+		eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
 	} else {
 		glog.V(0).Infof("No api server defined - no events will be sent to API server.")
 	}
@@ -110,7 +113,7 @@ func NewCloudNodeController(

 // This controller deletes a node if kubelet is not reporting
 // and the node is gone from the cloud provider.
-func (cnc *CloudNodeController) Run() {
+func (cnc *CloudNodeController) Run(stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()

 	// The following loops run communicate with the APIServer with a worst case complexity
@@ -118,10 +121,10 @@ func (cnc *CloudNodeController) Run() {
 	// very infrequently. DO NOT MODIFY this to perform frequent operations.

 	// Start a loop to periodically update the node addresses obtained from the cloud
-	go wait.Until(cnc.UpdateNodeStatus, cnc.nodeStatusUpdateFrequency, wait.NeverStop)
+	go wait.Until(cnc.UpdateNodeStatus, cnc.nodeStatusUpdateFrequency, stopCh)

 	// Start a loop to periodically check if any nodes have been deleted from cloudprovider
-	go wait.Until(cnc.MonitorNode, cnc.nodeMonitorPeriod, wait.NeverStop)
+	go wait.Until(cnc.MonitorNode, cnc.nodeMonitorPeriod, stopCh)
 }

 // UpdateNodeStatus updates the node status, such as node addresses
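Threading `stopCh` through to `wait.Until` is what makes the two loops above stoppable; `wait.NeverStop` is simply a channel that is never closed. A small runnable sketch of the pattern:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	stopCh := make(chan struct{})
	// wait.Until runs the function every period until stopCh is closed —
	// the same mechanism Run(stopCh <-chan struct{}) now threads through.
	go wait.Until(func() { fmt.Println("tick") }, 100*time.Millisecond, stopCh)
	time.Sleep(350 * time.Millisecond)
	close(stopCh) // stops the loop, where wait.NeverStop never would
	time.Sleep(100 * time.Millisecond)
}
```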
@@ -152,7 +155,7 @@ func (cnc *CloudNodeController) updateNodeAddress(node *v1.Node, instances cloud
 		return
 	}
 	// Node that isn't present according to the cloud provider shouldn't have its address updated
-	exists, err := ensureNodeExistsByProviderIDOrExternalID(instances, node)
+	exists, err := ensureNodeExistsByProviderID(instances, node)
 	if err != nil {
 		// Continue to update node address when not sure the node is not exists
 		glog.Errorf("%v", err)
@@ -166,6 +169,12 @@ func (cnc *CloudNodeController) updateNodeAddress(node *v1.Node, instances cloud
 		glog.Errorf("%v", err)
 		return
 	}
+
+	if len(nodeAddresses) == 0 {
+		glog.V(5).Infof("Skipping node address update for node %q since cloud provider did not return any", node.Name)
+		return
+	}
+
 	// Check if a hostname address exists in the cloud provided addresses
 	hostnameExists := false
 	for i := range nodeAddresses {
@@ -189,7 +198,6 @@ func (cnc *CloudNodeController) updateNodeAddress(node *v1.Node, instances cloud
 			glog.Errorf("Specified Node IP not found in cloudprovider")
 			return
 		}
-		nodeAddresses = []v1.NodeAddress{*nodeIP}
 	}
 	newNode := node.DeepCopy()
 	newNode.Status.Addresses = nodeAddresses
@@ -243,9 +251,27 @@ func (cnc *CloudNodeController) MonitorNode() {
 		// from the cloud provider. If node cannot be found in cloudprovider, then delete the node immediately
 		if currentReadyCondition != nil {
 			if currentReadyCondition.Status != v1.ConditionTrue {
+				// we need to check this first to get taint working in similar in all cloudproviders
+				// current problem is that shutdown nodes are not working in similar way ie. all cloudproviders
+				// does not delete node from kubernetes cluster when instance it is shutdown see issue #46442
+				shutdown, err := nodectrlutil.ShutdownInCloudProvider(context.TODO(), cnc.cloud, node)
+				if err != nil {
+					glog.Errorf("Error getting data for node %s from cloud: %v", node.Name, err)
+				}
+
+				if shutdown && err == nil {
+					// if node is shutdown add shutdown taint
+					err = controller.AddOrUpdateTaintOnNode(cnc.kubeClient, node.Name, controller.ShutdownTaint)
+					if err != nil {
+						glog.Errorf("Error patching node taints: %v", err)
+					}
+					// Continue checking the remaining nodes since the current one is shutdown.
+					continue
+				}
+
 				// Check with the cloud provider to see if the node still exists. If it
 				// doesn't, delete the node immediately.
-				exists, err := ensureNodeExistsByProviderIDOrExternalID(instances, node)
+				exists, err := ensureNodeExistsByProviderID(instances, node)
 				if err != nil {
 					glog.Errorf("Error getting data for node %s from cloud: %v", node.Name, err)
 					continue
@@ -275,6 +301,12 @@ func (cnc *CloudNodeController) MonitorNode() {
 					}
 				}(node.Name)

+			} else {
+				// if taint exist remove taint
+				err = controller.RemoveTaintOffNode(cnc.kubeClient, node.Name, node, controller.ShutdownTaint)
+				if err != nil {
+					glog.Errorf("Error patching node taints: %v", err)
+				}
 			}
 		}
 	}
@@ -324,21 +356,18 @@ func (cnc *CloudNodeController) AddCloudNode(obj interface{}) {

 		nodeAddresses, err := getNodeAddressesByProviderIDOrName(instances, curNode)
 		if err != nil {
-			glog.Errorf("%v", err)
-			return nil
+			return err
 		}

 		// If user provided an IP address, ensure that IP address is found
 		// in the cloud provider before removing the taint on the node
 		if nodeIP, ok := ensureNodeProvidedIPExists(curNode, nodeAddresses); ok {
 			if nodeIP == nil {
-				glog.Errorf("failed to get specified nodeIP in cloudprovider")
-				return nil
+				return errors.New("failed to find kubelet node IP from cloud provider")
 			}
 		}

 		if instanceType, err := getInstanceTypeByProviderIDOrName(instances, curNode); err != nil {
 			glog.Errorf("%v", err)
 			return err
 		} else if instanceType != "" {
 			glog.V(2).Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelInstanceType, instanceType)
@@ -412,23 +441,27 @@ func excludeTaintFromList(taints []v1.Taint, toExclude v1.Taint) []v1.Taint {
 	return newTaints
 }

-// ensureNodeExistsByProviderIDOrExternalID first checks if the instance exists by the provider id and then by calling external id with node name
-func ensureNodeExistsByProviderIDOrExternalID(instances cloudprovider.Instances, node *v1.Node) (bool, error) {
-	exists, err := instances.InstanceExistsByProviderID(context.TODO(), node.Spec.ProviderID)
-	if err != nil {
-		providerIDErr := err
-		_, err = instances.ExternalID(context.TODO(), types.NodeName(node.Name))
-		if err == nil {
-			return true, nil
-		}
-		if err == cloudprovider.InstanceNotFound {
-			return false, nil
-		}
-
-		return false, fmt.Errorf("InstanceExistsByProviderID: Error fetching by providerID: %v Error fetching by NodeName: %v", providerIDErr, err)
-	}
-
-	return exists, nil
+// ensureNodeExistsByProviderID checks if the instance exists by the provider id,
+// If provider id in spec is empty it calls instanceId with node name to get provider id
+func ensureNodeExistsByProviderID(instances cloudprovider.Instances, node *v1.Node) (bool, error) {
+	providerID := node.Spec.ProviderID
+	if providerID == "" {
+		var err error
+		providerID, err = instances.InstanceID(context.TODO(), types.NodeName(node.Name))
+		if err != nil {
+			if err == cloudprovider.InstanceNotFound {
+				return false, nil
+			}
+			return false, err
+		}
+
+		if providerID == "" {
+			glog.Warningf("Cannot find valid providerID for node name %q, assuming non existence", node.Name)
+			return false, nil
+		}
+	}
+
+	return instances.InstanceExistsByProviderID(context.TODO(), providerID)
 }

 func getNodeAddressesByProviderIDOrName(instances cloudprovider.Instances, node *v1.Node) ([]v1.NodeAddress, error) {
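A distilled, self-contained sketch of the new lookup order (the trimmed interface and fake below are stand-ins, not the vendored cloudprovider types): prefer `node.Spec.ProviderID`, fall back to `InstanceID(nodeName)`, then perform a single existence check by provider ID:

```go
package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for cloudprovider.InstanceNotFound.
var errNotFound = errors.New("instance not found")

// instances is trimmed to the two methods the helper uses.
type instances interface {
	InstanceID(nodeName string) (string, error)
	InstanceExistsByProviderID(providerID string) (bool, error)
}

func nodeExists(i instances, nodeName, specProviderID string) (bool, error) {
	providerID := specProviderID
	if providerID == "" {
		var err error
		providerID, err = i.InstanceID(nodeName)
		if err != nil {
			if err == errNotFound {
				return false, nil
			}
			return false, err
		}
		if providerID == "" {
			// No usable provider ID at all: assume the node is gone.
			return false, nil
		}
	}
	return i.InstanceExistsByProviderID(providerID)
}

type fake struct{ id string }

func (f fake) InstanceID(string) (string, error)               { return f.id, nil }
func (f fake) InstanceExistsByProviderID(string) (bool, error) { return true, nil }

func main() {
	ok, err := nodeExists(fake{id: "provider-id://a"}, "node0", "")
	fmt.Println(ok, err) // true <nil>
}
```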
181 vendor/k8s.io/kubernetes/pkg/controller/cloud/node_controller_test.go generated vendored
@@ -41,13 +41,14 @@ import (
 	"github.com/stretchr/testify/assert"
 )

-func TestEnsureNodeExistsByProviderIDOrNodeName(t *testing.T) {
+func TestEnsureNodeExistsByProviderID(t *testing.T) {

 	testCases := []struct {
 		testName           string
 		node               *v1.Node
 		expectedCalls      []string
-		existsByNodeName   bool
+		expectedNodeExists bool
+		hasInstanceID      bool
 		existsByProviderID bool
 		nodeNameErr        error
 		providerIDErr      error
@@ -56,9 +57,10 @@ func TestEnsureNodeExistsByProviderIDOrNodeName(t *testing.T) {
 			testName:           "node exists by provider id",
 			existsByProviderID: true,
 			providerIDErr:      nil,
-			existsByNodeName:   false,
+			hasInstanceID:      true,
 			nodeNameErr:        errors.New("unimplemented"),
 			expectedCalls:      []string{"instance-exists-by-provider-id"},
+			expectedNodeExists: true,
 			node: &v1.Node{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "node0",
@@ -72,9 +74,10 @@ func TestEnsureNodeExistsByProviderIDOrNodeName(t *testing.T) {
 			testName:           "does not exist by provider id",
 			existsByProviderID: false,
 			providerIDErr:      nil,
-			existsByNodeName:   false,
+			hasInstanceID:      true,
 			nodeNameErr:        errors.New("unimplemented"),
 			expectedCalls:      []string{"instance-exists-by-provider-id"},
+			expectedNodeExists: false,
 			node: &v1.Node{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "node0",
@@ -85,28 +88,41 @@ func TestEnsureNodeExistsByProviderIDOrNodeName(t *testing.T) {
 			},
 		},
 		{
-			testName:           "node exists by node name",
-			existsByProviderID: false,
-			providerIDErr:      errors.New("unimplemented"),
-			existsByNodeName:   true,
+			testName:           "exists by instance id",
+			existsByProviderID: true,
+			providerIDErr:      nil,
+			hasInstanceID:      true,
 			nodeNameErr:        nil,
-			expectedCalls:      []string{"instance-exists-by-provider-id", "external-id"},
+			expectedCalls:      []string{"instance-id", "instance-exists-by-provider-id"},
+			expectedNodeExists: true,
 			node: &v1.Node{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "node0",
 				},
-				Spec: v1.NodeSpec{
-					ProviderID: "node0",
-				},
 			},
 		},
 		{
+			testName:           "does not exist by no instance id",
+			existsByProviderID: true,
+			providerIDErr:      nil,
+			hasInstanceID:      false,
+			nodeNameErr:        cloudprovider.InstanceNotFound,
+			expectedCalls:      []string{"instance-id"},
+			expectedNodeExists: false,
+			node: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "node0",
+				},
+			},
+		},
+		{
-			testName:           "does not exist by node name",
+			testName:           "provider id returns error",
 			existsByProviderID: false,
 			providerIDErr:      errors.New("unimplemented"),
-			existsByNodeName:   false,
+			hasInstanceID:      true,
 			nodeNameErr:        cloudprovider.InstanceNotFound,
-			expectedCalls:      []string{"instance-exists-by-provider-id", "external-id"},
+			expectedCalls:      []string{"instance-exists-by-provider-id"},
 			expectedNodeExists: false,
 			node: &v1.Node{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "node0",
@@ -121,28 +137,137 @@ func TestEnsureNodeExistsByProviderIDOrNodeName(t *testing.T) {
 	for _, tc := range testCases {
 		t.Run(tc.testName, func(t *testing.T) {
 			fc := &fakecloud.FakeCloud{
-				Exists:             tc.existsByNodeName,
 				ExistsByProviderID: tc.existsByProviderID,
 				Err:                tc.nodeNameErr,
 				ErrByProviderID:    tc.providerIDErr,
 			}
+
+			if tc.hasInstanceID {
+				fc.ExtID = map[types.NodeName]string{
+					types.NodeName(tc.node.Name): "provider-id://a",
+				}
+			}
+
 			instances, _ := fc.Instances()
-			exists, err := ensureNodeExistsByProviderIDOrExternalID(instances, tc.node)
-			assert.NoError(t, err)
+			exists, err := ensureNodeExistsByProviderID(instances, tc.node)
+			assert.Equal(t, err, tc.providerIDErr)

 			assert.EqualValues(t, tc.expectedCalls, fc.Calls,
 				"expected cloud provider methods `%v` to be called but `%v` was called ",
 				tc.expectedCalls, fc.Calls)

-			assert.False(t, tc.existsByProviderID && tc.existsByProviderID != exists,
-				"expected exist by provider id to be `%t` but got `%t`",
+			assert.Equal(t, tc.expectedNodeExists, exists,
+				"expected exists to be `%t` but got `%t`",
 				tc.existsByProviderID, exists)
 		})
 	}
 }

-	assert.False(t, tc.existsByNodeName && tc.existsByNodeName != exists,
-		"expected exist by node name to be `%t` but got `%t`", tc.existsByNodeName, exists)
-}

+func TestNodeShutdown(t *testing.T) {
+
+	testCases := []struct {
+		testName           string
+		node               *v1.Node
+		existsByProviderID bool
+		shutdown           bool
+	}{
+		{
+			testName:           "node shutdowned add taint",
+			existsByProviderID: true,
+			shutdown:           true,
+			node: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:              "node0",
+					CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
+				},
+				Spec: v1.NodeSpec{
+					ProviderID: "node0",
+				},
+				Status: v1.NodeStatus{
+					Conditions: []v1.NodeCondition{
+						{
+							Type:               v1.NodeReady,
+							Status:             v1.ConditionUnknown,
+							LastHeartbeatTime:  metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
+							LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
+						},
+					},
+				},
+			},
+		},
+		{
+			testName:           "node started after shutdown remove taint",
+			existsByProviderID: true,
+			shutdown:           false,
+			node: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:              "node0",
+					CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
+				},
+				Spec: v1.NodeSpec{
+					ProviderID: "node0",
+					Taints: []v1.Taint{
+						{
+							Key:    algorithm.TaintNodeShutdown,
+							Effect: v1.TaintEffectNoSchedule,
+						},
+					},
+				},
+				Status: v1.NodeStatus{
+					Conditions: []v1.NodeCondition{
+						{
+							Type:               v1.NodeReady,
+							Status:             v1.ConditionTrue,
+							LastHeartbeatTime:  metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
+							LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
+						},
+					},
+				},
+			},
+		},
+	}
+	for _, tc := range testCases {
+		t.Run(tc.testName, func(t *testing.T) {
+			fc := &fakecloud.FakeCloud{
+				ExistsByProviderID: tc.existsByProviderID,
+				NodeShutdown:       tc.shutdown,
+			}
+			fnh := &testutil.FakeNodeHandler{
+				Existing:      []*v1.Node{tc.node},
+				Clientset:     fake.NewSimpleClientset(),
+				PatchWaitChan: make(chan struct{}),
+			}
+
+			factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())
+
+			eventBroadcaster := record.NewBroadcaster()
+			cloudNodeController := &CloudNodeController{
+				kubeClient:                fnh,
+				nodeInformer:              factory.Core().V1().Nodes(),
+				cloud:                     fc,
+				nodeMonitorPeriod:         1 * time.Second,
+				recorder:                  eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}),
+				nodeStatusUpdateFrequency: 1 * time.Second,
+			}
+			eventBroadcaster.StartLogging(glog.Infof)
+
+			cloudNodeController.Run(wait.NeverStop)
+
+			select {
+			case <-fnh.PatchWaitChan:
+			case <-time.After(1 * time.Second):
+				t.Errorf("Timed out waiting %v for node to be updated", wait.ForeverTestTimeout)
+			}
+
+			assert.Equal(t, 1, len(fnh.UpdatedNodes), "Node was not updated")
+			if tc.shutdown {
+				assert.Equal(t, 1, len(fnh.UpdatedNodes[0].Spec.Taints), "Node Taint was not added")
+				assert.Equal(t, "node.cloudprovider.kubernetes.io/shutdown", fnh.UpdatedNodes[0].Spec.Taints[0].Key, "Node Taint key is not correct")
+			} else {
+				assert.Equal(t, 0, len(fnh.UpdatedNodes[0].Spec.Taints), "Node Taint was not removed after node is back in ready state")
+			}
+		})
+	}
+}
@@ -226,7 +351,7 @@ func TestNodeDeleted(t *testing.T) {
 	}
 	eventBroadcaster.StartLogging(glog.Infof)

-	cloudNodeController.Run()
+	cloudNodeController.Run(wait.NeverStop)

 	select {
 	case <-fnh.DeleteWaitChan:
@@ -643,7 +768,7 @@ func TestNodeAddresses(t *testing.T) {
 		},
 	}

-	cloudNodeController.Run()
+	cloudNodeController.Run(wait.NeverStop)

 	<-time.After(2 * time.Second)

@@ -745,15 +870,15 @@ func TestNodeProvidedIPAddresses(t *testing.T) {

 	assert.Equal(t, 1, len(fnh.UpdatedNodes), "Node was not updated")
 	assert.Equal(t, "node0", fnh.UpdatedNodes[0].Name, "Node was not updated")
-	assert.Equal(t, 1, len(fnh.UpdatedNodes[0].Status.Addresses), "Node status unexpectedly updated")
+	assert.Equal(t, 3, len(fnh.UpdatedNodes[0].Status.Addresses), "Node status unexpectedly updated")

-	cloudNodeController.Run()
+	cloudNodeController.Run(wait.NeverStop)

 	<-time.After(2 * time.Second)

 	updatedNodes := fnh.GetUpdatedNodesCopy()

-	assert.Equal(t, 1, len(updatedNodes[0].Status.Addresses), 1, "Node Addresses not correctly updated")
+	assert.Equal(t, 3, len(updatedNodes[0].Status.Addresses), "Node Addresses not correctly updated")
 	assert.Equal(t, "10.0.0.1", updatedNodes[0].Status.Addresses[0].Address, "Node Addresses not correctly updated")
 }
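The updated tests drive the new instance-id path through FakeCloud's `ExtID` map, as shown above. A toy, self-contained version of that fixture idea (a stand-in mirroring the `ExtID` field and `"instance-id"` call tracking, not the vendored fakecloud package):

```go
package main

import (
	"errors"
	"fmt"
)

var errInstanceNotFound = errors.New("instance not found")

// fakeCloud is a toy stand-in: an ExtID map keyed by node name plays the
// cloud's instance-ID lookup, so a test case can flip between "has an
// instance ID" and "not found" while recording which methods were called.
type fakeCloud struct {
	extID map[string]string // node name -> provider ID
	calls []string
}

func (f *fakeCloud) InstanceID(nodeName string) (string, error) {
	f.calls = append(f.calls, "instance-id")
	id, ok := f.extID[nodeName]
	if !ok {
		return "", errInstanceNotFound
	}
	return id, nil
}

func main() {
	fc := &fakeCloud{extID: map[string]string{"node0": "provider-id://a"}}
	id, err := fc.InstanceID("node0")
	fmt.Println(id, err, fc.calls) // provider-id://a <nil> [instance-id]
}
```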
13 vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go generated vendored
@@ -23,7 +23,6 @@ import (
 	"github.com/golang/glog"
 	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
-	extensions "k8s.io/api/extensions/v1beta1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -301,18 +300,18 @@ func NewReplicaSetControllerRefManager(
 // If the error is nil, either the reconciliation succeeded, or no
 // reconciliation was necessary. The list of ReplicaSets that you now own is
 // returned.
-func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(sets []*extensions.ReplicaSet) ([]*extensions.ReplicaSet, error) {
-	var claimed []*extensions.ReplicaSet
+func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(sets []*apps.ReplicaSet) ([]*apps.ReplicaSet, error) {
+	var claimed []*apps.ReplicaSet
 	var errlist []error

 	match := func(obj metav1.Object) bool {
 		return m.Selector.Matches(labels.Set(obj.GetLabels()))
 	}
 	adopt := func(obj metav1.Object) error {
-		return m.AdoptReplicaSet(obj.(*extensions.ReplicaSet))
+		return m.AdoptReplicaSet(obj.(*apps.ReplicaSet))
 	}
 	release := func(obj metav1.Object) error {
-		return m.ReleaseReplicaSet(obj.(*extensions.ReplicaSet))
+		return m.ReleaseReplicaSet(obj.(*apps.ReplicaSet))
 	}

 	for _, rs := range sets {
@@ -330,7 +329,7 @@ func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(sets []*extensions.Rep

 // AdoptReplicaSet sends a patch to take control of the ReplicaSet. It returns
 // the error if the patching fails.
-func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *extensions.ReplicaSet) error {
+func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *apps.ReplicaSet) error {
 	if err := m.CanAdopt(); err != nil {
 		return fmt.Errorf("can't adopt ReplicaSet %v/%v (%v): %v", rs.Namespace, rs.Name, rs.UID, err)
 	}
@@ -345,7 +344,7 @@ func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *extensions.ReplicaS

 // ReleaseReplicaSet sends a patch to free the ReplicaSet from the control of the Deployment controller.
 // It returns the error if the patching fails. 404 and 422 errors are ignored.
-func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(replicaSet *extensions.ReplicaSet) error {
+func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(replicaSet *apps.ReplicaSet) error {
 	glog.V(2).Infof("patching ReplicaSet %s_%s to remove its controllerRef to %s/%s:%s",
 		replicaSet.Namespace, replicaSet.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
 	deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), replicaSet.UID)
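The release path shown above frees a ReplicaSet by deleting its controllerRef with a strategic-merge patch. A standalone sketch of how that patch body (taken verbatim from the diff) is assembled — the UIDs here are placeholders:

```go
package main

import "fmt"

func main() {
	controllerUID := "1111-2222" // placeholder for m.Controller.GetUID()
	replicaSetUID := "3333-4444" // placeholder for replicaSet.UID
	// "$patch":"delete" removes the matching ownerReference entry; including
	// the object's own uid makes the patch fail if it races with a delete/recreate.
	patch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, controllerUID, replicaSetUID)
	fmt.Println(patch)
}
```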
4 vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager_test.go generated vendored
@@ -20,8 +20,8 @@ import (
 	"reflect"
 	"testing"

+	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
-	"k8s.io/api/extensions/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime/schema"
@@ -52,7 +52,7 @@ func newPod(podName string, label map[string]string, owner metav1.Object) *v1.Po
 		},
 	}
 	if owner != nil {
-		pod.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(owner, v1beta1.SchemeGroupVersion.WithKind("Fake"))}
+		pod.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(owner, apps.SchemeGroupVersion.WithKind("Fake"))}
 	}
 	return pod
 }
24 vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go generated vendored
@@ -25,8 +25,8 @@ import (
 	"sync/atomic"
 	"time"

+	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
-	extensions "k8s.io/api/extensions/v1beta1"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -45,6 +45,7 @@ import (
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	_ "k8s.io/kubernetes/pkg/apis/core/install"
 	"k8s.io/kubernetes/pkg/apis/core/validation"
+	"k8s.io/kubernetes/pkg/scheduler/algorithm"
 	hashutil "k8s.io/kubernetes/pkg/util/hash"
 	taintutils "k8s.io/kubernetes/pkg/util/taints"

@@ -86,6 +87,11 @@ var UpdateTaintBackoff = wait.Backoff{
 	Jitter: 1.0,
 }

+var ShutdownTaint = &v1.Taint{
+	Key:    algorithm.TaintNodeShutdown,
+	Effect: v1.TaintEffectNoSchedule,
+}
+
 var (
 	KeyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc
 )
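The `ShutdownTaint` var added here is what the cloud node controller applies and removes in the `MonitorNode` changes above. A self-contained sketch of the same taint value (the key string is confirmed by the test assertion `"node.cloudprovider.kubernetes.io/shutdown"` later in this commit; `algorithm.TaintNodeShutdown` is assumed to resolve to it):

```go
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
)

func main() {
	// Mirrors the ShutdownTaint var above; the real code references the key
	// via pkg/scheduler/algorithm's TaintNodeShutdown constant.
	shutdownTaint := &v1.Taint{
		Key:    "node.cloudprovider.kubernetes.io/shutdown",
		Effect: v1.TaintEffectNoSchedule,
	}
	fmt.Printf("%s:%s\n", shutdownTaint.Key, shutdownTaint.Effect)
}
```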
@@ -812,18 +818,18 @@ func IsPodActive(p *v1.Pod) bool {
 }

 // FilterActiveReplicaSets returns replica sets that have (or at least ought to have) pods.
-func FilterActiveReplicaSets(replicaSets []*extensions.ReplicaSet) []*extensions.ReplicaSet {
-	activeFilter := func(rs *extensions.ReplicaSet) bool {
+func FilterActiveReplicaSets(replicaSets []*apps.ReplicaSet) []*apps.ReplicaSet {
+	activeFilter := func(rs *apps.ReplicaSet) bool {
 		return rs != nil && *(rs.Spec.Replicas) > 0
 	}
 	return FilterReplicaSets(replicaSets, activeFilter)
 }

-type filterRS func(rs *extensions.ReplicaSet) bool
+type filterRS func(rs *apps.ReplicaSet) bool

 // FilterReplicaSets returns replica sets that are filtered by filterFn (all returned ones should match filterFn).
-func FilterReplicaSets(RSes []*extensions.ReplicaSet, filterFn filterRS) []*extensions.ReplicaSet {
-	var filtered []*extensions.ReplicaSet
+func FilterReplicaSets(RSes []*apps.ReplicaSet, filterFn filterRS) []*apps.ReplicaSet {
+	var filtered []*apps.ReplicaSet
 	for i := range RSes {
 		if filterFn(RSes[i]) {
 			filtered = append(filtered, RSes[i])
@@ -853,7 +859,7 @@ func (o ControllersByCreationTimestamp) Less(i, j int) bool {
 }

 // ReplicaSetsByCreationTimestamp sorts a list of ReplicaSet by creation timestamp, using their names as a tie breaker.
-type ReplicaSetsByCreationTimestamp []*extensions.ReplicaSet
+type ReplicaSetsByCreationTimestamp []*apps.ReplicaSet

 func (o ReplicaSetsByCreationTimestamp) Len() int      { return len(o) }
 func (o ReplicaSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
@@ -866,7 +872,7 @@ func (o ReplicaSetsByCreationTimestamp) Less(i, j int) bool {

 // ReplicaSetsBySizeOlder sorts a list of ReplicaSet by size in descending order, using their creation timestamp or name as a tie breaker.
 // By using the creation timestamp, this sorts from old to new replica sets.
-type ReplicaSetsBySizeOlder []*extensions.ReplicaSet
+type ReplicaSetsBySizeOlder []*apps.ReplicaSet

 func (o ReplicaSetsBySizeOlder) Len() int      { return len(o) }
 func (o ReplicaSetsBySizeOlder) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
@@ -879,7 +885,7 @@ func (o ReplicaSetsBySizeOlder) Less(i, j int) bool {

 // ReplicaSetsBySizeNewer sorts a list of ReplicaSet by size in descending order, using their creation timestamp or name as a tie breaker.
 // By using the creation timestamp, this sorts from new to old replica sets.
-type ReplicaSetsBySizeNewer []*extensions.ReplicaSet
+type ReplicaSetsBySizeNewer []*apps.ReplicaSet

 func (o ReplicaSetsBySizeNewer) Len() int      { return len(o) }
 func (o ReplicaSetsBySizeNewer) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
18 vendor/k8s.io/kubernetes/pkg/controller/controller_utils_test.go generated vendored
@@ -27,11 +27,12 @@ import (
 	"testing"
 	"time"

+	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
-	extensions "k8s.io/api/extensions/v1beta1"
 	apiequality "k8s.io/apimachinery/pkg/api/equality"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/uuid"
@@ -41,7 +42,6 @@ import (
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 	utiltesting "k8s.io/client-go/util/testing"
-	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	"k8s.io/kubernetes/pkg/api/testapi"
 	_ "k8s.io/kubernetes/pkg/apis/core/install"
 	"k8s.io/kubernetes/pkg/controller/testutil"
@@ -62,7 +62,7 @@ func NewFakeControllerExpectationsLookup(ttl time.Duration) (*ControllerExpectat

 func newReplicationController(replicas int) *v1.ReplicationController {
 	rc := &v1.ReplicationController{
-		TypeMeta: metav1.TypeMeta{APIVersion: legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
+		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
 		ObjectMeta: metav1.ObjectMeta{
 			UID:  uuid.NewUUID(),
 			Name: "foobar",
@@ -122,16 +122,16 @@ func newPodList(store cache.Store, count int, status v1.PodPhase, rc *v1.Replica
 	}
 }

-func newReplicaSet(name string, replicas int) *extensions.ReplicaSet {
-	return &extensions.ReplicaSet{
-		TypeMeta: metav1.TypeMeta{APIVersion: legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
+func newReplicaSet(name string, replicas int) *apps.ReplicaSet {
+	return &apps.ReplicaSet{
+		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
 		ObjectMeta: metav1.ObjectMeta{
 			UID:             uuid.NewUUID(),
 			Name:            name,
 			Namespace:       metav1.NamespaceDefault,
 			ResourceVersion: "18",
 		},
-		Spec: extensions.ReplicaSetSpec{
+		Spec: apps.ReplicaSetSpec{
 			Replicas: func() *int32 { i := int32(replicas); return &i }(),
 			Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
 			Template: v1.PodTemplateSpec{
@@ -286,7 +286,7 @@ func TestCreatePods(t *testing.T) {
 	}
 	testServer := httptest.NewServer(&fakeHandler)
 	defer testServer.Close()
-	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
+	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})

 	podControl := RealPodControl{
 		KubeClient: clientset,
@@ -417,7 +417,7 @@ func TestSortingActivePods(t *testing.T) {
 }

 func TestActiveReplicaSetsFiltering(t *testing.T) {
-	var replicaSets []*extensions.ReplicaSet
+	var replicaSets []*apps.ReplicaSet
 	replicaSets = append(replicaSets, newReplicaSet("zero", 0))
 	replicaSets = append(replicaSets, nil)
 	replicaSets = append(replicaSets, newReplicaSet("foo", 1))
16 vendor/k8s.io/kubernetes/pkg/controller/cronjob/cronjob_controller.go generated vendored
@@ -69,8 +69,7 @@ type CronJobController struct {
 func NewCronJobController(kubeClient clientset.Interface) (*CronJobController, error) {
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
-	// TODO: remove the wrapper when every clients have moved to use the clientset.
-	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
+	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})

 	if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
 		if err := metrics.RegisterMetricAndTrackRateLimiterUsage("cronjob_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()); err != nil {
@@ -89,14 +88,6 @@ func NewCronJobController(kubeClient clientset.Interface) (*CronJobController, e
 	return jm, nil
 }

-func NewCronJobControllerFromClient(kubeClient clientset.Interface) (*CronJobController, error) {
-	jm, err := NewCronJobController(kubeClient)
-	if err != nil {
-		return nil, err
-	}
-	return jm, nil
-}
-
 // Run the main goroutine responsible for watching and syncing jobs.
 func (jm *CronJobController) Run(stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()
@@ -281,9 +272,8 @@ func syncOne(sj *batchv1beta1.CronJob, js []batchv1.Job, now time.Time, jc jobCo
 	}
 	if tooLate {
 		glog.V(4).Infof("Missed starting window for %s", nameForLog)
-		// TODO: generate an event for a miss. Use a warning level event because it indicates a
-		// problem with the controller (restart or long queue), and is not expected by user either.
-		// Since we don't set LastScheduleTime when not scheduling, we are going to keep noticing
 		recorder.Eventf(sj, v1.EventTypeWarning, "MissSchedule", "Missed scheduled time to start a job: %s", scheduledTime.Format(time.RFC1123Z))
+		// TODO: Since we don't set LastScheduleTime when not scheduling, we are going to keep noticing
 		// the miss every cycle. In order to avoid sending multiple events, and to avoid processing
 		// the sj again and again, we could set a Status.LastMissedTime when we notice a miss.
 		// Then, when we call getRecentUnmetScheduleTimes, we can take max(creationTimestamp,
21 vendor/k8s.io/kubernetes/pkg/controller/cronjob/utils.go generated vendored
@@ -89,24 +89,6 @@ func groupJobsByParent(js []batchv1.Job) map[types.UID][]batchv1.Job {
 	return jobsBySj
 }

-// getNextStartTimeAfter gets the latest scheduled start time that is less than "now", or an error.
-func getNextStartTimeAfter(schedule string, now time.Time) (time.Time, error) {
-	// Using robfig/cron for cron scheduled parsing and next runtime
-	// computation. Not using the entire library because:
-	// - I want to detect when we missed a runtime due to being down.
-	//   - How do I set the time such that I can detect the last known runtime?
-	// - I guess the functions could launch a go-routine to start the job and
-	//   then return.
-	//   How to handle concurrency control.
-	//   How to detect changes to schedules or deleted schedules and then
-	//   update the jobs?
-	sched, err := cron.Parse(schedule)
-	if err != nil {
-		return time.Unix(0, 0), fmt.Errorf("Unparseable schedule: %s : %s", schedule, err)
-	}
-	return sched.Next(now), nil
-}
-
 // getRecentUnmetScheduleTimes gets a slice of times (from oldest to latest) that have passed when a Job should have started but did not.
 //
 // If there are too many (>100) unstarted times, just give up and return an empty slice.
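The removed helper used robfig/cron's parse-then-`Next` pattern, which `getRecentUnmetScheduleTimes` still relies on. A minimal runnable sketch of that pattern (the schedule string is an example; this cron library's default format includes a seconds field):

```go
package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron"
)

func main() {
	// cron.Parse turns the schedule into a Schedule whose Next(now) yields
	// the following start time — the same calls the removed helper made.
	sched, err := cron.Parse("0 0 * * * *") // second minute hour dom month dow: top of every hour
	if err != nil {
		fmt.Println("unparseable schedule:", err)
		return
	}
	now := time.Date(2018, 6, 1, 10, 30, 0, 0, time.UTC)
	fmt.Println(sched.Next(now)) // 2018-06-01 11:00:00 +0000 UTC
}
```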
@@ -171,9 +153,6 @@ func getRecentUnmetScheduleTimes(sj batchv1beta1.CronJob, now time.Time) ([]time

 // getJobFromTemplate makes a Job from a CronJob
 func getJobFromTemplate(sj *batchv1beta1.CronJob, scheduledTime time.Time) (*batchv1.Job, error) {
-	// TODO: consider adding the following labels:
-	// nominal-start-time=$RFC_3339_DATE_OF_INTENDED_START -- for user convenience
-	// scheduled-job-name=$SJ_NAME -- for user convenience
 	labels := copyLabels(&sj.Spec.JobTemplate)
 	annotations := copyAnnotations(&sj.Spec.JobTemplate)
 	// We want job names for a given nominal start time to have a deterministic name to avoid the same job being created twice
5 vendor/k8s.io/kubernetes/pkg/controller/daemon/BUILD generated vendored
@@ -23,7 +23,7 @@ go_library(
         "//pkg/kubelet/types:go_default_library",
         "//pkg/scheduler/algorithm:go_default_library",
         "//pkg/scheduler/algorithm/predicates:go_default_library",
-        "//pkg/scheduler/schedulercache:go_default_library",
+        "//pkg/scheduler/cache:go_default_library",
         "//pkg/util/labels:go_default_library",
         "//pkg/util/metrics:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
@@ -33,6 +33,7 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library",
@@ -68,6 +69,8 @@ go_test(
         "//pkg/api/v1/pod:go_default_library",
         "//pkg/apis/core:go_default_library",
         "//pkg/controller:go_default_library",
+        "//pkg/features:go_default_library",
         "//pkg/kubelet/apis:go_default_library",
         "//pkg/kubelet/types:go_default_library",
         "//pkg/scheduler/algorithm:go_default_library",
         "//pkg/securitycontext:go_default_library",
300 vendor/k8s.io/kubernetes/pkg/controller/daemon/daemon_controller.go generated vendored
@@ -23,11 +23,14 @@ import (
 	"sync"
 	"time"

+	"github.com/golang/glog"
+
 	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/types"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -53,10 +56,8 @@ import (
 	kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
-	"k8s.io/kubernetes/pkg/scheduler/schedulercache"
+	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
 	"k8s.io/kubernetes/pkg/util/metrics"
-
-	"github.com/golang/glog"
 )

 const (
@@ -112,6 +113,8 @@ type DaemonSetsController struct {
 	historyStoreSynced cache.InformerSynced
 	// podLister get list/get pods from the shared informers's store
 	podLister corelisters.PodLister
+	// podNodeIndex indexes pods by their nodeName
+	podNodeIndex cache.Indexer
 	// podStoreSynced returns true if the pod store has been synced at least once.
 	// Added as a member to the struct to allow injection for testing.
 	podStoreSynced cache.InformerSynced
@@ -134,8 +137,7 @@ type DaemonSetsController struct {
 func NewDaemonSetsController(daemonSetInformer appsinformers.DaemonSetInformer, historyInformer appsinformers.ControllerRevisionInformer, podInformer coreinformers.PodInformer, nodeInformer coreinformers.NodeInformer, kubeClient clientset.Interface) (*DaemonSetsController, error) {
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
-	// TODO: remove the wrapper when every clients have moved to use the clientset.
-	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
+	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})

 	if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
 		if err := metrics.RegisterMetricAndTrackRateLimiterUsage("daemon_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()); err != nil {
@@ -191,6 +193,12 @@ func NewDaemonSetsController(daemonSetInformer appsinformers.DaemonSetInformer,
 		DeleteFunc: dsc.deletePod,
 	})
 	dsc.podLister = podInformer.Lister()
+
+	// This custom indexer will index pods based on their NodeName which will decrease the amount of pods we need to get in simulate() call.
+	podInformer.Informer().GetIndexer().AddIndexers(cache.Indexers{
+		"nodeName": indexByPodNodeName,
+	})
+	dsc.podNodeIndex = podInformer.Informer().GetIndexer()
 	dsc.podStoreSynced = podInformer.Informer().HasSynced

 	nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -207,6 +215,18 @@ func NewDaemonSetsController(daemonSetInformer appsinformers.DaemonSetInformer,
 	return dsc, nil
 }

+func indexByPodNodeName(obj interface{}) ([]string, error) {
+	pod, ok := obj.(*v1.Pod)
+	if !ok {
+		return []string{}, nil
+	}
+	// We are only interested in active pods with nodeName set
+	if len(pod.Spec.NodeName) == 0 || pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
+		return []string{}, nil
+	}
+	return []string{pod.Spec.NodeName}, nil
+}
+
 func (dsc *DaemonSetsController) deleteDaemonset(obj interface{}) {
 	ds, ok := obj.(*apps.DaemonSet)
 	if !ok {
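The new `indexByPodNodeName` function plus the `AddIndexers` registration above give the DaemonSet controller O(pods-on-node) lookups instead of full-store scans. A self-contained sketch of the same client-go indexer mechanism (names and the toy pod are illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// An index keyed on spec.nodeName lets ByIndex fetch only that node's
	// pods — the same idea as the controller's podNodeIndex.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		"nodeName": func(obj interface{}) ([]string, error) {
			pod := obj.(*v1.Pod)
			if pod.Spec.NodeName == "" {
				return []string{}, nil
			}
			return []string{pod.Spec.NodeName}, nil
		},
	})
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"},
		Spec:       v1.PodSpec{NodeName: "node0"},
	}
	_ = indexer.Add(pod)
	objs, _ := indexer.ByIndex("nodeName", "node0")
	fmt.Println(len(objs)) // 1
}
```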
@@ -763,7 +783,7 @@ func (dsc *DaemonSetsController) getDaemonPods(ds *apps.DaemonSet) ([]*v1.Pod, e
 	return cm.ClaimPods(pods)
 }

-// getNodesToDaemonPods returns a map from nodes to daemon pods (corresponding to ds) running on the nodes.
+// getNodesToDaemonPods returns a map from nodes to daemon pods (corresponding to ds) created for the nodes.
 // This also reconciles ControllerRef by adopting/orphaning.
 // Note that returned Pods are pointers to objects in the cache.
 // If you want to modify one, you need to deep-copy it first.
@@ -775,9 +795,16 @@ func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *apps.DaemonSet) (map[s
 	// Group Pods by Node name.
 	nodeToDaemonPods := make(map[string][]*v1.Pod)
 	for _, pod := range claimedPods {
-		nodeName := pod.Spec.NodeName
+		nodeName, err := util.GetTargetNodeName(pod)
+		if err != nil {
+			glog.Warningf("Failed to get target node name of Pod %v/%v in DaemonSet %v/%v",
+				pod.Namespace, pod.Name, ds.Namespace, ds.Name)
+			continue
+		}
+
 		nodeToDaemonPods[nodeName] = append(nodeToDaemonPods[nodeName], pod)
 	}

 	return nodeToDaemonPods, nil
 }
@@ -802,12 +829,76 @@ func (dsc *DaemonSetsController) resolveControllerRef(namespace string, controll
 	return ds
 }

+// podsShouldBeOnNode figures out the DaemonSet pods to be created and deleted on the given node:
+//   - nodesNeedingDaemonPods: the pods need to start on the node
+//   - podsToDelete: the Pods need to be deleted on the node
+//   - failedPodsObserved: the number of failed pods on node
+//   - err: unexpected error
+func (dsc *DaemonSetsController) podsShouldBeOnNode(
+	node *v1.Node,
+	nodeToDaemonPods map[string][]*v1.Pod,
+	ds *apps.DaemonSet,
+) (nodesNeedingDaemonPods, podsToDelete []string, failedPodsObserved int, err error) {
+
+	wantToRun, shouldSchedule, shouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(node, ds)
+	if err != nil {
+		return
+	}
+
+	daemonPods, exists := nodeToDaemonPods[node.Name]
+	dsKey, _ := cache.MetaNamespaceKeyFunc(ds)
+	dsc.removeSuspendedDaemonPods(node.Name, dsKey)
+
+	switch {
+	case wantToRun && !shouldSchedule:
+		// If daemon pod is supposed to run, but can not be scheduled, add to suspended list.
+		dsc.addSuspendedDaemonPods(node.Name, dsKey)
+	case shouldSchedule && !exists:
+		// If daemon pod is supposed to be running on node, but isn't, create daemon pod.
+		nodesNeedingDaemonPods = append(nodesNeedingDaemonPods, node.Name)
+	case shouldContinueRunning:
+		// If a daemon pod failed, delete it
+		// If there's non-daemon pods left on this node, we will create it in the next sync loop
+		var daemonPodsRunning []*v1.Pod
+		for _, pod := range daemonPods {
+			if pod.DeletionTimestamp != nil {
+				continue
+			}
+			if pod.Status.Phase == v1.PodFailed {
+				msg := fmt.Sprintf("Found failed daemon pod %s/%s on node %s, will try to kill it", pod.Namespace, pod.Name, node.Name)
+				glog.V(2).Infof(msg)
+				// Emit an event so that it's discoverable to users.
+				dsc.eventRecorder.Eventf(ds, v1.EventTypeWarning, FailedDaemonPodReason, msg)
+				podsToDelete = append(podsToDelete, pod.Name)
+				failedPodsObserved++
+			} else {
+				daemonPodsRunning = append(daemonPodsRunning, pod)
+			}
+		}
+		// If daemon pod is supposed to be running on node, but more than 1 daemon pod is running, delete the excess daemon pods.
+		// Sort the daemon pods by creation time, so the oldest is preserved.
+		if len(daemonPodsRunning) > 1 {
+			sort.Sort(podByCreationTimestampAndPhase(daemonPodsRunning))
+			for i := 1; i < len(daemonPodsRunning); i++ {
+				podsToDelete = append(podsToDelete, daemonPodsRunning[i].Name)
+			}
+		}
+	case !shouldContinueRunning && exists:
+		// If daemon pod isn't supposed to run on node, but it is, delete all daemon pods on node.
+		for _, pod := range daemonPods {
+			podsToDelete = append(podsToDelete, pod.Name)
+		}
+	}
+
+	return nodesNeedingDaemonPods, podsToDelete, failedPodsObserved, nil
+}
+
 // manage manages the scheduling and running of Pods of ds on nodes.
 // After figuring out which nodes should run a Pod of ds but not yet running one and
 // which nodes should not run a Pod of ds but currently running one, it calls function
 // syncNodes with a list of pods to remove and a list of nodes to run a Pod of ds.
 func (dsc *DaemonSetsController) manage(ds *apps.DaemonSet, hash string) error {
-	// Find out which nodes are running the daemon pods controlled by ds.
+	// Find out the pods which are created for the nodes by DaemonSet.
 	nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
 	if err != nil {
 		return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
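The extracted `podsShouldBeOnNode` is a pure decision function over three scheduling predicates plus whether the node already has daemon pods. A distilled sketch of that decision table (a stand-in for the vendored function, with the pod bookkeeping stripped out):

```go
package main

import "fmt"

// decide condenses podsShouldBeOnNode's switch into its four outcomes.
func decide(wantToRun, shouldSchedule, shouldContinueRunning, exists bool) string {
	switch {
	case wantToRun && !shouldSchedule:
		return "suspend" // supposed to run but unschedulable: park on the suspended list
	case shouldSchedule && !exists:
		return "create" // should be running here but is not
	case shouldContinueRunning:
		return "keep (delete failed/excess pods)"
	case !shouldContinueRunning && exists:
		return "delete all daemon pods on node"
	}
	return "no-op"
}

func main() {
	fmt.Println(decide(true, false, false, false)) // suspend
	fmt.Println(decide(true, true, false, false))  // create
	fmt.Println(decide(false, false, false, true)) // delete all daemon pods on node
}
```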
@@ -822,55 +913,16 @@ func (dsc *DaemonSetsController) manage(ds *apps.DaemonSet, hash string) error {
 	var nodesNeedingDaemonPods, podsToDelete []string
 	var failedPodsObserved int
 	for _, node := range nodeList {
-		wantToRun, shouldSchedule, shouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(node, ds)
+		nodesNeedingDaemonPodsOnNode, podsToDeleteOnNode, failedPodsObservedOnNode, err := dsc.podsShouldBeOnNode(
+			node, nodeToDaemonPods, ds)
+
 		if err != nil {
 			continue
 		}

-		daemonPods, exists := nodeToDaemonPods[node.Name]
-		dsKey, _ := cache.MetaNamespaceKeyFunc(ds)
-		dsc.removeSuspendedDaemonPods(node.Name, dsKey)
-
-		switch {
-		case wantToRun && !shouldSchedule:
-			// If daemon pod is supposed to run, but can not be scheduled, add to suspended list.
-			dsc.addSuspendedDaemonPods(node.Name, dsKey)
-		case shouldSchedule && !exists:
-			// If daemon pod is supposed to be running on node, but isn't, create daemon pod.
-			nodesNeedingDaemonPods = append(nodesNeedingDaemonPods, node.Name)
-		case shouldContinueRunning:
-			// If a daemon pod failed, delete it
-			// If there's non-daemon pods left on this node, we will create it in the next sync loop
-			var daemonPodsRunning []*v1.Pod
-			for _, pod := range daemonPods {
-				if pod.DeletionTimestamp != nil {
-					continue
-				}
-				if pod.Status.Phase == v1.PodFailed {
-					msg := fmt.Sprintf("Found failed daemon pod %s/%s on node %s, will try to kill it", pod.Namespace, pod.Name, node.Name)
-					glog.V(2).Infof(msg)
-					// Emit an event so that it's discoverable to users.
-					dsc.eventRecorder.Eventf(ds, v1.EventTypeWarning, FailedDaemonPodReason, msg)
-					podsToDelete = append(podsToDelete, pod.Name)
-					failedPodsObserved++
-				} else {
-					daemonPodsRunning = append(daemonPodsRunning, pod)
-				}
-			}
-			// If daemon pod is supposed to be running on node, but more than 1 daemon pod is running, delete the excess daemon pods.
-			// Sort the daemon pods by creation time, so the oldest is preserved.
-			if len(daemonPodsRunning) > 1 {
-				sort.Sort(podByCreationTimestamp(daemonPodsRunning))
-				for i := 1; i < len(daemonPodsRunning); i++ {
-					podsToDelete = append(podsToDelete, daemonPodsRunning[i].Name)
-				}
-			}
-		case !shouldContinueRunning && exists:
-			// If daemon pod isn't supposed to run on node, but it is, delete all daemon pods on node.
-			for _, pod := range daemonPods {
-				podsToDelete = append(podsToDelete, pod.Name)
-			}
-		}
+		nodesNeedingDaemonPods = append(nodesNeedingDaemonPods, nodesNeedingDaemonPodsOnNode...)
+		podsToDelete = append(podsToDelete, podsToDeleteOnNode...)
+		failedPodsObserved += failedPodsObservedOnNode
 	}

 	// Label new pods using the hash label value of the current history when creating them
@@ -934,7 +986,26 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
	for i := pos; i < pos+batchSize; i++ {
		go func(ix int) {
			defer createWait.Done()
			err := dsc.podControl.CreatePodsOnNode(nodesNeedingDaemonPods[ix], ds.Namespace, &template, ds, metav1.NewControllerRef(ds, controllerKind))
			var err error

			podTemplate := &template

			if utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) {
				podTemplate = template.DeepCopy()
				// The pod's NodeAffinity will be updated to make sure the Pod is bound
				// to the target node by default scheduler. It is safe to do so because there
				// should be no conflicting node affinity with the target node.
				podTemplate.Spec.Affinity = util.ReplaceDaemonSetPodNodeNameNodeAffinity(
					podTemplate.Spec.Affinity, nodesNeedingDaemonPods[ix])
				podTemplate.Spec.Tolerations = util.AppendNoScheduleTolerationIfNotExist(podTemplate.Spec.Tolerations)

				err = dsc.podControl.CreatePodsWithControllerRef(ds.Namespace, podTemplate,
					ds, metav1.NewControllerRef(ds, controllerKind))
			} else {
				err = dsc.podControl.CreatePodsOnNode(nodesNeedingDaemonPods[ix], ds.Namespace, podTemplate,
					ds, metav1.NewControllerRef(ds, controllerKind))
			}

			if err != nil && errors.IsTimeout(err) {
				// Pod is created but its initialization has timed out.
				// If the initialization is successful eventually, the
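The hunk above sits inside syncNodes' batched pod-creation loop. A minimal, self-contained sketch of that slow-start batching pattern follows; the function and variable names are illustrative, not taken from this diff, and the real controller additionally skips the remaining batches once errors appear:

package main

import (
	"fmt"
	"sync"
)

// slowStartBatch issues createItem calls in exponentially growing batches,
// so a persistently failing DaemonSet wastes few API calls per sync pass.
func slowStartBatch(total, initialBatchSize int, createItem func(int) error) {
	for pos, batchSize := 0, initialBatchSize; pos < total; {
		if pos+batchSize > total {
			batchSize = total - pos
		}
		var wg sync.WaitGroup
		for i := pos; i < pos+batchSize; i++ {
			wg.Add(1)
			go func(ix int) {
				defer wg.Done()
				if err := createItem(ix); err != nil {
					fmt.Printf("create %d failed: %v\n", ix, err)
				}
			}(i)
		}
		wg.Wait()
		pos += batchSize
		batchSize *= 2 // double the batch after each successful round
	}
}

func main() {
	slowStartBatch(10, 1, func(ix int) error { fmt.Println("creating pod", ix); return nil })
}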
@@ -1058,7 +1129,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, hash
		currentNumberScheduled++
		// Sort the daemon pods by creation time, so that the oldest is first.
		daemonPods, _ := nodeToDaemonPods[node.Name]
		sort.Sort(podByCreationTimestamp(daemonPods))
		sort.Sort(podByCreationTimestampAndPhase(daemonPods))
		pod := daemonPods[0]
		if podutil.IsPodReady(pod) {
			numberReady++
@@ -1095,7 +1166,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, hash
func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
	startTime := time.Now()
	defer func() {
		glog.V(4).Infof("Finished syncing daemon set %q (%v)", key, time.Now().Sub(startTime))
		glog.V(4).Infof("Finished syncing daemon set %q (%v)", key, time.Since(startTime))
	}()

	namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -1126,6 +1197,18 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
		return fmt.Errorf("couldn't get key for object %#v: %v", ds, err)
	}

	// If the DaemonSet is being deleted (either by foreground deletion or
	// orphan deletion), we cannot be sure if the DaemonSet history objects
	// it owned still exist -- those history objects can either be deleted
	// or orphaned. Garbage collector doesn't guarantee that it will delete
	// DaemonSet pods before deleting DaemonSet history objects, because
	// DaemonSet history doesn't own DaemonSet pods. We cannot reliably
	// calculate the status of a DaemonSet being deleted. Therefore, return
	// here without updating status for the DaemonSet being deleted.
	if ds.DeletionTimestamp != nil {
		return nil
	}

	// Construct histories of the DaemonSet, and get the hash of current history
	cur, old, err := dsc.constructHistory(ds)
	if err != nil {
@@ -1133,7 +1216,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
	}
	hash := cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]

	if ds.DeletionTimestamp != nil || !dsc.expectations.SatisfiedExpectations(dsKey) {
	if !dsc.expectations.SatisfiedExpectations(dsKey) {
		// Only update status.
		return dsc.updateDaemonSetStatus(ds, hash)
	}
@@ -1209,30 +1292,27 @@ func (dsc *DaemonSetsController) simulate(newPod *v1.Pod, node *v1.Node, ds *app
	})
}

	pods := []*v1.Pod{}

	podList, err := dsc.podLister.List(labels.Everything())
	objects, err := dsc.podNodeIndex.ByIndex("nodeName", node.Name)
	if err != nil {
		return nil, nil, err
	}
	for _, pod := range podList {
		if pod.Spec.NodeName != node.Name {
			continue
		}
		if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
			continue
		}
		// ignore pods that belong to the daemonset when taking into account whether
		// a daemonset should bind to a node.
		if metav1.IsControlledBy(pod, ds) {
			continue
		}
		pods = append(pods, pod)
	}

	nodeInfo := schedulercache.NewNodeInfo(pods...)
	nodeInfo := schedulercache.NewNodeInfo()
	nodeInfo.SetNode(node)

	for _, obj := range objects {
		// Ignore pods that belong to the daemonset when taking into account whether a daemonset should bind to a node.
		// TODO: replace this with metav1.IsControlledBy() in 1.12
		pod, ok := obj.(*v1.Pod)
		if !ok {
			continue
		}
		if isControlledByDaemonSet(pod, ds.GetUID()) {
			continue
		}
		nodeInfo.AddPod(pod)
	}

	_, reasons, err := Predicates(newPod, nodeInfo)
	return reasons, nodeInfo, err
}
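simulate now fetches only the pods co-located on the node through an index instead of listing every pod. A hedged sketch of how such a nodeName index can be built with client-go follows; the controller's actual podNodeIndex registration is not part of this hunk, so the setup shown here is an assumption for illustration:

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Index pods by spec.nodeName so ByIndex("nodeName", node) is a cheap lookup.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		"nodeName": func(obj interface{}) ([]string, error) {
			pod, ok := obj.(*v1.Pod)
			if !ok {
				return nil, nil
			}
			return []string{pod.Spec.NodeName}, nil
		},
	})
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"},
		Spec:       v1.PodSpec{NodeName: "node-1"},
	}
	indexer.Add(pod)
	objs, _ := indexer.ByIndex("nodeName", "node-1") // no full list scan needed
	fmt.Println(len(objs))                           // 1
}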
@@ -1267,6 +1347,9 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *apps.
		return false, false, false, err
	}

	// TODO(k82cn): When 'ScheduleDaemonSetPods' upgrade to beta or GA, remove unnecessary check on failure reason,
	// e.g. InsufficientResourceError; and simplify "wantToRun, shouldSchedule, shouldContinueRunning"
	// into one result, e.g. selectedNode.
	var insufficientResourceErr error
	for _, r := range reasons {
		glog.V(4).Infof("DaemonSet Predicates failed on node %s for ds '%s/%s' for reason: %v", node.Name, ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, r.GetReason())
@@ -1341,11 +1424,50 @@ func NewPod(ds *apps.DaemonSet, nodeName string) *v1.Pod {
	return newPod
}

// nodeSelectionPredicates runs a set of predicates that select candidate nodes for the DaemonSet;
// the predicates include:
//   - PodFitsHost: checks pod's NodeName against node
//   - PodMatchNodeSelector: checks pod's NodeSelector and NodeAffinity against node
func nodeSelectionPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	var predicateFails []algorithm.PredicateFailureReason
	fit, reasons, err := predicates.PodFitsHost(pod, meta, nodeInfo)
	if err != nil {
		return false, predicateFails, err
	}
	if !fit {
		predicateFails = append(predicateFails, reasons...)
	}

	fit, reasons, err = predicates.PodMatchNodeSelector(pod, meta, nodeInfo)
	if err != nil {
		return false, predicateFails, err
	}
	if !fit {
		predicateFails = append(predicateFails, reasons...)
	}
	return len(predicateFails) == 0, predicateFails, nil
}

// Predicates checks if a DaemonSet's pod can be scheduled on a node using GeneralPredicates
// and PodToleratesNodeTaints predicate
func Predicates(pod *v1.Pod, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	var predicateFails []algorithm.PredicateFailureReason
	critical := utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation) && kubelettypes.IsCriticalPod(pod)

	// If ScheduleDaemonSetPods is enabled, only check nodeSelector and nodeAffinity.
	if utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) {
		fit, reasons, err := nodeSelectionPredicates(pod, nil, nodeInfo)
		if err != nil {
			return false, predicateFails, err
		}
		if !fit {
			predicateFails = append(predicateFails, reasons...)
		}

		return len(predicateFails) == 0, predicateFails, nil
	}

	critical := utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation) &&
		kubelettypes.IsCriticalPod(pod)

	fit, reasons, err := predicates.PodToleratesNodeTaints(pod, nil, nodeInfo)
	if err != nil {
@@ -1384,14 +1506,32 @@ func (o byCreationTimestamp) Less(i, j int) bool {
	return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp)
}

type podByCreationTimestamp []*v1.Pod
type podByCreationTimestampAndPhase []*v1.Pod

func (o podByCreationTimestamp) Len() int      { return len(o) }
func (o podByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o podByCreationTimestampAndPhase) Len() int      { return len(o) }
func (o podByCreationTimestampAndPhase) Swap(i, j int) { o[i], o[j] = o[j], o[i] }

func (o podByCreationTimestampAndPhase) Less(i, j int) bool {
	// Scheduled Pod first
	if len(o[i].Spec.NodeName) != 0 && len(o[j].Spec.NodeName) == 0 {
		return true
	}

	if len(o[i].Spec.NodeName) == 0 && len(o[j].Spec.NodeName) != 0 {
		return false
	}

func (o podByCreationTimestamp) Less(i, j int) bool {
	if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) {
		return o[i].Name < o[j].Name
	}
	return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp)
}

func isControlledByDaemonSet(p *v1.Pod, uuid types.UID) bool {
	for _, ref := range p.OwnerReferences {
		if ref.Controller != nil && *ref.Controller && ref.UID == uuid {
			return true
		}
	}
	return false
}
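A small usage sketch of the new ordering introduced above (it assumes the podByCreationTimestampAndPhase type is in scope plus the fmt, sort, time, core/v1 and metav1 imports; the pods are made up):

older := metav1.NewTime(time.Now().Add(-time.Hour))
newer := metav1.NewTime(time.Now())
unscheduled := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "a", CreationTimestamp: older}}
scheduled := &v1.Pod{
	ObjectMeta: metav1.ObjectMeta{Name: "b", CreationTimestamp: newer},
	Spec:       v1.PodSpec{NodeName: "node-1"},
}
pods := []*v1.Pod{unscheduled, scheduled}
sort.Sort(podByCreationTimestampAndPhase(pods))
// pods[0].Name == "b": scheduled pods sort ahead of unscheduled ones,
// and only then does creation time break ties, so the pod preserved when
// trimming excess daemon pods is the oldest *scheduled* one.
fmt.Println(pods[0].Name)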
143
vendor/k8s.io/kubernetes/pkg/controller/daemon/daemon_controller_test.go
generated
vendored
@@ -43,6 +43,8 @@ import (
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/features"
	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
	kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
	"k8s.io/kubernetes/pkg/scheduler/algorithm"
	"k8s.io/kubernetes/pkg/securitycontext"
@@ -145,7 +147,7 @@ func updateStrategies() []*apps.DaemonSetUpdateStrategy {

func newNode(name string, label map[string]string) *v1.Node {
	return &v1.Node{
		TypeMeta: metav1.TypeMeta{APIVersion: legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{
			Name:   name,
			Labels: label,
@@ -195,7 +197,7 @@ func newPod(podName string, nodeName string, label map[string]string, ds *apps.D
	}

	pod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{APIVersion: legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: podName,
			Labels:       newLabels,
@@ -267,6 +269,31 @@ func (f *fakePodControl) CreatePodsOnNode(nodeName, namespace string, template *
	return nil
}

func (f *fakePodControl) CreatePodsWithControllerRef(namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error {
	f.Lock()
	defer f.Unlock()
	if err := f.FakePodControl.CreatePodsWithControllerRef(namespace, template, object, controllerRef); err != nil {
		return fmt.Errorf("failed to create pod for DaemonSet")
	}

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Labels:    template.Labels,
			Namespace: namespace,
		},
	}

	pod.Name = names.SimpleNameGenerator.GenerateName(fmt.Sprintf("%p-", pod))

	if err := legacyscheme.Scheme.Convert(&template.Spec, &pod.Spec, nil); err != nil {
		return fmt.Errorf("unable to convert pod template: %v", err)
	}

	f.podStore.Update(pod)
	f.podIDMap[pod.Name] = pod
	return nil
}

func (f *fakePodControl) DeletePod(namespace string, podID string, object runtime.Object) error {
	f.Lock()
	defer f.Unlock()
@@ -423,6 +450,97 @@ func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
	}
}

// When ScheduleDaemonSetPods is enabled, DaemonSets without node selectors should
// launch pods on every node by NodeAffinity.
func TestSimpleDaemonSetScheduleDaemonSetPodsLaunchesPods(t *testing.T) {
	t.Skip("disabled for 1.10")

	enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
	// Rollback feature gate.
	defer func() {
		if !enabled {
			utilfeature.DefaultFeatureGate.Set("ScheduleDaemonSetPods=false")
		}
	}()

	utilfeature.DefaultFeatureGate.Set("ScheduleDaemonSetPods=true")

	nodeNum := 5

	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		manager, podControl, _, err := newTestController(ds)
		if err != nil {
			t.Fatalf("error creating DaemonSets controller: %v", err)
		}
		addNodes(manager.nodeStore, 0, nodeNum, nil)
		manager.dsStore.Add(ds)
		syncAndValidateDaemonSets(t, manager, ds, podControl, nodeNum, 0, 0)

		// Check for ScheduleDaemonSetPods feature
		if len(podControl.podIDMap) != nodeNum {
			t.Fatalf("failed to create pods for DaemonSet when ScheduleDaemonSetPods is enabled.")
		}

		nodeMap := make(map[string]*v1.Node)
		for _, node := range manager.nodeStore.List() {
			n := node.(*v1.Node)
			nodeMap[n.Name] = n
		}

		if len(nodeMap) != nodeNum {
			t.Fatalf("not enough nodes in the store, expected: %v, got: %v",
				nodeNum, len(nodeMap))
		}

		for _, pod := range podControl.podIDMap {
			if len(pod.Spec.NodeName) != 0 {
				t.Fatalf("the hostname of pod %v should be empty, but got %s",
					pod.Name, pod.Spec.NodeName)
			}
			if pod.Spec.Affinity == nil {
				t.Fatalf("the Affinity of pod %s is nil.", pod.Name)
			}
			if pod.Spec.Affinity.NodeAffinity == nil {
				t.Fatalf("the NodeAffinity of pod %s is nil.", pod.Name)
			}

			nodeSelector := pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution
			if nodeSelector == nil {
				t.Fatalf("the node selector of pod %s is nil.", pod.Name)
			}
			if len(nodeSelector.NodeSelectorTerms) != 1 {
				t.Fatalf("incorrect number of node selector terms of pod %s, expected: 1, got: %d.",
					pod.Name, len(nodeSelector.NodeSelectorTerms))
			}

			if len(nodeSelector.NodeSelectorTerms[0].MatchExpressions) != 1 {
				t.Fatalf("incorrect number of expressions in the node selector term of pod %s, expected: 1, got: %d.",
					pod.Name, len(nodeSelector.NodeSelectorTerms[0].MatchExpressions))
			}

			exp := nodeSelector.NodeSelectorTerms[0].MatchExpressions[0]
			if exp.Key == kubeletapis.LabelHostname {
				if exp.Operator != v1.NodeSelectorOpIn {
					t.Fatalf("the operation of hostname NodeAffinity is not %v", v1.NodeSelectorOpIn)
				}

				if len(exp.Values) != 1 {
					t.Fatalf("incorrect hostname in node affinity: expected 1, got %v", len(exp.Values))
				}

				delete(nodeMap, exp.Values[0])
			}
		}

		if len(nodeMap) != 0 {
			t.Fatalf("did not find pods on nodes %+v", nodeMap)
		}
	}

}

// Simulate a cluster with 100 nodes, but simulate a limit (like a quota limit)
// of 10 pods, and verify that the ds doesn't make 100 create calls per sync pass
func TestSimpleDaemonSetPodCreateErrors(t *testing.T) {
@@ -1545,6 +1663,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
		predicateName     string
		podsOnNode        []*v1.Pod
		nodeCondition     []v1.NodeCondition
		nodeUnschedulable bool
		ds                *apps.DaemonSet
		wantToRun, shouldSchedule, shouldContinueRunning bool
		err               error
@@ -1800,6 +1919,24 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
			shouldSchedule:        true,
			shouldContinueRunning: true,
		},
		{
			predicateName: "ShouldRunDaemonPodOnUnscheduableNode",
			ds: &apps.DaemonSet{
				Spec: apps.DaemonSetSpec{
					Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
					Template: v1.PodTemplateSpec{
						ObjectMeta: metav1.ObjectMeta{
							Labels: simpleDaemonSetLabel,
						},
						Spec: resourcePodSpec("", "50M", "0.5"),
					},
				},
			},
			nodeUnschedulable:     true,
			wantToRun:             true,
			shouldSchedule:        true,
			shouldContinueRunning: true,
		},
	}

	for i, c := range cases {
@@ -1807,6 +1944,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
			node := newNode("test-node", simpleDaemonSetLabel)
			node.Status.Conditions = append(node.Status.Conditions, c.nodeCondition...)
			node.Status.Allocatable = allocatableResources("100M", "1")
			node.Spec.Unschedulable = c.nodeUnschedulable
			manager, _, _, err := newTestController()
			if err != nil {
				t.Fatalf("error creating DaemonSets controller: %v", err)
@@ -1815,6 +1953,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
			for _, p := range c.podsOnNode {
				manager.podStore.Add(p)
				p.Spec.NodeName = "test-node"
				manager.podNodeIndex.Add(p)
			}
			c.ds.Spec.UpdateStrategy = *strategy
			wantToRun, shouldSchedule, shouldContinueRunning, err := manager.nodeShouldRunDaemonPod(node, c.ds)
5
vendor/k8s.io/kubernetes/pkg/controller/daemon/util/BUILD
generated
vendored
@@ -19,6 +19,7 @@ go_library(
        "//vendor/k8s.io/api/apps/v1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
    ],
@@ -43,8 +44,12 @@ go_test(
    embed = [":go_default_library"],
    deps = [
        "//pkg/api/testapi:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/kubelet/apis:go_default_library",
        "//pkg/scheduler/algorithm:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
    ],
)
115
vendor/k8s.io/kubernetes/pkg/controller/daemon/util/daemonset_util.go
generated
vendored
@@ -23,6 +23,7 @@ import (
	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
@@ -133,3 +134,117 @@ func SplitByAvailablePods(minReadySeconds int32, pods []*v1.Pod) ([]*v1.Pod, []*
	}
	return availablePods, unavailablePods
}

// ReplaceDaemonSetPodNodeNameNodeAffinity replaces the RequiredDuringSchedulingIgnoredDuringExecution
// NodeAffinity of the given affinity with a new NodeAffinity that selects the given nodeName.
// Note that this function assumes that no NodeAffinity conflicts with the selected nodeName.
func ReplaceDaemonSetPodNodeNameNodeAffinity(affinity *v1.Affinity, nodename string) *v1.Affinity {
	nodeSelReq := v1.NodeSelectorRequirement{
		Key:      algorithm.NodeFieldSelectorKeyNodeName,
		Operator: v1.NodeSelectorOpIn,
		Values:   []string{nodename},
	}

	nodeSelector := &v1.NodeSelector{
		NodeSelectorTerms: []v1.NodeSelectorTerm{
			{
				MatchFields: []v1.NodeSelectorRequirement{nodeSelReq},
			},
		},
	}

	if affinity == nil {
		return &v1.Affinity{
			NodeAffinity: &v1.NodeAffinity{
				RequiredDuringSchedulingIgnoredDuringExecution: nodeSelector,
			},
		}
	}

	if affinity.NodeAffinity == nil {
		affinity.NodeAffinity = &v1.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: nodeSelector,
		}
		return affinity
	}

	nodeAffinity := affinity.NodeAffinity

	if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
		nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = nodeSelector
		return affinity
	}

	// Replace node selector with the new one.
	nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = []v1.NodeSelectorTerm{
		{
			MatchFields: []v1.NodeSelectorRequirement{nodeSelReq},
		},
	}

	return affinity
}
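A usage sketch for the helper above (the node name "node-1" is illustrative): starting from a nil affinity, it pins the pod to a single node via a matchFields requirement on the node-name field selector.

aff := ReplaceDaemonSetPodNodeNameNodeAffinity(nil, "node-1")
req := aff.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields[0]
// req.Key is algorithm.NodeFieldSelectorKeyNodeName ("metadata.name"),
// req.Operator is v1.NodeSelectorOpIn and req.Values is []string{"node-1"};
// any pre-existing required node affinity would have been replaced.
fmt.Println(req.Key, req.Operator, req.Values)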

// AppendNoScheduleTolerationIfNotExist appends the unschedulable toleration to `.spec` if it does not
// already exist; otherwise, it makes no changes to `.spec.tolerations`.
func AppendNoScheduleTolerationIfNotExist(tolerations []v1.Toleration) []v1.Toleration {
	unschedulableToleration := v1.Toleration{
		Key:      algorithm.TaintNodeUnschedulable,
		Operator: v1.TolerationOpExists,
		Effect:   v1.TaintEffectNoSchedule,
	}

	unschedulableTaintExist := false

	for _, t := range tolerations {
		if apiequality.Semantic.DeepEqual(t, unschedulableToleration) {
			unschedulableTaintExist = true
			break
		}
	}

	if !unschedulableTaintExist {
		tolerations = append(tolerations, unschedulableToleration)
	}

	return tolerations
}
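Usage sketch; because the helper is idempotent, callers can safely apply it on every sync:

tolerations := AppendNoScheduleTolerationIfNotExist(nil)
tolerations = AppendNoScheduleTolerationIfNotExist(tolerations)
// Still exactly one toleration: node.kubernetes.io/unschedulable:NoSchedule (Exists).
fmt.Println(len(tolerations)) // 1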

// GetTargetNodeName gets the target node name of DaemonSet pods. If `.spec.NodeName` is not empty,
// it returns `.spec.NodeName`; otherwise, it retrieves the node name of pending pods from NodeAffinity.
// It returns an error if the node name cannot be retrieved from either `.spec.NodeName` or NodeAffinity.
func GetTargetNodeName(pod *v1.Pod) (string, error) {
	if len(pod.Spec.NodeName) != 0 {
		return pod.Spec.NodeName, nil
	}

	// If ScheduleDaemonSetPods was enabled before, retrieve node name of unscheduled pods from NodeAffinity
	if pod.Spec.Affinity == nil ||
		pod.Spec.Affinity.NodeAffinity == nil ||
		pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
		return "", fmt.Errorf("no spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution for pod %s/%s",
			pod.Namespace, pod.Name)
	}

	terms := pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
	if len(terms) < 1 {
		return "", fmt.Errorf("no nodeSelectorTerms in requiredDuringSchedulingIgnoredDuringExecution of pod %s/%s",
			pod.Namespace, pod.Name)
	}

	for _, term := range terms {
		for _, exp := range term.MatchFields {
			if exp.Key == algorithm.NodeFieldSelectorKeyNodeName &&
				exp.Operator == v1.NodeSelectorOpIn {
				if len(exp.Values) != 1 {
					return "", fmt.Errorf("the matchFields value of '%s' is not unique for pod %s/%s",
						algorithm.NodeFieldSelectorKeyNodeName, pod.Namespace, pod.Name)
				}

				return exp.Values[0], nil
			}
		}
	}

	return "", fmt.Errorf("no node name found for pod %s/%s", pod.Namespace, pod.Name)
}
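Usage sketch covering both paths (the pod literals are made up; it reuses the replacement helper defined earlier in this file):

bound := &v1.Pod{Spec: v1.PodSpec{NodeName: "node-1"}}
name, _ := GetTargetNodeName(bound) // "node-1", straight from spec.nodeName

pending := &v1.Pod{Spec: v1.PodSpec{
	Affinity: ReplaceDaemonSetPodNodeNameNodeAffinity(nil, "node-2"),
}}
name, _ = GetTargetNodeName(pending) // "node-2", recovered from the NodeAffinity matchFields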
429
vendor/k8s.io/kubernetes/pkg/controller/daemon/util/daemonset_util_test.go
generated
vendored
@@ -18,12 +18,17 @@ package util

import (
	"fmt"
	"reflect"
	"testing"

	"k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/api/testapi"
	"k8s.io/kubernetes/pkg/features"
	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
	"k8s.io/kubernetes/pkg/scheduler/algorithm"
)

func newPod(podName string, nodeName string, label map[string]string) *v1.Pod {
@@ -168,3 +173,427 @@ func int64Ptr(i int) *int64 {
	li := int64(i)
	return &li
}

func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
	tests := []struct {
		affinity *v1.Affinity
		hostname string
		expected *v1.Affinity
	}{
		{
			affinity: nil,
			hostname: "host_1",
			expected: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchFields: []v1.NodeSelectorRequirement{
									{
										Key:      algorithm.NodeFieldSelectorKeyNodeName,
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"host_1"},
									},
								},
							},
						},
					},
				},
			},
		},
		{
			affinity: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchExpressions: []v1.NodeSelectorRequirement{
									{
										Key:      kubeletapis.LabelHostname,
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"host_1"},
									},
								},
							},
						},
					},
				},
			},
			hostname: "host_1",
			expected: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchFields: []v1.NodeSelectorRequirement{
									{
										Key:      algorithm.NodeFieldSelectorKeyNodeName,
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"host_1"},
									},
								},
							},
						},
					},
				},
			},
		},
		{
			affinity: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
						{
							Preference: v1.NodeSelectorTerm{
								MatchExpressions: []v1.NodeSelectorRequirement{
									{
										Key:      kubeletapis.LabelHostname,
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"host_1"},
									},
								},
							},
						},
					},
				},
			},
			hostname: "host_1",
			expected: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
						{
							Preference: v1.NodeSelectorTerm{
								MatchExpressions: []v1.NodeSelectorRequirement{
									{
										Key:      kubeletapis.LabelHostname,
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"host_1"},
									},
								},
							},
						},
					},
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchFields: []v1.NodeSelectorRequirement{
									{
										Key:      algorithm.NodeFieldSelectorKeyNodeName,
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"host_1"},
									},
								},
							},
						},
					},
				},
			},
		},
		{
			affinity: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchFields: []v1.NodeSelectorRequirement{
									{
										Key:      algorithm.NodeFieldSelectorKeyNodeName,
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"host_1", "host_2"},
									},
								},
							},
						},
					},
				},
			},
			hostname: "host_1",
			expected: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchFields: []v1.NodeSelectorRequirement{
									{
										Key:      algorithm.NodeFieldSelectorKeyNodeName,
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"host_1"},
									},
								},
							},
						},
					},
				},
			},
		},
		{
			affinity: nil,
			hostname: "host_1",
			expected: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchFields: []v1.NodeSelectorRequirement{
									{
										Key:      algorithm.NodeFieldSelectorKeyNodeName,
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"host_1"},
									},
								},
							},
						},
					},
				},
			},
		},
		{
			affinity: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchExpressions: []v1.NodeSelectorRequirement{
									{
										Key:      "hostname",
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"host_1"},
									},
								},
							},
							{
								MatchFields: []v1.NodeSelectorRequirement{
									{
										Key:      algorithm.NodeFieldSelectorKeyNodeName,
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"host_2"},
									},
								},
							},
						},
					},
				},
			},
			hostname: "host_1",
			expected: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchFields: []v1.NodeSelectorRequirement{
									{
										Key:      algorithm.NodeFieldSelectorKeyNodeName,
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"host_1"},
									},
								},
							},
						},
					},
				},
			},
		},
		{
			affinity: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchFields: []v1.NodeSelectorRequirement{
									{
										Key:      algorithm.NodeFieldSelectorKeyNodeName,
										Operator: v1.NodeSelectorOpNotIn,
										Values:   []string{"host_2"},
									},
								},
							},
						},
					},
				},
			},
			hostname: "host_1",
			expected: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchFields: []v1.NodeSelectorRequirement{
									{
										Key:      algorithm.NodeFieldSelectorKeyNodeName,
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"host_1"},
									},
								},
							},
						},
					},
				},
			},
		},
		{
			affinity: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchFields: []v1.NodeSelectorRequirement{
									{
										// NOTE: Only `metadata.name` is a valid key in `MatchFields` in 1.11;
										// added this case for compatibility: the feature works as normal
										// when new keys are introduced.
										Key:      "metadata.foo",
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"bar"},
									},
								},
							},
						},
					},
				},
			},
			hostname: "host_1",
			expected: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchFields: []v1.NodeSelectorRequirement{
									{
										Key:      algorithm.NodeFieldSelectorKeyNodeName,
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"host_1"},
									},
								},
							},
						},
					},
				},
			},
		},
	}

	for i, test := range tests {
		got := ReplaceDaemonSetPodNodeNameNodeAffinity(test.affinity, test.hostname)
		if !reflect.DeepEqual(test.expected, got) {
			t.Errorf("Failed to append NodeAffinity in case %d, got: %v, expected: %v",
				i, got, test.expected)
		}
	}
}

func forEachFeatureGate(t *testing.T, tf func(t *testing.T), gates ...utilfeature.Feature) {
	for _, fg := range gates {
		func() {
			enabled := utilfeature.DefaultFeatureGate.Enabled(fg)
			defer func() {
				utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, enabled))
			}()

			for _, f := range []bool{true, false} {
				utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, f))
				t.Run(fmt.Sprintf("%v (%t)", fg, f), tf)
			}
		}()
	}
}

func TestGetTargetNodeName(t *testing.T) {
	testFun := func(t *testing.T) {
		tests := []struct {
			pod         *v1.Pod
			nodeName    string
			expectedErr bool
		}{
			{
				pod: &v1.Pod{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "pod1",
						Namespace: "default",
					},
					Spec: v1.PodSpec{
						NodeName: "node-1",
					},
				},
				nodeName: "node-1",
			},
			{
				pod: &v1.Pod{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "pod2",
						Namespace: "default",
					},
					Spec: v1.PodSpec{
						Affinity: &v1.Affinity{
							NodeAffinity: &v1.NodeAffinity{
								RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
									NodeSelectorTerms: []v1.NodeSelectorTerm{
										{
											MatchFields: []v1.NodeSelectorRequirement{
												{
													Key:      algorithm.NodeFieldSelectorKeyNodeName,
													Operator: v1.NodeSelectorOpIn,
													Values:   []string{"node-1"},
												},
											},
										},
									},
								},
							},
						},
					},
				},
				nodeName: "node-1",
			},
			{
				pod: &v1.Pod{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "pod3",
						Namespace: "default",
					},
					Spec: v1.PodSpec{
						Affinity: &v1.Affinity{
							NodeAffinity: &v1.NodeAffinity{
								RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
									NodeSelectorTerms: []v1.NodeSelectorTerm{
										{
											MatchFields: []v1.NodeSelectorRequirement{
												{
													Key:      algorithm.NodeFieldSelectorKeyNodeName,
													Operator: v1.NodeSelectorOpIn,
													Values:   []string{"node-1", "node-2"},
												},
											},
										},
									},
								},
							},
						},
					},
				},
				expectedErr: true,
			},
			{
				pod: &v1.Pod{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "pod4",
						Namespace: "default",
					},
					Spec: v1.PodSpec{},
				},
				expectedErr: true,
			},
		}

		for _, test := range tests {
			got, err := GetTargetNodeName(test.pod)
			if test.expectedErr != (err != nil) {
				t.Errorf("Unexpected error, expectedErr: %v, err: %v", test.expectedErr, err)
			} else if !test.expectedErr {
				if test.nodeName != got {
					t.Errorf("Failed to get target node name, got: %v, expected: %v", got, test.nodeName)
				}
			}
		}
	}

	forEachFeatureGate(t, testFun, features.ScheduleDaemonSetPods)
}
9
vendor/k8s.io/kubernetes/pkg/controller/deployment/BUILD
generated
vendored
@@ -23,23 +23,23 @@ go_library(
        "//pkg/util/labels:go_default_library",
        "//pkg/util/metrics:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/apps/v1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/informers/apps/v1:go_default_library",
        "//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/informers/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/listers/apps/v1:go_default_library",
        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
        "//vendor/k8s.io/client-go/util/integer:go_default_library",
@@ -58,7 +58,6 @@ go_test(
    ],
    embed = [":go_default_library"],
    deps = [
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/apis/apps/install:go_default_library",
        "//pkg/apis/authentication/install:go_default_library",
        "//pkg/apis/authorization/install:go_default_library",
@@ -66,13 +65,13 @@ go_test(
        "//pkg/apis/batch/install:go_default_library",
        "//pkg/apis/certificates/install:go_default_library",
        "//pkg/apis/core/install:go_default_library",
        "//pkg/apis/extensions/install:go_default_library",
        "//pkg/apis/policy/install:go_default_library",
        "//pkg/apis/rbac/install:go_default_library",
        "//pkg/apis/settings/install:go_default_library",
        "//pkg/apis/storage/install:go_default_library",
        "//pkg/controller:go_default_library",
        "//pkg/controller/deployment/util:go_default_library",
        "//vendor/k8s.io/api/apps/v1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
77
vendor/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller.go
generated
vendored
@@ -27,21 +27,21 @@ import (

	"github.com/golang/glog"

	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	appsinformers "k8s.io/client-go/informers/apps/v1"
	coreinformers "k8s.io/client-go/informers/core/v1"
	extensionsinformers "k8s.io/client-go/informers/extensions/v1beta1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	appslisters "k8s.io/client-go/listers/apps/v1"
	corelisters "k8s.io/client-go/listers/core/v1"
	extensionslisters "k8s.io/client-go/listers/extensions/v1beta1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
@@ -60,7 +60,7 @@ const (
)

// controllerKind contains the schema.GroupVersionKind for this controller type.
var controllerKind = extensions.SchemeGroupVersion.WithKind("Deployment")
var controllerKind = apps.SchemeGroupVersion.WithKind("Deployment")
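The GroupVersionKind above is what the controller's ownership checks compare against. A sketch of the shape of that check follows; it mirrors the resolveControllerRef logic further down in this file rather than adding anything new:

if controllerRef := metav1.GetControllerOf(pod); controllerRef != nil &&
	controllerRef.Kind == controllerKind.Kind {
	// Only now resolve the owning Deployment by name and verify that its
	// UID matches controllerRef.UID before treating it as the owner.
}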
// DeploymentController is responsible for synchronizing Deployment objects stored
// in the system with actual running replica sets and pods.
@@ -73,12 +73,12 @@ type DeploymentController struct {
	// To allow injection of syncDeployment for testing.
	syncHandler func(dKey string) error
	// used for unit testing
	enqueueDeployment func(deployment *extensions.Deployment)
	enqueueDeployment func(deployment *apps.Deployment)

	// dLister can list/get deployments from the shared informer's store
	dLister extensionslisters.DeploymentLister
	dLister appslisters.DeploymentLister
	// rsLister can list/get replica sets from the shared informer's store
	rsLister extensionslisters.ReplicaSetLister
	rsLister appslisters.ReplicaSetLister
	// podLister can list/get pods from the shared informer's store
	podLister corelisters.PodLister

@@ -97,11 +97,10 @@ type DeploymentController struct {
}

// NewDeploymentController creates a new DeploymentController.
func NewDeploymentController(dInformer extensionsinformers.DeploymentInformer, rsInformer extensionsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, client clientset.Interface) (*DeploymentController, error) {
func NewDeploymentController(dInformer appsinformers.DeploymentInformer, rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, client clientset.Interface) (*DeploymentController, error) {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	// TODO: remove the wrapper when every client has moved to use the clientset.
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client.CoreV1().RESTClient()).Events("")})
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})

	if client != nil && client.CoreV1().RESTClient().GetRateLimiter() != nil {
		if err := metrics.RegisterMetricAndTrackRateLimiterUsage("deployment_controller", client.CoreV1().RESTClient().GetRateLimiter()); err != nil {
@@ -165,27 +164,27 @@ func (dc *DeploymentController) Run(workers int, stopCh <-chan struct{}) {
}

func (dc *DeploymentController) addDeployment(obj interface{}) {
	d := obj.(*extensions.Deployment)
	d := obj.(*apps.Deployment)
	glog.V(4).Infof("Adding deployment %s", d.Name)
	dc.enqueueDeployment(d)
}

func (dc *DeploymentController) updateDeployment(old, cur interface{}) {
	oldD := old.(*extensions.Deployment)
	curD := cur.(*extensions.Deployment)
	oldD := old.(*apps.Deployment)
	curD := cur.(*apps.Deployment)
	glog.V(4).Infof("Updating deployment %s", oldD.Name)
	dc.enqueueDeployment(curD)
}

func (dc *DeploymentController) deleteDeployment(obj interface{}) {
	d, ok := obj.(*extensions.Deployment)
	d, ok := obj.(*apps.Deployment)
	if !ok {
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
			return
		}
		d, ok = tombstone.Obj.(*extensions.Deployment)
		d, ok = tombstone.Obj.(*apps.Deployment)
		if !ok {
			utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a Deployment %#v", obj))
			return
@@ -197,7 +196,7 @@ func (dc *DeploymentController) deleteDeployment(obj interface{}) {

// addReplicaSet enqueues the deployment that manages a ReplicaSet when the ReplicaSet is created.
func (dc *DeploymentController) addReplicaSet(obj interface{}) {
	rs := obj.(*extensions.ReplicaSet)
	rs := obj.(*apps.ReplicaSet)

	if rs.DeletionTimestamp != nil {
		// On a restart of the controller manager, it's possible for an object to
@@ -231,7 +230,7 @@ func (dc *DeploymentController) addReplicaSet(obj interface{}) {

// getDeploymentsForReplicaSet returns a list of Deployments that potentially
// match a ReplicaSet.
func (dc *DeploymentController) getDeploymentsForReplicaSet(rs *extensions.ReplicaSet) []*extensions.Deployment {
func (dc *DeploymentController) getDeploymentsForReplicaSet(rs *apps.ReplicaSet) []*apps.Deployment {
	deployments, err := dc.dLister.GetDeploymentsForReplicaSet(rs)
	if err != nil || len(deployments) == 0 {
		return nil
@@ -251,11 +250,11 @@ func (dc *DeploymentController) getDeploymentsForReplicaSet(rs *extensions.Repli

// updateReplicaSet figures out what deployment(s) manage a ReplicaSet when the ReplicaSet
// is updated and wakes them up. If anything of the ReplicaSets has changed, we need to
// awaken both the old and new deployments. old and cur must be *extensions.ReplicaSet
// awaken both the old and new deployments. old and cur must be *apps.ReplicaSet
// types.
func (dc *DeploymentController) updateReplicaSet(old, cur interface{}) {
	curRS := cur.(*extensions.ReplicaSet)
	oldRS := old.(*extensions.ReplicaSet)
	curRS := cur.(*apps.ReplicaSet)
	oldRS := old.(*apps.ReplicaSet)
	if curRS.ResourceVersion == oldRS.ResourceVersion {
		// Periodic resync will send update events for all known replica sets.
		// Two different versions of the same replica set will always have different RVs.
@@ -299,10 +298,10 @@ func (dc *DeploymentController) updateReplicaSet(old, cur interface{}) {
}

// deleteReplicaSet enqueues the deployment that manages a ReplicaSet when
// the ReplicaSet is deleted. obj could be an *extensions.ReplicaSet, or
// the ReplicaSet is deleted. obj could be an *apps.ReplicaSet, or
// a DeletionFinalStateUnknown marker item.
func (dc *DeploymentController) deleteReplicaSet(obj interface{}) {
	rs, ok := obj.(*extensions.ReplicaSet)
	rs, ok := obj.(*apps.ReplicaSet)

	// When a delete is dropped, the relist will notice a pod in the store not
	// in the list, leading to the insertion of a tombstone object which contains
@@ -314,7 +313,7 @@ func (dc *DeploymentController) deleteReplicaSet(obj interface{}) {
		utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
		return
	}
	rs, ok = tombstone.Obj.(*extensions.ReplicaSet)
	rs, ok = tombstone.Obj.(*apps.ReplicaSet)
	if !ok {
		utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a ReplicaSet %#v", obj))
		return
@@ -355,9 +354,9 @@ func (dc *DeploymentController) deletePod(obj interface{}) {
		}
	}
	glog.V(4).Infof("Pod %s deleted.", pod.Name)
	if d := dc.getDeploymentForPod(pod); d != nil && d.Spec.Strategy.Type == extensions.RecreateDeploymentStrategyType {
	if d := dc.getDeploymentForPod(pod); d != nil && d.Spec.Strategy.Type == apps.RecreateDeploymentStrategyType {
		// Sync if this Deployment now has no more Pods.
		rsList, err := util.ListReplicaSets(d, util.RsListFromClient(dc.client.ExtensionsV1beta1()))
		rsList, err := util.ListReplicaSets(d, util.RsListFromClient(dc.client.AppsV1()))
		if err != nil {
			return
		}
@@ -375,7 +374,7 @@ func (dc *DeploymentController) deletePod(obj interface{}) {
	}
}

func (dc *DeploymentController) enqueue(deployment *extensions.Deployment) {
func (dc *DeploymentController) enqueue(deployment *apps.Deployment) {
	key, err := controller.KeyFunc(deployment)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", deployment, err))
@@ -385,7 +384,7 @@ func (dc *DeploymentController) enqueue(deployment *extensions.Deployment) {
	dc.queue.Add(key)
}

func (dc *DeploymentController) enqueueRateLimited(deployment *extensions.Deployment) {
func (dc *DeploymentController) enqueueRateLimited(deployment *apps.Deployment) {
	key, err := controller.KeyFunc(deployment)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", deployment, err))
@@ -396,7 +395,7 @@ func (dc *DeploymentController) enqueueRateLimited(deployment *extensions.Deploy
}

// enqueueAfter will enqueue a deployment after the provided amount of time.
func (dc *DeploymentController) enqueueAfter(deployment *extensions.Deployment, after time.Duration) {
func (dc *DeploymentController) enqueueAfter(deployment *apps.Deployment, after time.Duration) {
	key, err := controller.KeyFunc(deployment)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", deployment, err))
@@ -407,16 +406,16 @@ func (dc *DeploymentController) enqueueAfter(deployment *extensions.Deployment,
}

// getDeploymentForPod returns the deployment managing the given Pod.
func (dc *DeploymentController) getDeploymentForPod(pod *v1.Pod) *extensions.Deployment {
func (dc *DeploymentController) getDeploymentForPod(pod *v1.Pod) *apps.Deployment {
	// Find the owning replica set
	var rs *extensions.ReplicaSet
	var rs *apps.ReplicaSet
	var err error
	controllerRef := metav1.GetControllerOf(pod)
	if controllerRef == nil {
		// No controller owns this Pod.
		return nil
	}
	if controllerRef.Kind != extensions.SchemeGroupVersion.WithKind("ReplicaSet").Kind {
	if controllerRef.Kind != apps.SchemeGroupVersion.WithKind("ReplicaSet").Kind {
		// Not a pod owned by a replica set.
		return nil
	}
@@ -437,7 +436,7 @@ func (dc *DeploymentController) getDeploymentForPod(pod *v1.Pod) *extensions.Dep
// resolveControllerRef returns the controller referenced by a ControllerRef,
// or nil if the ControllerRef could not be resolved to a matching controller
// of the correct Kind.
func (dc *DeploymentController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *extensions.Deployment {
func (dc *DeploymentController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *apps.Deployment {
	// We can't look up by UID, so look up by Name and then verify UID.
	// Don't even try to look up by Name if it's the wrong Kind.
	if controllerRef.Kind != controllerKind.Kind {
@@ -495,7 +494,7 @@ func (dc *DeploymentController) handleErr(err error, key interface{}) {
// getReplicaSetsForDeployment uses ControllerRefManager to reconcile
// ControllerRef by adopting and orphaning.
// It returns the list of ReplicaSets that this Deployment should manage.
func (dc *DeploymentController) getReplicaSetsForDeployment(d *extensions.Deployment) ([]*extensions.ReplicaSet, error) {
func (dc *DeploymentController) getReplicaSetsForDeployment(d *apps.Deployment) ([]*apps.ReplicaSet, error) {
	// List all ReplicaSets to find those we own but that no longer match our
	// selector. They will be orphaned by ClaimReplicaSets().
	rsList, err := dc.rsLister.ReplicaSets(d.Namespace).List(labels.Everything())
@@ -509,7 +508,7 @@ func (dc *DeploymentController) getReplicaSetsForDeployment(d *extensions.Deploy
	// If any adoptions are attempted, we should first recheck for deletion with
	// an uncached quorum read sometime after listing ReplicaSets (see #42639).
	canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
		fresh, err := dc.client.ExtensionsV1beta1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
		fresh, err := dc.client.AppsV1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
		if err != nil {
			return nil, err
		}
@@ -526,7 +525,7 @@ func (dc *DeploymentController) getReplicaSetsForDeployment(d *extensions.Deploy
//
// It returns a map from ReplicaSet UID to a list of Pods controlled by that RS,
// according to the Pod's ControllerRef.
func (dc *DeploymentController) getPodMapForDeployment(d *extensions.Deployment, rsList []*extensions.ReplicaSet) (map[types.UID]*v1.PodList, error) {
func (dc *DeploymentController) getPodMapForDeployment(d *apps.Deployment, rsList []*apps.ReplicaSet) (map[types.UID]*v1.PodList, error) {
	// Get all Pods that potentially belong to this Deployment.
	selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
	if err != nil {
@@ -587,7 +586,7 @@ func (dc *DeploymentController) syncDeployment(key string) error {
		dc.eventRecorder.Eventf(d, v1.EventTypeWarning, "SelectingAll", "This deployment is selecting all pods. A non-empty selector is required.")
		if d.Status.ObservedGeneration < d.Generation {
			d.Status.ObservedGeneration = d.Generation
			dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d)
			dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
		}
		return nil
	}
@@ -626,7 +625,7 @@ func (dc *DeploymentController) syncDeployment(key string) error {
	// rollback is not re-entrant in case the underlying replica sets are updated with a new
	// revision so we should ensure that we won't proceed to update replica sets until we
	// make sure that the deployment has cleaned up its rollback spec in subsequent enqueues.
	if d.Spec.RollbackTo != nil {
	if getRollbackTo(d) != nil {
		return dc.rollback(d, rsList, podMap)
	}

@@ -639,9 +638,9 @@ func (dc *DeploymentController) syncDeployment(key string) error {
	}

	switch d.Spec.Strategy.Type {
	case extensions.RecreateDeploymentStrategyType:
	case apps.RecreateDeploymentStrategyType:
		return dc.rolloutRecreate(d, rsList, podMap)
	case extensions.RollingUpdateDeploymentStrategyType:
	case apps.RollingUpdateDeploymentStrategyType:
		return dc.rolloutRolling(d, rsList, podMap)
	}
	return fmt.Errorf("unexpected deployment strategy type: %s", d.Spec.Strategy.Type)
|
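The hunks above all make one mechanical change: Deployment types and clientset calls move from the extensions/v1beta1 API group to apps/v1. A minimal, self-contained sketch of the call-site change, not part of this commit; the clientset c and the names are illustrative assumptions, and the client-go vendored here takes no context argument:

    package main

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // getFreshDeployment issues the same kind of uncached read the adoption path
    // above performs, but against the apps/v1 group instead of extensions/v1beta1.
    func getFreshDeployment(c kubernetes.Interface, namespace, name string) error {
        // Before this commit: c.ExtensionsV1beta1().Deployments(namespace).Get(...)
        fresh, err := c.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{})
        if err != nil {
            return err
        }
        fmt.Printf("deployment %s/%s at generation %d\n", namespace, name, fresh.Generation)
        return nil
    }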
78
vendor/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller_test.go
generated
vendored
78
vendor/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller_test.go
generated
vendored
@ -20,6 +20,7 @@ import (
	"strconv"
	"testing"

	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -31,7 +32,6 @@ import (
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
	"k8s.io/client-go/tools/record"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	_ "k8s.io/kubernetes/pkg/apis/apps/install"
	_ "k8s.io/kubernetes/pkg/apis/authentication/install"
	_ "k8s.io/kubernetes/pkg/apis/authorization/install"
@ -39,7 +39,6 @@ import (
	_ "k8s.io/kubernetes/pkg/apis/batch/install"
	_ "k8s.io/kubernetes/pkg/apis/certificates/install"
	_ "k8s.io/kubernetes/pkg/apis/core/install"
	_ "k8s.io/kubernetes/pkg/apis/extensions/install"
	_ "k8s.io/kubernetes/pkg/apis/policy/install"
	_ "k8s.io/kubernetes/pkg/apis/rbac/install"
	_ "k8s.io/kubernetes/pkg/apis/settings/install"
@ -53,14 +52,14 @@ var (
	noTimestamp = metav1.Time{}
)

func rs(name string, replicas int, selector map[string]string, timestamp metav1.Time) *extensions.ReplicaSet {
	return &extensions.ReplicaSet{
func rs(name string, replicas int, selector map[string]string, timestamp metav1.Time) *apps.ReplicaSet {
	return &apps.ReplicaSet{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			CreationTimestamp: timestamp,
			Namespace: metav1.NamespaceDefault,
		},
		Spec: extensions.ReplicaSetSpec{
		Spec: apps.ReplicaSetSpec{
			Replicas: func() *int32 { i := int32(replicas); return &i }(),
			Selector: &metav1.LabelSelector{MatchLabels: selector},
			Template: v1.PodTemplateSpec{},
@ -68,27 +67,27 @@ func rs(name string, replicas int, selector map[string]string, timestamp metav1.
	}
}

func newRSWithStatus(name string, specReplicas, statusReplicas int, selector map[string]string) *extensions.ReplicaSet {
func newRSWithStatus(name string, specReplicas, statusReplicas int, selector map[string]string) *apps.ReplicaSet {
	rs := rs(name, specReplicas, selector, noTimestamp)
	rs.Status = extensions.ReplicaSetStatus{
	rs.Status = apps.ReplicaSetStatus{
		Replicas: int32(statusReplicas),
	}
	return rs
}

func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSurge, maxUnavailable *intstr.IntOrString, selector map[string]string) *extensions.Deployment {
	d := extensions.Deployment{
		TypeMeta: metav1.TypeMeta{APIVersion: legacyscheme.Registry.GroupOrDie(extensions.GroupName).GroupVersion.String()},
func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSurge, maxUnavailable *intstr.IntOrString, selector map[string]string) *apps.Deployment {
	d := apps.Deployment{
		TypeMeta: metav1.TypeMeta{APIVersion: "apps/v1"},
		ObjectMeta: metav1.ObjectMeta{
			UID: uuid.NewUUID(),
			Name: name,
			Namespace: metav1.NamespaceDefault,
			Annotations: make(map[string]string),
		},
		Spec: extensions.DeploymentSpec{
			Strategy: extensions.DeploymentStrategy{
				Type: extensions.RollingUpdateDeploymentStrategyType,
				RollingUpdate: &extensions.RollingUpdateDeployment{
		Spec: apps.DeploymentSpec{
			Strategy: apps.DeploymentStrategy{
				Type: apps.RollingUpdateDeploymentStrategyType,
				RollingUpdate: &apps.RollingUpdateDeployment{
					MaxUnavailable: func() *intstr.IntOrString { i := intstr.FromInt(0); return &i }(),
					MaxSurge: func() *intstr.IntOrString { i := intstr.FromInt(0); return &i }(),
				},
@ -119,8 +118,8 @@ func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSu
	return &d
}

func newReplicaSet(d *extensions.Deployment, name string, replicas int) *extensions.ReplicaSet {
	return &extensions.ReplicaSet{
func newReplicaSet(d *apps.Deployment, name string, replicas int) *apps.ReplicaSet {
	return &apps.ReplicaSet{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			UID: uuid.NewUUID(),
@ -128,7 +127,7 @@ func newReplicaSet(d *extensions.Deployment, name string, replicas int) *extensi
			Labels: d.Spec.Selector.MatchLabels,
			OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(d, controllerKind)},
		},
		Spec: extensions.ReplicaSetSpec{
		Spec: apps.ReplicaSetSpec{
			Selector: d.Spec.Selector,
			Replicas: func() *int32 { i := int32(replicas); return &i }(),
			Template: d.Spec.Template,
@ -136,7 +135,7 @@ func newReplicaSet(d *extensions.Deployment, name string, replicas int) *extensi
	}
}

func getKey(d *extensions.Deployment, t *testing.T) string {
func getKey(d *apps.Deployment, t *testing.T) string {
	if key, err := controller.KeyFunc(d); err != nil {
		t.Errorf("Unexpected error getting key for deployment %v: %v", d.Name, err)
		return ""
@ -150,8 +149,8 @@ type fixture struct {

	client *fake.Clientset
	// Objects to put in the store.
	dLister []*extensions.Deployment
	rsLister []*extensions.ReplicaSet
	dLister []*apps.Deployment
	rsLister []*apps.ReplicaSet
	podLister []*v1.Pod

	// Actions expected to happen on the client. Objects from here are also
@ -160,23 +159,23 @@ type fixture struct {
	objects []runtime.Object
}

func (f *fixture) expectGetDeploymentAction(d *extensions.Deployment) {
func (f *fixture) expectGetDeploymentAction(d *apps.Deployment) {
	action := core.NewGetAction(schema.GroupVersionResource{Resource: "deployments"}, d.Namespace, d.Name)
	f.actions = append(f.actions, action)
}

func (f *fixture) expectUpdateDeploymentStatusAction(d *extensions.Deployment) {
func (f *fixture) expectUpdateDeploymentStatusAction(d *apps.Deployment) {
	action := core.NewUpdateAction(schema.GroupVersionResource{Resource: "deployments"}, d.Namespace, d)
	action.Subresource = "status"
	f.actions = append(f.actions, action)
}

func (f *fixture) expectUpdateDeploymentAction(d *extensions.Deployment) {
func (f *fixture) expectUpdateDeploymentAction(d *apps.Deployment) {
	action := core.NewUpdateAction(schema.GroupVersionResource{Resource: "deployments"}, d.Namespace, d)
	f.actions = append(f.actions, action)
}

func (f *fixture) expectCreateRSAction(rs *extensions.ReplicaSet) {
func (f *fixture) expectCreateRSAction(rs *apps.ReplicaSet) {
	f.actions = append(f.actions, core.NewCreateAction(schema.GroupVersionResource{Resource: "replicasets"}, rs.Namespace, rs))
}

@ -190,7 +189,7 @@ func newFixture(t *testing.T) *fixture {
func (f *fixture) newController() (*DeploymentController, informers.SharedInformerFactory, error) {
	f.client = fake.NewSimpleClientset(f.objects...)
	informers := informers.NewSharedInformerFactory(f.client, controller.NoResyncPeriodFunc())
	c, err := NewDeploymentController(informers.Extensions().V1beta1().Deployments(), informers.Extensions().V1beta1().ReplicaSets(), informers.Core().V1().Pods(), f.client)
	c, err := NewDeploymentController(informers.Apps().V1().Deployments(), informers.Apps().V1().ReplicaSets(), informers.Core().V1().Pods(), f.client)
	if err != nil {
		return nil, nil, err
	}
@ -199,10 +198,10 @@ func (f *fixture) newController() (*DeploymentController, informers.SharedInform
	c.rsListerSynced = alwaysReady
	c.podListerSynced = alwaysReady
	for _, d := range f.dLister {
		informers.Extensions().V1beta1().Deployments().Informer().GetIndexer().Add(d)
		informers.Apps().V1().Deployments().Informer().GetIndexer().Add(d)
	}
	for _, rs := range f.rsLister {
		informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
		informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
	}
	for _, pod := range f.podLister {
		informers.Core().V1().Pods().Informer().GetIndexer().Add(pod)
@ -345,20 +344,19 @@ func TestReentrantRollback(t *testing.T) {
	f := newFixture(t)

	d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})

	d.Spec.RollbackTo = &extensions.RollbackConfig{Revision: 0}
	d.Annotations = map[string]string{util.RevisionAnnotation: "2"}
	setRollbackTo(d, &extensions.RollbackConfig{Revision: 0})
	f.dLister = append(f.dLister, d)

	rs1 := newReplicaSet(d, "deploymentrs-old", 0)
	rs1.Annotations = map[string]string{util.RevisionAnnotation: "1"}
	one := int64(1)
	rs1.Spec.Template.Spec.TerminationGracePeriodSeconds = &one
	rs1.Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey] = "hash"
	rs1.Spec.Selector.MatchLabels[apps.DefaultDeploymentUniqueLabelKey] = "hash"

	rs2 := newReplicaSet(d, "deploymentrs-new", 1)
	rs2.Annotations = map[string]string{util.RevisionAnnotation: "2"}
	rs2.Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey] = "hash"
	rs2.Spec.Selector.MatchLabels[apps.DefaultDeploymentUniqueLabelKey] = "hash"

	f.rsLister = append(f.rsLister, rs1, rs2)
	f.objects = append(f.objects, d, rs1, rs2)
@ -376,7 +374,7 @@ func TestPodDeletionEnqueuesRecreateDeployment(t *testing.T) {
	f := newFixture(t)

	foo := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
	foo.Spec.Strategy.Type = extensions.RecreateDeploymentStrategyType
	foo.Spec.Strategy.Type = apps.RecreateDeploymentStrategyType
	rs := newReplicaSet(foo, "foo-1", 1)
	pod := generatePodFromRS(rs)

@ -389,7 +387,7 @@ func TestPodDeletionEnqueuesRecreateDeployment(t *testing.T) {
		t.Fatalf("error creating Deployment controller: %v", err)
	}
	enqueued := false
	c.enqueueDeployment = func(d *extensions.Deployment) {
	c.enqueueDeployment = func(d *apps.Deployment) {
		if d.Name == "foo" {
			enqueued = true
		}
@ -409,7 +407,7 @@ func TestPodDeletionDoesntEnqueueRecreateDeployment(t *testing.T) {
	f := newFixture(t)

	foo := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
	foo.Spec.Strategy.Type = extensions.RecreateDeploymentStrategyType
	foo.Spec.Strategy.Type = apps.RecreateDeploymentStrategyType
	rs1 := newReplicaSet(foo, "foo-1", 1)
	rs2 := newReplicaSet(foo, "foo-1", 1)
	pod1 := generatePodFromRS(rs1)
@ -425,7 +423,7 @@ func TestPodDeletionDoesntEnqueueRecreateDeployment(t *testing.T) {
		t.Fatalf("error creating Deployment controller: %v", err)
	}
	enqueued := false
	c.enqueueDeployment = func(d *extensions.Deployment) {
	c.enqueueDeployment = func(d *apps.Deployment) {
		if d.Name == "foo" {
			enqueued = true
		}
@ -446,7 +444,7 @@ func TestPodDeletionPartialReplicaSetOwnershipEnqueueRecreateDeployment(t *testi
	f := newFixture(t)

	foo := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
	foo.Spec.Strategy.Type = extensions.RecreateDeploymentStrategyType
	foo.Spec.Strategy.Type = apps.RecreateDeploymentStrategyType
	rs1 := newReplicaSet(foo, "foo-1", 1)
	rs2 := newReplicaSet(foo, "foo-2", 2)
	rs2.OwnerReferences = nil
@ -461,7 +459,7 @@ func TestPodDeletionPartialReplicaSetOwnershipEnqueueRecreateDeployment(t *testi
		t.Fatalf("error creating Deployment controller: %v", err)
	}
	enqueued := false
	c.enqueueDeployment = func(d *extensions.Deployment) {
	c.enqueueDeployment = func(d *apps.Deployment) {
		if d.Name == "foo" {
			enqueued = true
		}
@ -482,7 +480,7 @@ func TestPodDeletionPartialReplicaSetOwnershipDoesntEnqueueRecreateDeployment(t
	f := newFixture(t)

	foo := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
	foo.Spec.Strategy.Type = extensions.RecreateDeploymentStrategyType
	foo.Spec.Strategy.Type = apps.RecreateDeploymentStrategyType
	rs1 := newReplicaSet(foo, "foo-1", 1)
	rs2 := newReplicaSet(foo, "foo-2", 2)
	rs2.OwnerReferences = nil
@ -500,7 +498,7 @@ func TestPodDeletionPartialReplicaSetOwnershipDoesntEnqueueRecreateDeployment(t
		t.Fatalf("error creating Deployment controller: %v", err)
	}
	enqueued := false
	c.enqueueDeployment = func(d *extensions.Deployment) {
	c.enqueueDeployment = func(d *apps.Deployment) {
		if d.Name == "foo" {
			enqueued = true
		}
@ -973,7 +971,7 @@ func bumpResourceVersion(obj metav1.Object) {
}

// generatePodFromRS creates a pod, with the input ReplicaSet's selector and its template
func generatePodFromRS(rs *extensions.ReplicaSet) *v1.Pod {
func generatePodFromRS(rs *apps.ReplicaSet) *v1.Pod {
	trueVar := true
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{

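The retyped test helpers now build apps/v1 objects directly, with the APIVersion hard-coded to "apps/v1" rather than derived through legacyscheme. A self-contained sketch of constructing such a test object; the helper name and field values are illustrative, not part of this diff:

    package main

    import (
        apps "k8s.io/api/apps/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // newTestDeployment mirrors the helper above in spirit: an apps/v1 Deployment
    // with its APIVersion spelled out, since the tests no longer consult the
    // legacyscheme registry for the group version.
    func newTestDeployment(name string, replicas int32) *apps.Deployment {
        return &apps.Deployment{
            TypeMeta:   metav1.TypeMeta{Kind: "Deployment", APIVersion: "apps/v1"},
            ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: metav1.NamespaceDefault},
            Spec:       apps.DeploymentSpec{Replicas: &replicas},
        }
    }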
32
vendor/k8s.io/kubernetes/pkg/controller/deployment/progress.go
generated
vendored
32
vendor/k8s.io/kubernetes/pkg/controller/deployment/progress.go
generated
vendored
@ -23,8 +23,8 @@ import (

	"github.com/golang/glog"

	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"
	"k8s.io/kubernetes/pkg/controller/deployment/util"
)

@ -32,18 +32,18 @@ import (
// cases this helper will run that cannot be prevented from the scaling detection,
// for example a resync of the deployment after it was scaled up. In those cases,
// we shouldn't try to estimate any progress.
func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, d *extensions.Deployment) error {
func (dc *DeploymentController) syncRolloutStatus(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, d *apps.Deployment) error {
	newStatus := calculateStatus(allRSs, newRS, d)

	// If there is no progressDeadlineSeconds set, remove any Progressing condition.
	if d.Spec.ProgressDeadlineSeconds == nil {
		util.RemoveDeploymentCondition(&newStatus, extensions.DeploymentProgressing)
		util.RemoveDeploymentCondition(&newStatus, apps.DeploymentProgressing)
	}

	// If there is only one replica set that is active then that means we are not running
	// a new rollout and this is a resync where we don't need to estimate any progress.
	// In such a case, we should simply not estimate any progress for this deployment.
	currentCond := util.GetDeploymentCondition(d.Status, extensions.DeploymentProgressing)
	currentCond := util.GetDeploymentCondition(d.Status, apps.DeploymentProgressing)
	isCompleteDeployment := newStatus.Replicas == newStatus.UpdatedReplicas && currentCond != nil && currentCond.Reason == util.NewRSAvailableReason
	// Check for progress only if there is a progress deadline set and the latest rollout
	// hasn't completed yet.
@ -56,7 +56,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe
		if newRS != nil {
			msg = fmt.Sprintf("ReplicaSet %q has successfully progressed.", newRS.Name)
		}
		condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, util.NewRSAvailableReason, msg)
		condition := util.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, util.NewRSAvailableReason, msg)
		util.SetDeploymentCondition(&newStatus, *condition)

	case util.DeploymentProgressing(d, &newStatus):
@ -66,7 +66,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe
		if newRS != nil {
			msg = fmt.Sprintf("ReplicaSet %q is progressing.", newRS.Name)
		}
		condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, util.ReplicaSetUpdatedReason, msg)
		condition := util.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, util.ReplicaSetUpdatedReason, msg)
		// Update the current Progressing condition or add a new one if it doesn't exist.
		// If a Progressing condition with status=true already exists, we should update
		// everything but lastTransitionTime. SetDeploymentCondition already does that but
@ -78,7 +78,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe
			if currentCond.Status == v1.ConditionTrue {
				condition.LastTransitionTime = currentCond.LastTransitionTime
			}
			util.RemoveDeploymentCondition(&newStatus, extensions.DeploymentProgressing)
			util.RemoveDeploymentCondition(&newStatus, apps.DeploymentProgressing)
		}
		util.SetDeploymentCondition(&newStatus, *condition)

@ -89,7 +89,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe
			if newRS != nil {
				msg = fmt.Sprintf("ReplicaSet %q has timed out progressing.", newRS.Name)
			}
			condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionFalse, util.TimedOutReason, msg)
			condition := util.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionFalse, util.TimedOutReason, msg)
			util.SetDeploymentCondition(&newStatus, *condition)
		}
	}
@ -100,7 +100,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe
		// There will be only one ReplicaFailure condition on the replica set.
		util.SetDeploymentCondition(&newStatus, replicaFailureCond[0])
	} else {
		util.RemoveDeploymentCondition(&newStatus, extensions.DeploymentReplicaFailure)
		util.RemoveDeploymentCondition(&newStatus, apps.DeploymentReplicaFailure)
	}

	// Do not update if there is nothing new to add.
@ -112,17 +112,17 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe

	newDeployment := d
	newDeployment.Status = newStatus
	_, err := dc.client.ExtensionsV1beta1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment)
	_, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment)
	return err
}

// getReplicaFailures will convert replica failure conditions from replica sets
// to deployment conditions.
func (dc *DeploymentController) getReplicaFailures(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) []extensions.DeploymentCondition {
	var conditions []extensions.DeploymentCondition
func (dc *DeploymentController) getReplicaFailures(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet) []apps.DeploymentCondition {
	var conditions []apps.DeploymentCondition
	if newRS != nil {
		for _, c := range newRS.Status.Conditions {
			if c.Type != extensions.ReplicaSetReplicaFailure {
			if c.Type != apps.ReplicaSetReplicaFailure {
				continue
			}
			conditions = append(conditions, util.ReplicaSetToDeploymentCondition(c))
@ -141,7 +141,7 @@ func (dc *DeploymentController) getReplicaFailures(allRSs []*extensions.ReplicaS
		}

		for _, c := range rs.Status.Conditions {
			if c.Type != extensions.ReplicaSetReplicaFailure {
			if c.Type != apps.ReplicaSetReplicaFailure {
				continue
			}
			conditions = append(conditions, util.ReplicaSetToDeploymentCondition(c))
@ -156,8 +156,8 @@ var nowFn = func() time.Time { return time.Now() }
// requeueStuckDeployment checks whether the provided deployment needs to be synced for a progress
// check. It returns the time after the deployment will be requeued for the progress check, 0 if it
// will be requeued now, or -1 if it does not need to be requeued.
func (dc *DeploymentController) requeueStuckDeployment(d *extensions.Deployment, newStatus extensions.DeploymentStatus) time.Duration {
	currentCond := util.GetDeploymentCondition(d.Status, extensions.DeploymentProgressing)
func (dc *DeploymentController) requeueStuckDeployment(d *apps.Deployment, newStatus apps.DeploymentStatus) time.Duration {
	currentCond := util.GetDeploymentCondition(d.Status, apps.DeploymentProgressing)
	// Can't estimate progress if there is no deadline in the spec or progressing condition in the current status.
	if d.Spec.ProgressDeadlineSeconds == nil || currentCond == nil {
		return time.Duration(-1)

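All conditions handled in this file are now the apps/v1 types. As a rough stand-in for what util.NewDeploymentCondition produces after the retype (the real constructor lives in pkg/controller/deployment/util; this helper is illustrative only):

    package main

    import (
        apps "k8s.io/api/apps/v1"
        "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // newProgressingCondition sketches the condition shape syncRolloutStatus
    // sets: the type constant now comes from apps/v1, not extensions/v1beta1.
    func newProgressingCondition(status v1.ConditionStatus, reason, message string) *apps.DeploymentCondition {
        now := metav1.Now()
        return &apps.DeploymentCondition{
            Type:               apps.DeploymentProgressing,
            Status:             status,
            Reason:             reason,
            Message:            message,
            LastUpdateTime:     now,
            LastTransitionTime: now,
        }
    }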
116
vendor/k8s.io/kubernetes/pkg/controller/deployment/progress_test.go
generated
vendored
116
vendor/k8s.io/kubernetes/pkg/controller/deployment/progress_test.go
generated
vendored
@ -20,16 +20,16 @@ import (
	"testing"
	"time"

	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/kubernetes/pkg/controller/deployment/util"
)

func newDeploymentStatus(replicas, updatedReplicas, availableReplicas int32) extensions.DeploymentStatus {
	return extensions.DeploymentStatus{
func newDeploymentStatus(replicas, updatedReplicas, availableReplicas int32) apps.DeploymentStatus {
	return apps.DeploymentStatus{
		Replicas: replicas,
		UpdatedReplicas: updatedReplicas,
		AvailableReplicas: availableReplicas,
@ -37,16 +37,16 @@ func newDeploymentStatus(replicas, updatedReplicas, availableReplicas int32) ext
}

// assumes the retuned deployment is always observed - not needed to be tested here.
func currentDeployment(pds *int32, replicas, statusReplicas, updatedReplicas, availableReplicas int32, conditions []extensions.DeploymentCondition) *extensions.Deployment {
	d := &extensions.Deployment{
func currentDeployment(pds *int32, replicas, statusReplicas, updatedReplicas, availableReplicas int32, conditions []apps.DeploymentCondition) *apps.Deployment {
	d := &apps.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name: "progress-test",
		},
		Spec: extensions.DeploymentSpec{
		Spec: apps.DeploymentSpec{
			ProgressDeadlineSeconds: pds,
			Replicas: &replicas,
			Strategy: extensions.DeploymentStrategy{
				Type: extensions.RecreateDeploymentStrategyType,
			Strategy: apps.DeploymentStrategy{
				Type: apps.RecreateDeploymentStrategyType,
			},
		},
		Status: newDeploymentStatus(statusReplicas, updatedReplicas, availableReplicas),
@ -56,9 +56,9 @@ func currentDeployment(pds *int32, replicas, statusReplicas, updatedReplicas, av
}

// helper to create RS with given availableReplicas
func newRSWithAvailable(name string, specReplicas, statusReplicas, availableReplicas int) *extensions.ReplicaSet {
func newRSWithAvailable(name string, specReplicas, statusReplicas, availableReplicas int) *apps.ReplicaSet {
	rs := rs(name, specReplicas, nil, metav1.Time{})
	rs.Status = extensions.ReplicaSetStatus{
	rs.Status = apps.ReplicaSetStatus{
		Replicas: int32(statusReplicas),
		AvailableReplicas: int32(availableReplicas),
	}
@ -67,16 +67,16 @@ func newRSWithAvailable(name string, specReplicas, statusReplicas, availableRepl

func TestRequeueStuckDeployment(t *testing.T) {
	pds := int32(60)
	failed := []extensions.DeploymentCondition{
	failed := []apps.DeploymentCondition{
		{
			Type: extensions.DeploymentProgressing,
			Type: apps.DeploymentProgressing,
			Status: v1.ConditionFalse,
			Reason: util.TimedOutReason,
		},
	}
	stuck := []extensions.DeploymentCondition{
	stuck := []apps.DeploymentCondition{
		{
			Type: extensions.DeploymentProgressing,
			Type: apps.DeploymentProgressing,
			Status: v1.ConditionTrue,
			LastUpdateTime: metav1.Date(2017, 2, 15, 18, 49, 00, 00, time.UTC),
		},
@ -84,8 +84,8 @@ func TestRequeueStuckDeployment(t *testing.T) {

	tests := []struct {
		name string
		d *extensions.Deployment
		status extensions.DeploymentStatus
		d *apps.Deployment
		status apps.DeploymentStatus
		nowFn func() time.Time
		expected time.Duration
	}{
@ -178,20 +178,20 @@ func TestRequeueStuckDeployment(t *testing.T) {
func TestSyncRolloutStatus(t *testing.T) {
	pds := int32(60)
	testTime := metav1.Date(2017, 2, 15, 18, 49, 00, 00, time.UTC)
	failedTimedOut := extensions.DeploymentCondition{
		Type: extensions.DeploymentProgressing,
	failedTimedOut := apps.DeploymentCondition{
		Type: apps.DeploymentProgressing,
		Status: v1.ConditionFalse,
		Reason: util.TimedOutReason,
	}
	newRSAvailable := extensions.DeploymentCondition{
		Type: extensions.DeploymentProgressing,
	newRSAvailable := apps.DeploymentCondition{
		Type: apps.DeploymentProgressing,
		Status: v1.ConditionTrue,
		Reason: util.NewRSAvailableReason,
		LastUpdateTime: testTime,
		LastTransitionTime: testTime,
	}
	replicaSetUpdated := extensions.DeploymentCondition{
		Type: extensions.DeploymentProgressing,
	replicaSetUpdated := apps.DeploymentCondition{
		Type: apps.DeploymentProgressing,
		Status: v1.ConditionTrue,
		Reason: util.ReplicaSetUpdatedReason,
		LastUpdateTime: testTime,
@ -200,10 +200,10 @@ func TestSyncRolloutStatus(t *testing.T) {

	tests := []struct {
		name string
		d *extensions.Deployment
		allRSs []*extensions.ReplicaSet
		newRS *extensions.ReplicaSet
		conditionType extensions.DeploymentConditionType
		d *apps.Deployment
		allRSs []*apps.ReplicaSet
		newRS *apps.ReplicaSet
		conditionType apps.DeploymentConditionType
		conditionStatus v1.ConditionStatus
		conditionReason string
		lastUpdate metav1.Time
@ -211,15 +211,15 @@ func TestSyncRolloutStatus(t *testing.T) {
	}{
		{
			name: "General: remove Progressing condition and do not estimate progress if deployment has no Progress Deadline",
			d: currentDeployment(nil, 3, 2, 2, 2, []extensions.DeploymentCondition{replicaSetUpdated}),
			allRSs: []*extensions.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)},
			d: currentDeployment(nil, 3, 2, 2, 2, []apps.DeploymentCondition{replicaSetUpdated}),
			allRSs: []*apps.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)},
			newRS: newRSWithAvailable("foo", 3, 2, 2),
		},
		{
			name: "General: do not estimate progress of deployment with only one active ReplicaSet",
			d: currentDeployment(&pds, 3, 3, 3, 3, []extensions.DeploymentCondition{newRSAvailable}),
			allRSs: []*extensions.ReplicaSet{newRSWithAvailable("bar", 3, 3, 3)},
			conditionType: extensions.DeploymentProgressing,
			d: currentDeployment(&pds, 3, 3, 3, 3, []apps.DeploymentCondition{newRSAvailable}),
			allRSs: []*apps.ReplicaSet{newRSWithAvailable("bar", 3, 3, 3)},
			conditionType: apps.DeploymentProgressing,
			conditionStatus: v1.ConditionTrue,
			conditionReason: util.NewRSAvailableReason,
			lastUpdate: testTime,
@ -227,83 +227,83 @@ func TestSyncRolloutStatus(t *testing.T) {
		},
		{
			name: "DeploymentProgressing: dont update lastTransitionTime if deployment already has Progressing=True",
			d: currentDeployment(&pds, 3, 2, 2, 2, []extensions.DeploymentCondition{replicaSetUpdated}),
			allRSs: []*extensions.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)},
			d: currentDeployment(&pds, 3, 2, 2, 2, []apps.DeploymentCondition{replicaSetUpdated}),
			allRSs: []*apps.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)},
			newRS: newRSWithAvailable("foo", 3, 2, 2),
			conditionType: extensions.DeploymentProgressing,
			conditionType: apps.DeploymentProgressing,
			conditionStatus: v1.ConditionTrue,
			conditionReason: util.ReplicaSetUpdatedReason,
			lastTransition: testTime,
		},
		{
			name: "DeploymentProgressing: update everything if deployment has Progressing=False",
			d: currentDeployment(&pds, 3, 2, 2, 2, []extensions.DeploymentCondition{failedTimedOut}),
			allRSs: []*extensions.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)},
			d: currentDeployment(&pds, 3, 2, 2, 2, []apps.DeploymentCondition{failedTimedOut}),
			allRSs: []*apps.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)},
			newRS: newRSWithAvailable("foo", 3, 2, 2),
			conditionType: extensions.DeploymentProgressing,
			conditionType: apps.DeploymentProgressing,
			conditionStatus: v1.ConditionTrue,
			conditionReason: util.ReplicaSetUpdatedReason,
		},
		{
			name: "DeploymentProgressing: create Progressing condition if it does not exist",
			d: currentDeployment(&pds, 3, 2, 2, 2, []extensions.DeploymentCondition{}),
			allRSs: []*extensions.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)},
			d: currentDeployment(&pds, 3, 2, 2, 2, []apps.DeploymentCondition{}),
			allRSs: []*apps.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)},
			newRS: newRSWithAvailable("foo", 3, 2, 2),
			conditionType: extensions.DeploymentProgressing,
			conditionType: apps.DeploymentProgressing,
			conditionStatus: v1.ConditionTrue,
			conditionReason: util.ReplicaSetUpdatedReason,
		},
		{
			name: "DeploymentComplete: dont update lastTransitionTime if deployment already has Progressing=True",
			d: currentDeployment(&pds, 3, 3, 3, 3, []extensions.DeploymentCondition{replicaSetUpdated}),
			allRSs: []*extensions.ReplicaSet{},
			d: currentDeployment(&pds, 3, 3, 3, 3, []apps.DeploymentCondition{replicaSetUpdated}),
			allRSs: []*apps.ReplicaSet{},
			newRS: newRSWithAvailable("foo", 3, 3, 3),
			conditionType: extensions.DeploymentProgressing,
			conditionType: apps.DeploymentProgressing,
			conditionStatus: v1.ConditionTrue,
			conditionReason: util.NewRSAvailableReason,
			lastTransition: testTime,
		},
		{
			name: "DeploymentComplete: update everything if deployment has Progressing=False",
			d: currentDeployment(&pds, 3, 3, 3, 3, []extensions.DeploymentCondition{failedTimedOut}),
			allRSs: []*extensions.ReplicaSet{},
			d: currentDeployment(&pds, 3, 3, 3, 3, []apps.DeploymentCondition{failedTimedOut}),
			allRSs: []*apps.ReplicaSet{},
			newRS: newRSWithAvailable("foo", 3, 3, 3),
			conditionType: extensions.DeploymentProgressing,
			conditionType: apps.DeploymentProgressing,
			conditionStatus: v1.ConditionTrue,
			conditionReason: util.NewRSAvailableReason,
		},
		{
			name: "DeploymentComplete: create Progressing condition if it does not exist",
			d: currentDeployment(&pds, 3, 3, 3, 3, []extensions.DeploymentCondition{}),
			allRSs: []*extensions.ReplicaSet{},
			d: currentDeployment(&pds, 3, 3, 3, 3, []apps.DeploymentCondition{}),
			allRSs: []*apps.ReplicaSet{},
			newRS: newRSWithAvailable("foo", 3, 3, 3),
			conditionType: extensions.DeploymentProgressing,
			conditionType: apps.DeploymentProgressing,
			conditionStatus: v1.ConditionTrue,
			conditionReason: util.NewRSAvailableReason,
		},
		{
			name: "DeploymentComplete: defend against NPE when newRS=nil",
			d: currentDeployment(&pds, 0, 3, 3, 3, []extensions.DeploymentCondition{replicaSetUpdated}),
			allRSs: []*extensions.ReplicaSet{newRSWithAvailable("foo", 0, 0, 0)},
			conditionType: extensions.DeploymentProgressing,
			d: currentDeployment(&pds, 0, 3, 3, 3, []apps.DeploymentCondition{replicaSetUpdated}),
			allRSs: []*apps.ReplicaSet{newRSWithAvailable("foo", 0, 0, 0)},
			conditionType: apps.DeploymentProgressing,
			conditionStatus: v1.ConditionTrue,
			conditionReason: util.NewRSAvailableReason,
		},
		{
			name: "DeploymentTimedOut: update status if rollout exceeds Progress Deadline",
			d: currentDeployment(&pds, 3, 2, 2, 2, []extensions.DeploymentCondition{replicaSetUpdated}),
			allRSs: []*extensions.ReplicaSet{},
			d: currentDeployment(&pds, 3, 2, 2, 2, []apps.DeploymentCondition{replicaSetUpdated}),
			allRSs: []*apps.ReplicaSet{},
			newRS: newRSWithAvailable("foo", 3, 2, 2),
			conditionType: extensions.DeploymentProgressing,
			conditionType: apps.DeploymentProgressing,
			conditionStatus: v1.ConditionFalse,
			conditionReason: util.TimedOutReason,
		},
		{
			name: "DeploymentTimedOut: do not update status if deployment has existing timedOut condition",
			d: currentDeployment(&pds, 3, 2, 2, 2, []extensions.DeploymentCondition{failedTimedOut}),
			allRSs: []*extensions.ReplicaSet{},
			d: currentDeployment(&pds, 3, 2, 2, 2, []apps.DeploymentCondition{failedTimedOut}),
			allRSs: []*apps.ReplicaSet{},
			newRS: newRSWithAvailable("foo", 3, 2, 2),
			conditionType: extensions.DeploymentProgressing,
			conditionType: apps.DeploymentProgressing,
			conditionStatus: v1.ConditionFalse,
			conditionReason: util.TimedOutReason,
			lastUpdate: testTime,

10
vendor/k8s.io/kubernetes/pkg/controller/deployment/recreate.go
generated
vendored
10
vendor/k8s.io/kubernetes/pkg/controller/deployment/recreate.go
generated
vendored
@ -17,15 +17,15 @@ limitations under the License.
package deployment

import (
	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/controller/deployment/util"
)

// rolloutRecreate implements the logic for recreating a replica set.
func (dc *DeploymentController) rolloutRecreate(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
func (dc *DeploymentController) rolloutRecreate(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
	// Don't create a new RS if not already existed, so that we avoid scaling up before scaling down.
	newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false)
	if err != nil {
@ -74,7 +74,7 @@ func (dc *DeploymentController) rolloutRecreate(d *extensions.Deployment, rsList
}

// scaleDownOldReplicaSetsForRecreate scales down old replica sets when deployment strategy is "Recreate".
func (dc *DeploymentController) scaleDownOldReplicaSetsForRecreate(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) {
func (dc *DeploymentController) scaleDownOldReplicaSetsForRecreate(oldRSs []*apps.ReplicaSet, deployment *apps.Deployment) (bool, error) {
	scaled := false
	for i := range oldRSs {
		rs := oldRSs[i]
@ -95,7 +95,7 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRecreate(oldRSs []*ext
}

// oldPodsRunning returns whether there are old pods running or any of the old ReplicaSets thinks that it runs pods.
func oldPodsRunning(newRS *extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) bool {
func oldPodsRunning(newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) bool {
	if oldPods := util.GetActualReplicaCountForReplicaSets(oldRSs); oldPods > 0 {
		return true
	}
@ -123,7 +123,7 @@ func oldPodsRunning(newRS *extensions.ReplicaSet, oldRSs []*extensions.ReplicaSe
}

// scaleUpNewReplicaSetForRecreate scales up new replica set when deployment strategy is "Recreate".
func (dc *DeploymentController) scaleUpNewReplicaSetForRecreate(newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) {
func (dc *DeploymentController) scaleUpNewReplicaSetForRecreate(newRS *apps.ReplicaSet, deployment *apps.Deployment) (bool, error) {
	scaled, _, err := dc.scaleReplicaSetAndRecordEvent(newRS, *(deployment.Spec.Replicas), deployment)
	return scaled, err
}

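The Recreate path waits until oldPodsRunning reports false, and the test cases in the next file show that only terminal pod phases end the wait. A simplified sketch of that phase check, under the assumption (supported by the unknown-phase test case) that Pending, Running, and Unknown all count as still running:

    package main

    import "k8s.io/api/core/v1"

    // podIsTerminal captures the distinction oldPodsRunning relies on: only pods
    // that reached Succeeded or Failed stop blocking the Recreate rollout.
    func podIsTerminal(pod *v1.Pod) bool {
        switch pod.Status.Phase {
        case v1.PodSucceeded, v1.PodFailed:
            return true
        default:
            // Pending, Running, and Unknown all keep the rollout waiting.
            return false
        }
    }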
32
vendor/k8s.io/kubernetes/pkg/controller/deployment/recreate_test.go
generated
vendored
32
vendor/k8s.io/kubernetes/pkg/controller/deployment/recreate_test.go
generated
vendored
@ -20,8 +20,8 @@ import (
	"fmt"
	"testing"

	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/informers"
@ -33,7 +33,7 @@ import (
func TestScaleDownOldReplicaSets(t *testing.T) {
	tests := []struct {
		oldRSSizes []int
		d *extensions.Deployment
		d *apps.Deployment
	}{
		{
			oldRSSizes: []int{3},
@ -45,7 +45,7 @@ func TestScaleDownOldReplicaSets(t *testing.T) {
		t.Logf("running scenario %d", i)
		test := tests[i]

		var oldRSs []*extensions.ReplicaSet
		var oldRSs []*apps.ReplicaSet
		var expected []runtime.Object

		for n, size := range test.oldRSSizes {
@ -58,14 +58,14 @@ func TestScaleDownOldReplicaSets(t *testing.T) {
			rsCopy.Spec.Replicas = &zero
			expected = append(expected, rsCopy)

			if *(oldRSs[n].Spec.Replicas) == *(expected[n].(*extensions.ReplicaSet).Spec.Replicas) {
			if *(oldRSs[n].Spec.Replicas) == *(expected[n].(*apps.ReplicaSet).Spec.Replicas) {
				t.Errorf("broken test - original and expected RS have the same size")
			}
		}

		kc := fake.NewSimpleClientset(expected...)
		informers := informers.NewSharedInformerFactory(kc, controller.NoResyncPeriodFunc())
		c, err := NewDeploymentController(informers.Extensions().V1beta1().Deployments(), informers.Extensions().V1beta1().ReplicaSets(), informers.Core().V1().Pods(), kc)
		c, err := NewDeploymentController(informers.Apps().V1().Deployments(), informers.Apps().V1().ReplicaSets(), informers.Core().V1().Pods(), kc)
		if err != nil {
			t.Fatalf("error creating Deployment controller: %v", err)
		}
@ -86,8 +86,8 @@ func TestOldPodsRunning(t *testing.T) {
	tests := []struct {
		name string

		newRS *extensions.ReplicaSet
		oldRSs []*extensions.ReplicaSet
		newRS *apps.ReplicaSet
		oldRSs []*apps.ReplicaSet
		podMap map[types.UID]*v1.PodList

		hasOldPodsRunning bool
@ -98,23 +98,23 @@ func TestOldPodsRunning(t *testing.T) {
		},
		{
			name: "old RSs with running pods",
			oldRSs: []*extensions.ReplicaSet{rsWithUID("some-uid"), rsWithUID("other-uid")},
			oldRSs: []*apps.ReplicaSet{rsWithUID("some-uid"), rsWithUID("other-uid")},
			podMap: podMapWithUIDs([]string{"some-uid", "other-uid"}),
			hasOldPodsRunning: true,
		},
		{
			name: "old RSs without pods but with non-zero status replicas",
			oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 1, nil)},
			oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 1, nil)},
			hasOldPodsRunning: true,
		},
		{
			name: "old RSs without pods or non-zero status replicas",
			oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
			oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
			hasOldPodsRunning: false,
		},
		{
			name: "old RSs with zero status replicas but pods in terminal state are present",
			oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
			oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
			podMap: map[types.UID]*v1.PodList{
				"uid-1": {
					Items: []v1.Pod{
@ -135,7 +135,7 @@ func TestOldPodsRunning(t *testing.T) {
		},
		{
			name: "old RSs with zero status replicas but pod in unknown phase present",
			oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
			oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
			podMap: map[types.UID]*v1.PodList{
				"uid-1": {
					Items: []v1.Pod{
@ -151,7 +151,7 @@ func TestOldPodsRunning(t *testing.T) {
		},
		{
			name: "old RSs with zero status replicas with pending pod present",
			oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
			oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
			podMap: map[types.UID]*v1.PodList{
				"uid-1": {
					Items: []v1.Pod{
@ -167,7 +167,7 @@ func TestOldPodsRunning(t *testing.T) {
		},
		{
			name: "old RSs with zero status replicas with running pod present",
			oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
			oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
			podMap: map[types.UID]*v1.PodList{
				"uid-1": {
					Items: []v1.Pod{
@ -183,7 +183,7 @@ func TestOldPodsRunning(t *testing.T) {
		},
		{
			name: "old RSs with zero status replicas but pods in terminal state and pending are present",
			oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
			oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
			podMap: map[types.UID]*v1.PodList{
				"uid-1": {
					Items: []v1.Pod{
@ -225,7 +225,7 @@ func TestOldPodsRunning(t *testing.T) {
	}
}

func rsWithUID(uid string) *extensions.ReplicaSet {
func rsWithUID(uid string) *apps.ReplicaSet {
	d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
	rs := newReplicaSet(d, fmt.Sprintf("foo-%s", uid), 0)
	rs.UID = types.UID(uid)

55
vendor/k8s.io/kubernetes/pkg/controller/deployment/rollback.go
generated
vendored
55
vendor/k8s.io/kubernetes/pkg/controller/deployment/rollback.go
generated
vendored
@ -18,9 +18,11 @@ package deployment

import (
	"fmt"
	"strconv"

	"github.com/golang/glog"

	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"
	"k8s.io/apimachinery/pkg/types"
@ -28,17 +30,17 @@ import (
)

// rollback the deployment to the specified revision. In any case cleanup the rollback spec.
func (dc *DeploymentController) rollback(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
func (dc *DeploymentController) rollback(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
	newRS, allOldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, true)
	if err != nil {
		return err
	}

	allRSs := append(allOldRSs, newRS)
	toRevision := &d.Spec.RollbackTo.Revision
	rollbackTo := getRollbackTo(d)
	// If rollback revision is 0, rollback to the last revision
	if *toRevision == 0 {
		if *toRevision = deploymentutil.LastRevision(allRSs); *toRevision == 0 {
	if rollbackTo.Revision == 0 {
		if rollbackTo.Revision = deploymentutil.LastRevision(allRSs); rollbackTo.Revision == 0 {
			// If we still can't find the last revision, gives up rollback
			dc.emitRollbackWarningEvent(d, deploymentutil.RollbackRevisionNotFound, "Unable to find last revision.")
			// Gives up rollback
@ -51,14 +53,14 @@ func (dc *DeploymentController) rollback(d *extensions.Deployment, rsList []*ext
			glog.V(4).Infof("Unable to extract revision from deployment's replica set %q: %v", rs.Name, err)
			continue
		}
		if v == *toRevision {
		if v == rollbackTo.Revision {
			glog.V(4).Infof("Found replica set %q with desired revision %d", rs.Name, v)
			// rollback by copying podTemplate.Spec from the replica set
			// revision number will be incremented during the next getAllReplicaSetsAndSyncRevision call
			// no-op if the spec matches current deployment's podTemplate.Spec
			performedRollback, err := dc.rollbackToTemplate(d, rs)
			if performedRollback && err == nil {
				dc.emitRollbackNormalEvent(d, fmt.Sprintf("Rolled back deployment %q to revision %d", d.Name, *toRevision))
				dc.emitRollbackNormalEvent(d, fmt.Sprintf("Rolled back deployment %q to revision %d", d.Name, rollbackTo.Revision))
			}
			return err
		}
@ -71,7 +73,7 @@ func (dc *DeploymentController) rollback(d *extensions.Deployment, rsList []*ext
// rollbackToTemplate compares the templates of the provided deployment and replica set and
// updates the deployment with the replica set template in case they are different. It also
// cleans up the rollback spec so subsequent requeues of the deployment won't end up in here.
func (dc *DeploymentController) rollbackToTemplate(d *extensions.Deployment, rs *extensions.ReplicaSet) (bool, error) {
func (dc *DeploymentController) rollbackToTemplate(d *apps.Deployment, rs *apps.ReplicaSet) (bool, error) {
	performedRollback := false
	if !deploymentutil.EqualIgnoreHash(&d.Spec.Template, &rs.Spec.Template) {
		glog.V(4).Infof("Rolling back deployment %q to template spec %+v", d.Name, rs.Spec.Template.Spec)
@ -98,20 +100,49 @@ func (dc *DeploymentController) rollbackToTemplate(d *extensions.Deployment, rs
	return performedRollback, dc.updateDeploymentAndClearRollbackTo(d)
}

func (dc *DeploymentController) emitRollbackWarningEvent(d *extensions.Deployment, reason, message string) {
func (dc *DeploymentController) emitRollbackWarningEvent(d *apps.Deployment, reason, message string) {
	dc.eventRecorder.Eventf(d, v1.EventTypeWarning, reason, message)
}

func (dc *DeploymentController) emitRollbackNormalEvent(d *extensions.Deployment, message string) {
func (dc *DeploymentController) emitRollbackNormalEvent(d *apps.Deployment, message string) {
	dc.eventRecorder.Eventf(d, v1.EventTypeNormal, deploymentutil.RollbackDone, message)
}

// updateDeploymentAndClearRollbackTo sets .spec.rollbackTo to nil and update the input deployment
// It is assumed that the caller will have updated the deployment template appropriately (in case
// we want to rollback).
func (dc *DeploymentController) updateDeploymentAndClearRollbackTo(d *extensions.Deployment) error {
func (dc *DeploymentController) updateDeploymentAndClearRollbackTo(d *apps.Deployment) error {
	glog.V(4).Infof("Cleans up rollbackTo of deployment %q", d.Name)
	d.Spec.RollbackTo = nil
	_, err := dc.client.ExtensionsV1beta1().Deployments(d.Namespace).Update(d)
	setRollbackTo(d, nil)
	_, err := dc.client.AppsV1().Deployments(d.Namespace).Update(d)
	return err
}

// TODO: Remove this when extensions/v1beta1 and apps/v1beta1 Deployment are dropped.
func getRollbackTo(d *apps.Deployment) *extensions.RollbackConfig {
	// Extract the annotation used for round-tripping the deprecated RollbackTo field.
	revision := d.Annotations[apps.DeprecatedRollbackTo]
	if revision == "" {
		return nil
	}
	revision64, err := strconv.ParseInt(revision, 10, 64)
	if err != nil {
		// If it's invalid, ignore it.
		return nil
	}
	return &extensions.RollbackConfig{
		Revision: revision64,
	}
}

// TODO: Remove this when extensions/v1beta1 and apps/v1beta1 Deployment are dropped.
func setRollbackTo(d *apps.Deployment, rollbackTo *extensions.RollbackConfig) {
	if rollbackTo == nil {
		delete(d.Annotations, apps.DeprecatedRollbackTo)
		return
	}
	if d.Annotations == nil {
		d.Annotations = make(map[string]string)
	}
	d.Annotations[apps.DeprecatedRollbackTo] = strconv.FormatInt(rollbackTo.Revision, 10)
}

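Because apps/v1 dropped the spec.rollbackTo field, the two helpers added above round-trip the rollback request through the apps.DeprecatedRollbackTo annotation instead. A short usage sketch with illustrative values; d is assumed to be an apps/v1 Deployment built by the test helper newDeployment:

    // Request a rollback to revision 2 by annotation rather than by spec field.
    setRollbackTo(d, &extensions.RollbackConfig{Revision: 2})
    // d.Annotations[apps.DeprecatedRollbackTo] now holds "2".
    if rb := getRollbackTo(d); rb != nil && rb.Revision == 2 {
        // syncDeployment sees the pending request and takes the rollback path.
    }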
12
vendor/k8s.io/kubernetes/pkg/controller/deployment/rolling.go
generated
vendored
12
vendor/k8s.io/kubernetes/pkg/controller/deployment/rolling.go
generated
vendored
@ -21,8 +21,8 @@ import (
	"sort"

	"github.com/golang/glog"
	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/util/integer"
	"k8s.io/kubernetes/pkg/controller"
@ -30,7 +30,7 @@ import (
)

// rolloutRolling implements the logic for rolling a new replica set.
func (dc *DeploymentController) rolloutRolling(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
func (dc *DeploymentController) rolloutRolling(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
	newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, true)
	if err != nil {
		return err
@ -67,7 +67,7 @@ func (dc *DeploymentController) rolloutRolling(d *extensions.Deployment, rsList
	return dc.syncRolloutStatus(allRSs, newRS, d)
}

func (dc *DeploymentController) reconcileNewReplicaSet(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) {
func (dc *DeploymentController) reconcileNewReplicaSet(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, deployment *apps.Deployment) (bool, error) {
	if *(newRS.Spec.Replicas) == *(deployment.Spec.Replicas) {
		// Scaling not required.
		return false, nil
@ -85,7 +85,7 @@ func (dc *DeploymentController) reconcileNewReplicaSet(allRSs []*extensions.Repl
	return scaled, err
}

func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) {
func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*apps.ReplicaSet, oldRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, deployment *apps.Deployment) (bool, error) {
	oldPodsCount := deploymentutil.GetReplicaCountForReplicaSets(oldRSs)
	if oldPodsCount == 0 {
		// Can't scale down further
@ -154,7 +154,7 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*extensions.Rep
}

// cleanupUnhealthyReplicas will scale down old replica sets with unhealthy replicas, so that all unhealthy replicas will be deleted.
func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment, maxCleanupCount int32) ([]*extensions.ReplicaSet, int32, error) {
func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*apps.ReplicaSet, deployment *apps.Deployment, maxCleanupCount int32) ([]*apps.ReplicaSet, int32, error) {
	sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs))
	// Safely scale down all old replica sets with unhealthy replicas. Replica set will sort the pods in the order
	// such that not-ready < ready, unscheduled < scheduled, and pending < running. This ensures that unhealthy replicas will
@ -191,7 +191,7 @@ func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.Re

// scaleDownOldReplicaSetsForRollingUpdate scales down old replica sets when deployment strategy is "RollingUpdate".
// Need check maxUnavailable to ensure availability
func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs []*extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) (int32, error) {
func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs []*apps.ReplicaSet, oldRSs []*apps.ReplicaSet, deployment *apps.Deployment) (int32, error) {
	maxUnavailable := deploymentutil.MaxUnavailable(*deployment)

	// Check if we can scale down.

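scaleDownOldReplicaSetsForRollingUpdate may only remove as many old pods as availability allows: with maxUnavailable pods permitted to be unavailable out of the desired count, the scale-down headroom is roughly totalAvailable minus (desired - maxUnavailable). A simplified sketch of that arithmetic; the real bound in reconcileOldReplicaSets also accounts for unavailable pods on the new ReplicaSet:

    package main

    import "fmt"

    // maxScaledDown computes the rough availability headroom described above.
    func maxScaledDown(desired, maxUnavailable, totalAvailable int32) int32 {
        minAvailable := desired - maxUnavailable
        if headroom := totalAvailable - minAvailable; headroom > 0 {
            return headroom
        }
        return 0
    }

    func main() {
        // 10 desired, 2 allowed unavailable, 9 currently available: scale down 1.
        fmt.Println(maxScaledDown(10, 2, 9))
    }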
18
vendor/k8s.io/kubernetes/pkg/controller/deployment/rolling_test.go
generated
vendored
18
vendor/k8s.io/kubernetes/pkg/controller/deployment/rolling_test.go
generated
vendored
@ -19,7 +19,7 @@ package deployment

import (
"testing"

extensions "k8s.io/api/extensions/v1beta1"
apps "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
@ -82,7 +82,7 @@ func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) {
t.Logf("executing scenario %d", i)
newRS := rs("foo-v2", test.newReplicas, nil, noTimestamp)
oldRS := rs("foo-v2", test.oldReplicas, nil, noTimestamp)
allRSs := []*extensions.ReplicaSet{newRS, oldRS}
allRSs := []*apps.ReplicaSet{newRS, oldRS}
maxUnavailable := intstr.FromInt(0)
deployment := newDeployment("foo", test.deploymentReplicas, nil, &test.maxSurge, &maxUnavailable, map[string]string{"foo": "bar"})
fake := fake.Clientset{}
@ -109,7 +109,7 @@ func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) {
t.Errorf("expected 1 action during scale, got: %v", fake.Actions())
continue
}
updated := fake.Actions()[0].(core.UpdateAction).GetObject().(*extensions.ReplicaSet)
updated := fake.Actions()[0].(core.UpdateAction).GetObject().(*apps.ReplicaSet)
if e, a := test.expectedNewReplicas, int(*(updated.Spec.Replicas)); e != a {
t.Errorf("expected update to %d replicas, got %d", e, a)
}
@ -187,8 +187,8 @@ func TestDeploymentController_reconcileOldReplicaSets(t *testing.T) {
newRS.Status.AvailableReplicas = int32(test.readyPodsFromNewRS)
oldRS := rs("foo-old", test.oldReplicas, oldSelector, noTimestamp)
oldRS.Status.AvailableReplicas = int32(test.readyPodsFromOldRS)
oldRSs := []*extensions.ReplicaSet{oldRS}
allRSs := []*extensions.ReplicaSet{oldRS, newRS}
oldRSs := []*apps.ReplicaSet{oldRS}
allRSs := []*apps.ReplicaSet{oldRS, newRS}
maxSurge := intstr.FromInt(0)
deployment := newDeployment("foo", test.deploymentReplicas, nil, &maxSurge, &test.maxUnavailable, newSelector)
fakeClientset := fake.Clientset{}
@ -255,7 +255,7 @@ func TestDeploymentController_cleanupUnhealthyReplicas(t *testing.T) {
t.Logf("executing scenario %d", i)
oldRS := rs("foo-v2", test.oldReplicas, nil, noTimestamp)
oldRS.Status.AvailableReplicas = int32(test.readyPods)
oldRSs := []*extensions.ReplicaSet{oldRS}
oldRSs := []*apps.ReplicaSet{oldRS}
maxSurge := intstr.FromInt(2)
maxUnavailable := intstr.FromInt(2)
deployment := newDeployment("foo", 10, nil, &maxSurge, &maxUnavailable, nil)
@ -330,8 +330,8 @@ func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing
t.Logf("executing scenario %d", i)
oldRS := rs("foo-v2", test.oldReplicas, nil, noTimestamp)
oldRS.Status.AvailableReplicas = int32(test.readyPods)
allRSs := []*extensions.ReplicaSet{oldRS}
oldRSs := []*extensions.ReplicaSet{oldRS}
allRSs := []*apps.ReplicaSet{oldRS}
oldRSs := []*apps.ReplicaSet{oldRS}
maxSurge := intstr.FromInt(0)
deployment := newDeployment("foo", test.deploymentReplicas, nil, &maxSurge, &test.maxUnavailable, map[string]string{"foo": "bar"})
fakeClientset := fake.Clientset{}
@ -371,7 +371,7 @@ func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing
t.Errorf("expected an update action")
continue
}
updated := updateAction.GetObject().(*extensions.ReplicaSet)
updated := updateAction.GetObject().(*apps.ReplicaSet)
if e, a := test.expectedOldReplicas, int(*(updated.Spec.Replicas)); e != a {
t.Errorf("expected update to %d replicas, got %d", e, a)
}
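These tests follow Go's standard table-driven pattern. As a companion usage example, a minimal table-driven test for the hypothetical scaleDownBudget sketch shown earlier (assumed to live in the same package; the cases are illustrative, not from the vendored suite):

package main

import "testing"

func TestScaleDownBudget(t *testing.T) {
	tests := []struct {
		desired, maxUnavailable, available, want int32
	}{
		{10, 0, 10, 0}, // nothing may become unavailable
		{10, 2, 10, 2}, // full budget available
		{10, 2, 9, 1},  // one pod already unavailable
		{10, 2, 7, 0},  // already below the floor, no budget
	}
	for i, tt := range tests {
		if got := scaleDownBudget(tt.desired, tt.maxUnavailable, tt.available); got != tt.want {
			t.Errorf("case %d: got %d, want %d", i, got, tt.want)
		}
	}
}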
191
vendor/k8s.io/kubernetes/pkg/controller/deployment/sync.go
generated
vendored
@ -23,12 +23,11 @@ import (
"strconv"

"github.com/golang/glog"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/kubernetes/pkg/controller"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
@ -36,7 +35,7 @@ import (
)

// syncStatusOnly only updates the Deployment's Status and doesn't take any mutating actions.
func (dc *DeploymentController) syncStatusOnly(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
func (dc *DeploymentController) syncStatusOnly(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false)
if err != nil {
return err
@ -48,7 +47,7 @@ func (dc *DeploymentController) syncStatusOnly(d *extensions.Deployment, rsList

// sync is responsible for reconciling deployments on scaling events or when they
// are paused.
func (dc *DeploymentController) sync(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
func (dc *DeploymentController) sync(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false)
if err != nil {
return err
@ -60,7 +59,7 @@ func (dc *DeploymentController) sync(d *extensions.Deployment, rsList []*extensi
}

// Clean up the deployment when it's paused and no rollback is in flight.
if d.Spec.Paused && d.Spec.RollbackTo == nil {
if d.Spec.Paused && getRollbackTo(d) == nil {
if err := dc.cleanupDeployment(oldRSs, d); err != nil {
return err
}
@ -73,11 +72,11 @@ func (dc *DeploymentController) sync(d *extensions.Deployment, rsList []*extensi
// checkPausedConditions checks if the given deployment is paused or not and adds an appropriate condition.
// These conditions are needed so that we won't accidentally report lack of progress for resumed deployments
// that were paused for longer than progressDeadlineSeconds.
func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment) error {
func (dc *DeploymentController) checkPausedConditions(d *apps.Deployment) error {
if d.Spec.ProgressDeadlineSeconds == nil {
return nil
}
cond := deploymentutil.GetDeploymentCondition(d.Status, extensions.DeploymentProgressing)
cond := deploymentutil.GetDeploymentCondition(d.Status, apps.DeploymentProgressing)
if cond != nil && cond.Reason == deploymentutil.TimedOutReason {
// If we have reported lack of progress, do not overwrite it with a paused condition.
return nil
@ -86,11 +85,11 @@ func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment)

needsUpdate := false
if d.Spec.Paused && !pausedCondExists {
condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.PausedDeployReason, "Deployment is paused")
condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.PausedDeployReason, "Deployment is paused")
deploymentutil.SetDeploymentCondition(&d.Status, *condition)
needsUpdate = true
} else if !d.Spec.Paused && pausedCondExists {
condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.ResumedDeployReason, "Deployment is resumed")
condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.ResumedDeployReason, "Deployment is resumed")
deploymentutil.SetDeploymentCondition(&d.Status, *condition)
needsUpdate = true
}
@ -100,7 +99,7 @@ func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment)
}

var err error
d, err = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d)
d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
return err
}

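The paused/resumed handling in checkPausedConditions above is a small state machine over a single Progressing condition. A self-contained sketch of the same toggle, assuming simplified stand-ins (Condition and the reason strings are not the vendored types):

package main

import "fmt"

type Condition struct {
	Type, Status, Reason string
}

const (
	pausedReason  = "DeploymentPaused"
	resumedReason = "DeploymentResumed"
)

// syncPausedCondition mirrors the toggle: record a "paused" condition when the
// spec is paused and none exists yet, flip it to "resumed" when the spec is
// unpaused but a paused condition is still recorded.
func syncPausedCondition(paused bool, conds []Condition) ([]Condition, bool) {
	pausedCondExists := false
	for _, c := range conds {
		if c.Type == "Progressing" && c.Reason == pausedReason {
			pausedCondExists = true
		}
	}
	switch {
	case paused && !pausedCondExists:
		return append(conds, Condition{"Progressing", "Unknown", pausedReason}), true
	case !paused && pausedCondExists:
		return append(conds, Condition{"Progressing", "Unknown", resumedReason}), true
	}
	return conds, false // nothing to update, skip the status write
}

func main() {
	conds, updated := syncPausedCondition(true, nil)
	fmt.Println(updated, conds) // true [{Progressing Unknown DeploymentPaused}]
}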
@ -116,12 +115,7 @@ func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment)
//
// Note that currently the deployment controller is using caches to avoid querying the server for reads.
// This may lead to stale reads of replica sets, thus incorrect deployment status.
func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList, createIfNotExisted bool) (*extensions.ReplicaSet, []*extensions.ReplicaSet, error) {
// List the deployment's RSes & Pods and apply pod-template-hash info to deployment's adopted RSes/Pods
rsList, err := dc.rsAndPodsWithHashKeySynced(d, rsList, podMap)
if err != nil {
return nil, nil, fmt.Errorf("error labeling replica sets and pods with pod-template-hash: %v", err)
}
func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList, createIfNotExisted bool) (*apps.ReplicaSet, []*apps.ReplicaSet, error) {
_, allOldRSs := deploymentutil.FindOldReplicaSets(d, rsList)

// Get new replica set with the updated revision number
@ -133,105 +127,12 @@ func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(d *extensions.D
return newRS, allOldRSs, nil
}

// rsAndPodsWithHashKeySynced returns the RSes and pods the given deployment
// targets, with pod-template-hash information synced.
//
// rsList should come from getReplicaSetsForDeployment(d).
// podMap should come from getPodMapForDeployment(d, rsList).
func (dc *DeploymentController) rsAndPodsWithHashKeySynced(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) ([]*extensions.ReplicaSet, error) {
var syncedRSList []*extensions.ReplicaSet
for _, rs := range rsList {
// Add pod-template-hash information if it's not in the RS.
// Otherwise, new RS produced by Deployment will overlap with pre-existing ones
// that aren't constrained by the pod-template-hash.
syncedRS, err := dc.addHashKeyToRSAndPods(rs, podMap[rs.UID], d.Status.CollisionCount)
if err != nil {
return nil, err
}
syncedRSList = append(syncedRSList, syncedRS)
}
return syncedRSList, nil
}

// addHashKeyToRSAndPods adds pod-template-hash information to the given rs, if it's not already there, with the following steps:
// 1. Add hash label to the rs's pod template, and make sure the controller sees this update so that no orphaned pods will be created
// 2. Add hash label to all pods this rs owns, wait until replicaset controller reports rs.Status.FullyLabeledReplicas equal to the desired number of replicas
// 3. Add hash label to the rs's label and selector
func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet, podList *v1.PodList, collisionCount *int32) (*extensions.ReplicaSet, error) {
// If the rs already has the new hash label in its selector, it's done syncing
if labelsutil.SelectorHasLabel(rs.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
return rs, nil
}
hash, err := deploymentutil.GetReplicaSetHash(rs, collisionCount)
if err != nil {
return nil, err
}
// 1. Add hash template label to the rs. This ensures that any newly created pods will have the new label.
updatedRS, err := deploymentutil.UpdateRSWithRetries(dc.client.ExtensionsV1beta1().ReplicaSets(rs.Namespace), dc.rsLister, rs.Namespace, rs.Name,
func(updated *extensions.ReplicaSet) error {
// Precondition: the RS doesn't contain the new hash in its pod template label.
if updated.Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash {
return utilerrors.ErrPreconditionViolated
}
updated.Spec.Template.Labels = labelsutil.AddLabel(updated.Spec.Template.Labels, extensions.DefaultDeploymentUniqueLabelKey, hash)
return nil
})
if err != nil {
return nil, fmt.Errorf("error updating replica set %s/%s pod template label with template hash: %v", rs.Namespace, rs.Name, err)
}
// Make sure rs pod template is updated so that it won't create pods without the new label (orphaned pods).
if updatedRS.Generation > updatedRS.Status.ObservedGeneration {
// TODO: Revisit if we really need to wait here as opposed to returning and
// potentially unblocking this worker (can wait up to 1min before timing out).
if err = deploymentutil.WaitForReplicaSetUpdated(dc.rsLister, updatedRS.Generation, updatedRS.Namespace, updatedRS.Name); err != nil {
return nil, fmt.Errorf("error waiting for replica set %s/%s to be observed by controller: %v", updatedRS.Namespace, updatedRS.Name, err)
}
glog.V(4).Infof("Observed the update of replica set %s/%s's pod template with hash %s.", rs.Namespace, rs.Name, hash)
}

// 2. Update all pods managed by the rs to have the new hash label, so they will be correctly adopted.
if err := deploymentutil.LabelPodsWithHash(podList, dc.client, dc.podLister, rs.Namespace, rs.Name, hash); err != nil {
return nil, fmt.Errorf("error in adding template hash label %s to pods %+v: %s", hash, podList, err)
}

// We need to wait for the replicaset controller to observe the pods being
// labeled with pod template hash. Because previously we've called
// WaitForReplicaSetUpdated, the replicaset controller should have dropped
// FullyLabeledReplicas to 0 already, we only need to wait for it to increase
// back to the number of replicas in the spec.
// TODO: Revisit if we really need to wait here as opposed to returning and
// potentially unblocking this worker (can wait up to 1min before timing out).
if err := deploymentutil.WaitForPodsHashPopulated(dc.rsLister, updatedRS.Generation, updatedRS.Namespace, updatedRS.Name); err != nil {
return nil, fmt.Errorf("Replica set %s/%s: error waiting for replicaset controller to observe pods being labeled with template hash: %v", updatedRS.Namespace, updatedRS.Name, err)
}

// 3. Update rs label and selector to include the new hash label
// Copy the old selector, so that we can scrub out any orphaned pods
updatedRS, err = deploymentutil.UpdateRSWithRetries(dc.client.ExtensionsV1beta1().ReplicaSets(rs.Namespace), dc.rsLister, rs.Namespace, rs.Name, func(updated *extensions.ReplicaSet) error {
// Precondition: the RS doesn't contain the new hash in its label and selector.
if updated.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash && updated.Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey] == hash {
return utilerrors.ErrPreconditionViolated
}
updated.Labels = labelsutil.AddLabel(updated.Labels, extensions.DefaultDeploymentUniqueLabelKey, hash)
updated.Spec.Selector = labelsutil.AddLabelToSelector(updated.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey, hash)
return nil
})
// If the RS isn't actually updated, that's okay, we'll retry in the
// next sync loop since its selector isn't updated yet.
if err != nil {
return nil, fmt.Errorf("error updating ReplicaSet %s/%s label and selector with template hash: %v", updatedRS.Namespace, updatedRS.Name, err)
}

// TODO: look for orphaned pods and label them in the background somewhere else periodically
return updatedRS, nil
}

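The large block deleted above relied on a retry-with-precondition update helper (UpdateRSWithRetries with ErrPreconditionViolated). Since that pattern disappears with the migration, here is a minimal sketch of what it did, under assumed simplified types (rsObj, errPreconditionViolated, and the three callbacks are illustrative, not the vendored signatures):

package main

import (
	"errors"
	"fmt"
)

var errPreconditionViolated = errors.New("precondition violated")

type rsObj struct {
	Labels map[string]string
}

// updateWithRetries sketches the pattern: re-fetch, apply a mutation guarded
// by a precondition, and treat a violated precondition as success because
// someone else already applied the change.
func updateWithRetries(fetch func() rsObj, apply func(rsObj) error, mutate func(*rsObj) error) error {
	for attempt := 0; attempt < 3; attempt++ {
		obj := fetch()
		if err := mutate(&obj); err != nil {
			if err == errPreconditionViolated {
				return nil // already labeled: nothing to do
			}
			return err
		}
		if err := apply(obj); err == nil {
			return nil
		}
		// conflict: loop re-fetches the latest object and tries again
	}
	return fmt.Errorf("retries exhausted")
}

func main() {
	stored := rsObj{Labels: map[string]string{}}
	err := updateWithRetries(
		func() rsObj { return stored },
		func(o rsObj) error { stored = o; return nil },
		func(o *rsObj) error {
			if o.Labels["pod-template-hash"] == "5d9f8" { // hypothetical hash value
				return errPreconditionViolated
			}
			o.Labels["pod-template-hash"] = "5d9f8"
			return nil
		},
	)
	fmt.Println(err, stored.Labels)
}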
// Returns a replica set that matches the intent of the given deployment. Returns nil if the new replica set doesn't exist yet.
// 1. Get existing new RS (the RS that the given deployment targets, whose pod template is the same as deployment's).
// 2. If there's existing new RS, update its revision number if it's smaller than (maxOldRevision + 1), where maxOldRevision is the max revision number among all old RSes.
// 3. If there's no existing new RS and createIfNotExisted is true, create one with appropriate revision number (maxOldRevision + 1) and replicas.
// Note that the pod-template-hash will be added to adopted RSes and pods.
func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsList, oldRSs []*extensions.ReplicaSet, createIfNotExisted bool) (*extensions.ReplicaSet, error) {
func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, oldRSs []*apps.ReplicaSet, createIfNotExisted bool) (*apps.ReplicaSet, error) {
existingNewRS := deploymentutil.FindNewReplicaSet(d, rsList)

// Calculate the max revision number among all old RSes
@ -251,7 +152,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
minReadySecondsNeedsUpdate := rsCopy.Spec.MinReadySeconds != d.Spec.MinReadySeconds
if annotationsUpdated || minReadySecondsNeedsUpdate {
rsCopy.Spec.MinReadySeconds = d.Spec.MinReadySeconds
return dc.client.ExtensionsV1beta1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(rsCopy)
return dc.client.AppsV1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(rsCopy)
}

// Should use the revision in existingNewRS's annotation, since it was set before
@ -259,17 +160,17 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
// If no other Progressing condition has been recorded and we need to estimate the progress
// of this deployment then it is likely that old users started caring about progress. In that
// case we need to take into account the first time we noticed their new replica set.
cond := deploymentutil.GetDeploymentCondition(d.Status, extensions.DeploymentProgressing)
cond := deploymentutil.GetDeploymentCondition(d.Status, apps.DeploymentProgressing)
if d.Spec.ProgressDeadlineSeconds != nil && cond == nil {
msg := fmt.Sprintf("Found new replica set %q", rsCopy.Name)
condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, deploymentutil.FoundNewRSReason, msg)
condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, deploymentutil.FoundNewRSReason, msg)
deploymentutil.SetDeploymentCondition(&d.Status, *condition)
needsUpdate = true
}

if needsUpdate {
var err error
if d, err = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d); err != nil {
if d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d); err != nil {
return nil, err
}
}
@ -283,19 +184,20 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
// new ReplicaSet does not exist, create one.
newRSTemplate := *d.Spec.Template.DeepCopy()
podTemplateSpecHash := fmt.Sprintf("%d", controller.ComputeHash(&newRSTemplate, d.Status.CollisionCount))
newRSTemplate.Labels = labelsutil.CloneAndAddLabel(d.Spec.Template.Labels, extensions.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash)
newRSTemplate.Labels = labelsutil.CloneAndAddLabel(d.Spec.Template.Labels, apps.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash)
// Add podTemplateHash label to selector.
newRSSelector := labelsutil.CloneSelectorAndAddLabel(d.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash)
newRSSelector := labelsutil.CloneSelectorAndAddLabel(d.Spec.Selector, apps.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash)

// Create new ReplicaSet
newRS := extensions.ReplicaSet{
newRS := apps.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
// Make the name deterministic, to ensure idempotence
Name: d.Name + "-" + rand.SafeEncodeString(podTemplateSpecHash),
Namespace: d.Namespace,
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(d, controllerKind)},
Labels: newRSTemplate.Labels,
},
Spec: extensions.ReplicaSetSpec{
Spec: apps.ReplicaSetSpec{
Replicas: new(int32),
MinReadySeconds: d.Spec.MinReadySeconds,
Selector: newRSSelector,
@ -315,7 +217,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
// hash collisions. If there is any other error, we need to report it in the status of
// the Deployment.
alreadyExists := false
createdRS, err := dc.client.ExtensionsV1beta1().ReplicaSets(d.Namespace).Create(&newRS)
createdRS, err := dc.client.AppsV1().ReplicaSets(d.Namespace).Create(&newRS)
switch {
// We may end up hitting this due to a slow cache or a fast resync of the Deployment.
case errors.IsAlreadyExists(err):
@ -347,7 +249,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
*d.Status.CollisionCount++
// Update the collisionCount for the Deployment and let it requeue by returning the original
// error.
_, dErr := dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d)
_, dErr := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
if dErr == nil {
glog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount)
}
@ -355,12 +257,12 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
case err != nil:
msg := fmt.Sprintf("Failed to create new replica set %q: %v", newRS.Name, err)
if d.Spec.ProgressDeadlineSeconds != nil {
cond := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionFalse, deploymentutil.FailedRSCreateReason, msg)
cond := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionFalse, deploymentutil.FailedRSCreateReason, msg)
deploymentutil.SetDeploymentCondition(&d.Status, *cond)
// We don't really care about this error at this point, since we have a bigger issue to report.
// TODO: Identify which errors are permanent and switch DeploymentIsFailed to take into account
// these reasons as well. Related issue: https://github.com/kubernetes/kubernetes/issues/18568
_, _ = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d)
_, _ = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
}
dc.eventRecorder.Eventf(d, v1.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg)
return nil, err
@ -372,12 +274,12 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
needsUpdate := deploymentutil.SetDeploymentRevision(d, newRevision)
if !alreadyExists && d.Spec.ProgressDeadlineSeconds != nil {
msg := fmt.Sprintf("Created new replica set %q", createdRS.Name)
condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, deploymentutil.NewReplicaSetReason, msg)
condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, deploymentutil.NewReplicaSetReason, msg)
deploymentutil.SetDeploymentCondition(&d.Status, *condition)
needsUpdate = true
}
if needsUpdate {
_, err = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d)
_, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
}
return createdRS, err
}
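The create path above is idempotent: the ReplicaSet name is derived from the template hash, AlreadyExists with an equal template means adoption, and a genuine collision is escaped by bumping collisionCount, which changes the hash (and the name) on the next sync. A self-contained sketch of that flow, assuming a simplified FNV stand-in for ComputeHash (hash32, ensureRS, and the "web-" name are all illustrative):

package main

import (
	"fmt"
	"hash/fnv"
)

// hash32 stands in for ComputeHash: the collisionCount salt changes the
// result, and therefore the deterministic ReplicaSet name derived from it.
func hash32(template string, collisionCount int32) uint32 {
	h := fnv.New32a()
	fmt.Fprintf(h, "%s/%d", template, collisionCount)
	return h.Sum32()
}

// ensureRS sketches the idempotent create: AlreadyExists with an equal
// template means an earlier sync created it and we adopt; a true hash
// collision bumps collisionCount so the next attempt uses a new name.
func ensureRS(template string, collisionCount *int32, existing map[string]string) string {
	for {
		name := fmt.Sprintf("web-%d", hash32(template, *collisionCount))
		prev, created := existing[name]
		switch {
		case !created:
			existing[name] = template // fresh create succeeded
			return name
		case prev == template:
			return name // slow cache or fast resync: adopt it
		default:
			*collisionCount++ // genuine collision: retry with new salt
		}
	}
}

func main() {
	existing := map[string]string{}
	var count int32
	fmt.Println(ensureRS(`{"image":"nginx:1.13"}`, &count, existing))
	fmt.Println(ensureRS(`{"image":"nginx:1.13"}`, &count, existing)) // same name, adopted
}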
@ -387,7 +289,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
// have the effect of hastening the rollout progress, which could produce a higher proportion of unavailable
// replicas in the event of a problem with the rolled out template. Should run only on scaling events or
// when a deployment is paused and not during the normal rollout process.
func (dc *DeploymentController) scale(deployment *extensions.Deployment, newRS *extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet) error {
func (dc *DeploymentController) scale(deployment *apps.Deployment, newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet) error {
// If there is only one active replica set then we should scale that up to the full count of the
// deployment. If there is no active replica set, then we should scale up the newest replica set.
if activeOrLatest := deploymentutil.FindActiveOrLatest(newRS, oldRSs); activeOrLatest != nil {
@ -485,7 +387,7 @@ func (dc *DeploymentController) scale(deployment *extensions.Deployment, newRS *
return nil
}

func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *extensions.ReplicaSet, newScale int32, deployment *extensions.Deployment) (bool, *extensions.ReplicaSet, error) {
func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *apps.ReplicaSet, newScale int32, deployment *apps.Deployment) (bool, *apps.ReplicaSet, error) {
// No need to scale
if *(rs.Spec.Replicas) == newScale {
return false, rs, nil
@ -500,20 +402,19 @@ func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *extensions.Rep
return scaled, newRS, err
}

func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newScale int32, deployment *extensions.Deployment, scalingOperation string) (bool, *extensions.ReplicaSet, error) {
rsCopy := rs.DeepCopy()
func (dc *DeploymentController) scaleReplicaSet(rs *apps.ReplicaSet, newScale int32, deployment *apps.Deployment, scalingOperation string) (bool, *apps.ReplicaSet, error) {

sizeNeedsUpdate := *(rsCopy.Spec.Replicas) != newScale
// TODO: Do not mutate the replica set here, instead simply compare the annotation and if they mismatch
// call SetReplicasAnnotations inside the following if clause. Then we can also move the deep-copy from
// above inside the if too.
annotationsNeedUpdate := deploymentutil.SetReplicasAnnotations(rsCopy, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+deploymentutil.MaxSurge(*deployment))
sizeNeedsUpdate := *(rs.Spec.Replicas) != newScale

annotationsNeedUpdate := deploymentutil.ReplicasAnnotationsNeedUpdate(rs, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+deploymentutil.MaxSurge(*deployment))

scaled := false
var err error
if sizeNeedsUpdate || annotationsNeedUpdate {
rsCopy := rs.DeepCopy()
*(rsCopy.Spec.Replicas) = newScale
rs, err = dc.client.ExtensionsV1beta1().ReplicaSets(rsCopy.Namespace).Update(rsCopy)
deploymentutil.SetReplicasAnnotations(rsCopy, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+deploymentutil.MaxSurge(*deployment))
rs, err = dc.client.AppsV1().ReplicaSets(rsCopy.Namespace).Update(rsCopy)
if err == nil && sizeNeedsUpdate {
scaled = true
dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale)
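The rewritten scaleReplicaSet resolves the TODO it deletes: both "needs update" predicates are now computed on the read-only object, and the deep-copy plus API write happen only when one of them is true. A minimal sketch of that check-then-write shape, under assumed simplified types (replicaSet, applyUpdate, and the annotation key are illustrative):

package main

import "fmt"

type replicaSet struct {
	Replicas    int32
	Annotations map[string]string
}

// scaleIfNeeded mirrors the pattern: decide first, copy and write second,
// and report "scaled" only when the size itself changed.
func scaleIfNeeded(rs replicaSet, newScale int32, wantDesired string, applyUpdate func(replicaSet)) bool {
	sizeNeedsUpdate := rs.Replicas != newScale
	annotationsNeedUpdate := rs.Annotations["desired-replicas"] != wantDesired
	if !sizeNeedsUpdate && !annotationsNeedUpdate {
		return false // no-op sync: nothing written to the API server
	}
	copied := rs // copy before mutating
	copied.Replicas = newScale
	copied.Annotations = map[string]string{"desired-replicas": wantDesired}
	applyUpdate(copied)
	return sizeNeedsUpdate
}

func main() {
	rs := replicaSet{Replicas: 3, Annotations: map[string]string{"desired-replicas": "3"}}
	scaled := scaleIfNeeded(rs, 5, "5", func(r replicaSet) { fmt.Printf("update: %+v\n", r) })
	fmt.Println("scaled:", scaled)
}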
@ -525,13 +426,13 @@ func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newSc
// cleanupDeployment is responsible for cleaning up a deployment, i.e. it retains all but the latest N old replica sets
// where N=d.Spec.RevisionHistoryLimit. Old replica sets are older versions of the podtemplate of a deployment kept
// around by default 1) for historical reasons and 2) for the ability to roll back a deployment.
func (dc *DeploymentController) cleanupDeployment(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) error {
func (dc *DeploymentController) cleanupDeployment(oldRSs []*apps.ReplicaSet, deployment *apps.Deployment) error {
if deployment.Spec.RevisionHistoryLimit == nil {
return nil
}

// Avoid deleting replica set with deletion timestamp set
aliveFilter := func(rs *extensions.ReplicaSet) bool {
aliveFilter := func(rs *apps.ReplicaSet) bool {
return rs != nil && rs.ObjectMeta.DeletionTimestamp == nil
}
cleanableRSes := controller.FilterReplicaSets(oldRSs, aliveFilter)
@ -551,7 +452,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*extensions.ReplicaSe
continue
}
glog.V(4).Infof("Trying to cleanup replica set %q for deployment %q", rs.Name, deployment.Name)
if err := dc.client.ExtensionsV1beta1().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) {
if err := dc.client.AppsV1().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) {
// Return error instead of aggregating and continuing DELETEs on the theory
// that we may be overloading the api server.
return err
@ -562,7 +463,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*extensions.ReplicaSe
}

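The retention rule in cleanupDeployment is: filter out replica sets that are already being deleted, then delete the oldest survivors beyond the history limit. A self-contained sketch of that selection, assuming illustrative names (oldRS, trimHistory, and the "web-N" names are not the vendored API):

package main

import (
	"fmt"
	"sort"
)

type oldRS struct {
	Name     string
	Revision int
	Deleting bool // stands in for a non-nil DeletionTimestamp
}

// trimHistory keeps the newest `limit` alive replica sets and returns the
// names of the ones to delete.
func trimHistory(rss []oldRS, limit int) []string {
	alive := rss[:0]
	for _, rs := range rss {
		if !rs.Deleting {
			alive = append(alive, rs)
		}
	}
	sort.Slice(alive, func(i, j int) bool { return alive[i].Revision < alive[j].Revision })
	var victims []string
	if diff := len(alive) - limit; diff > 0 {
		for _, rs := range alive[:diff] {
			victims = append(victims, rs.Name)
		}
	}
	return victims
}

func main() {
	rss := []oldRS{{"web-1", 1, false}, {"web-2", 2, false}, {"web-3", 3, true}}
	fmt.Println(trimHistory(rss, 1)) // [web-1]: web-2 is the newest alive, web-3 is already deleting
}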
// syncDeploymentStatus checks if the status is up-to-date and syncs it if necessary
func (dc *DeploymentController) syncDeploymentStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, d *extensions.Deployment) error {
func (dc *DeploymentController) syncDeploymentStatus(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, d *apps.Deployment) error {
newStatus := calculateStatus(allRSs, newRS, d)

if reflect.DeepEqual(d.Status, newStatus) {
@ -571,12 +472,12 @@ func (dc *DeploymentController) syncDeploymentStatus(allRSs []*extensions.Replic

newDeployment := d
newDeployment.Status = newStatus
_, err := dc.client.ExtensionsV1beta1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment)
_, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment)
return err
}

// calculateStatus calculates the latest status for the provided deployment by looking into the provided replica sets.
func calculateStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) extensions.DeploymentStatus {
func calculateStatus(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, deployment *apps.Deployment) apps.DeploymentStatus {
availableReplicas := deploymentutil.GetAvailableReplicaCountForReplicaSets(allRSs)
totalReplicas := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
unavailableReplicas := totalReplicas - availableReplicas
@ -586,11 +487,11 @@ func calculateStatus(allRSs []*extensions.ReplicaS
unavailableReplicas = 0
}

status := extensions.DeploymentStatus{
status := apps.DeploymentStatus{
// TODO: Ensure that if we start retrying status updates, we won't pick up a new Generation value.
ObservedGeneration: deployment.Generation,
Replicas: deploymentutil.GetActualReplicaCountForReplicaSets(allRSs),
UpdatedReplicas: deploymentutil.GetActualReplicaCountForReplicaSets([]*extensions.ReplicaSet{newRS}),
UpdatedReplicas: deploymentutil.GetActualReplicaCountForReplicaSets([]*apps.ReplicaSet{newRS}),
ReadyReplicas: deploymentutil.GetReadyReplicaCountForReplicaSets(allRSs),
AvailableReplicas: availableReplicas,
UnavailableReplicas: unavailableReplicas,
@ -604,10 +505,10 @@ func calculateStatus(allRSs []*extensions.ReplicaS
}

if availableReplicas >= *(deployment.Spec.Replicas)-deploymentutil.MaxUnavailable(*deployment) {
minAvailability := deploymentutil.NewDeploymentCondition(extensions.DeploymentAvailable, v1.ConditionTrue, deploymentutil.MinimumReplicasAvailable, "Deployment has minimum availability.")
minAvailability := deploymentutil.NewDeploymentCondition(apps.DeploymentAvailable, v1.ConditionTrue, deploymentutil.MinimumReplicasAvailable, "Deployment has minimum availability.")
deploymentutil.SetDeploymentCondition(&status, *minAvailability)
} else {
noMinAvailability := deploymentutil.NewDeploymentCondition(extensions.DeploymentAvailable, v1.ConditionFalse, deploymentutil.MinimumReplicasUnavailable, "Deployment does not have minimum availability.")
noMinAvailability := deploymentutil.NewDeploymentCondition(apps.DeploymentAvailable, v1.ConditionFalse, deploymentutil.MinimumReplicasUnavailable, "Deployment does not have minimum availability.")
deploymentutil.SetDeploymentCondition(&status, *noMinAvailability)
}

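The bookkeeping in calculateStatus is plain arithmetic: unavailable is the clamped difference between total and available replicas, and the Available condition holds when available >= desired - maxUnavailable. A worked sketch (function name and inputs are illustrative):

package main

import "fmt"

// deploymentAvailability reproduces the two computations above.
func deploymentAvailability(desired, maxUnavailable, total, available int32) (int32, bool) {
	unavailable := total - available
	if unavailable < 0 {
		// More available than total can appear transiently while caches
		// catch up; clamp instead of reporting a negative count.
		unavailable = 0
	}
	return unavailable, available >= desired-maxUnavailable
}

func main() {
	// desired=10, maxUnavailable=2, 11 replicas exist during a surge, 9 available.
	unavailable, ok := deploymentAvailability(10, 2, 11, 9)
	fmt.Println(unavailable, ok) // 2 true: minimum availability is met
}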
@ -619,7 +520,7 @@ func calculateStatus(allRSs []*extensions.ReplicaS
//
// rsList should come from getReplicaSetsForDeployment(d).
// podMap should come from getPodMapForDeployment(d, rsList).
func (dc *DeploymentController) isScalingEvent(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) (bool, error) {
func (dc *DeploymentController) isScalingEvent(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) (bool, error) {
newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false)
if err != nil {
return false, err
104
vendor/k8s.io/kubernetes/pkg/controller/deployment/sync_test.go
generated
vendored
@ -20,7 +20,7 @@ import (
"testing"
"time"

extensions "k8s.io/api/extensions/v1beta1"
apps "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/informers"
@ -41,7 +41,7 @@ func TestScale(t *testing.T) {
oldTimestamp := metav1.Date(2016, 5, 20, 1, 0, 0, 0, time.UTC)
olderTimestamp := metav1.Date(2016, 5, 20, 0, 0, 0, 0, time.UTC)

var updatedTemplate = func(replicas int) *extensions.Deployment {
var updatedTemplate = func(replicas int) *apps.Deployment {
d := newDeployment("foo", replicas, nil, nil, nil, map[string]string{"foo": "bar"})
d.Spec.Template.Labels["another"] = "label"
return d
@ -49,14 +49,14 @@ func TestScale(t *testing.T) {

tests := []struct {
name string
deployment *extensions.Deployment
oldDeployment *extensions.Deployment
deployment *apps.Deployment
oldDeployment *apps.Deployment

newRS *extensions.ReplicaSet
oldRSs []*extensions.ReplicaSet
newRS *apps.ReplicaSet
oldRSs []*apps.ReplicaSet

expectedNew *extensions.ReplicaSet
expectedOld []*extensions.ReplicaSet
expectedNew *apps.ReplicaSet
expectedOld []*apps.ReplicaSet
wasntUpdated map[string]bool

desiredReplicasAnnotations map[string]int32
@ -67,10 +67,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 10, nil, nil, nil, nil),

newRS: rs("foo-v1", 10, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{},
oldRSs: []*apps.ReplicaSet{},

expectedNew: rs("foo-v1", 12, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{},
expectedOld: []*apps.ReplicaSet{},
},
{
name: "normal scaling event: 10 -> 5",
@ -78,10 +78,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 10, nil, nil, nil, nil),

newRS: rs("foo-v1", 10, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{},
oldRSs: []*apps.ReplicaSet{},

expectedNew: rs("foo-v1", 5, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{},
expectedOld: []*apps.ReplicaSet{},
},
{
name: "proportional scaling: 5 -> 10",
@ -89,10 +89,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 5, nil, nil, nil, nil),

newRS: rs("foo-v2", 2, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)},

expectedNew: rs("foo-v2", 4, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)},
},
{
name: "proportional scaling: 5 -> 3",
@ -100,10 +100,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 5, nil, nil, nil, nil),

newRS: rs("foo-v2", 2, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)},

expectedNew: rs("foo-v2", 1, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 2, nil, oldTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v1", 2, nil, oldTimestamp)},
},
{
name: "proportional scaling: 9 -> 4",
@ -111,10 +111,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 9, nil, nil, nil, nil),

newRS: rs("foo-v2", 8, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)},

expectedNew: rs("foo-v2", 4, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 0, nil, oldTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v1", 0, nil, oldTimestamp)},
},
{
name: "proportional scaling: 7 -> 10",
@ -122,10 +122,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 7, nil, nil, nil, nil),

newRS: rs("foo-v3", 2, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},

expectedNew: rs("foo-v3", 3, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 4, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v2", 4, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)},
},
{
name: "proportional scaling: 13 -> 8",
@ -133,10 +133,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 13, nil, nil, nil, nil),

newRS: rs("foo-v3", 2, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 8, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v2", 8, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)},

expectedNew: rs("foo-v3", 1, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 5, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v2", 5, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
},
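The proportional cases are easiest to follow as arithmetic: each replica set keeps roughly its fraction of the total. A sketch of the core ratio (a deliberate simplification of the vendored GetProportion logic, which also weighs max-replicas annotations when distributing rounding leftovers):

package main

import "fmt"

// proportionalScale gives each replica set the floor of its proportional
// share and hands the rounding leftover to the first set. The real
// controller distributes leftovers by annotation-derived weights instead.
func proportionalScale(sizes []int32, newTotal int32) []int32 {
	var oldTotal int32
	for _, s := range sizes {
		oldTotal += s
	}
	out := make([]int32, len(sizes))
	var allocated int32
	for i, s := range sizes {
		out[i] = s * newTotal / oldTotal // floor of the proportional share
		allocated += out[i]
	}
	out[0] += newTotal - allocated // rounding leftover
	return out
}

func main() {
	// "proportional scaling: 5 -> 10": newRS=2 and oldRS=3 double to 4 and 6.
	fmt.Println(proportionalScale([]int32{2, 3}, 10)) // [4 6]
}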
// Scales up the new replica set.
{
@ -145,10 +145,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 3, nil, nil, nil, nil),

newRS: rs("foo-v3", 1, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},

expectedNew: rs("foo-v3", 2, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
},
// Scales down the older replica set.
{
@ -157,10 +157,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 3, nil, nil, nil, nil),

newRS: rs("foo-v3", 1, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},

expectedNew: rs("foo-v3", 1, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
},
// Scales up the latest replica set first.
{
@ -169,10 +169,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 4, nil, nil, nil, nil),

newRS: nil,
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},

expectedNew: nil,
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
},
// Scales down to zero
{
@ -181,10 +181,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 6, nil, nil, nil, nil),

newRS: rs("foo-v3", 3, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},

expectedNew: rs("foo-v3", 0, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
},
// Scales up from zero
{
@ -193,10 +193,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 6, nil, nil, nil, nil),

newRS: rs("foo-v3", 0, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},

expectedNew: rs("foo-v3", 6, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
wasntUpdated: map[string]bool{"foo-v2": true, "foo-v1": true},
},
// Scenario: deployment.spec.replicas == 3 ( foo-v1.spec.replicas == foo-v2.spec.replicas == foo-v3.spec.replicas == 1 )
@ -208,10 +208,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 5, nil, nil, nil, nil),

newRS: rs("foo-v3", 2, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},

expectedNew: rs("foo-v3", 2, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
wasntUpdated: map[string]bool{"foo-v3": true, "foo-v1": true},

desiredReplicasAnnotations: map[string]int32{"foo-v2": int32(3)},
@ -222,10 +222,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 10, nil, intOrStrP(2), nil, nil),

newRS: rs("foo-v2", 6, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)},

expectedNew: rs("foo-v2", 11, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 11, nil, oldTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v1", 11, nil, oldTimestamp)},
},
{
name: "change both surge and size",
@ -233,10 +233,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 10, nil, intOrStrP(3), nil, nil),

newRS: rs("foo-v2", 5, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 8, nil, oldTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v1", 8, nil, oldTimestamp)},

expectedNew: rs("foo-v2", 22, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 34, nil, oldTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v1", 34, nil, oldTimestamp)},
},
{
name: "change both size and template",
@ -244,25 +244,25 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 10, nil, nil, nil, map[string]string{"foo": "bar"}),

newRS: nil,
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 7, nil, newTimestamp), rs("foo-v1", 3, nil, oldTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v2", 7, nil, newTimestamp), rs("foo-v1", 3, nil, oldTimestamp)},

expectedNew: nil,
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 10, nil, newTimestamp), rs("foo-v1", 4, nil, oldTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v2", 10, nil, newTimestamp), rs("foo-v1", 4, nil, oldTimestamp)},
},
{
name: "saturated but broken new replica set does not affect old pods",
deployment: newDeployment("foo", 2, nil, intOrStrP(1), intOrStrP(1), nil),
oldDeployment: newDeployment("foo", 2, nil, intOrStrP(1), intOrStrP(1), nil),

newRS: func() *extensions.ReplicaSet {
newRS: func() *apps.ReplicaSet {
rs := rs("foo-v2", 2, nil, newTimestamp)
rs.Status.AvailableReplicas = 0
return rs
}(),
oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)},

expectedNew: rs("foo-v2", 2, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)},
},
}

@ -313,7 +313,7 @@ func TestScale(t *testing.T) {
}
// Get all the UPDATE actions and update nameToSize with all the updated sizes.
for _, action := range fake.Actions() {
rs := action.(testclient.UpdateAction).GetObject().(*extensions.ReplicaSet)
rs := action.(testclient.UpdateAction).GetObject().(*apps.ReplicaSet)
if !test.wasntUpdated[rs.Name] {
nameToSize[rs.Name] = *(rs.Spec.Replicas)
}
@ -345,12 +345,12 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) {
alreadyDeleted.DeletionTimestamp = &now

tests := []struct {
oldRSs []*extensions.ReplicaSet
oldRSs []*apps.ReplicaSet
revisionHistoryLimit int32
expectedDeletions int
}{
{
oldRSs: []*extensions.ReplicaSet{
oldRSs: []*apps.ReplicaSet{
newRSWithStatus("foo-1", 0, 0, selector),
newRSWithStatus("foo-2", 0, 0, selector),
newRSWithStatus("foo-3", 0, 0, selector),
@ -360,7 +360,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) {
},
{
// Only delete the replica set with Spec.Replicas = Status.Replicas = 0.
oldRSs: []*extensions.ReplicaSet{
oldRSs: []*apps.ReplicaSet{
newRSWithStatus("foo-1", 0, 0, selector),
newRSWithStatus("foo-2", 0, 1, selector),
newRSWithStatus("foo-3", 1, 0, selector),
@ -371,7 +371,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) {
},

{
oldRSs: []*extensions.ReplicaSet{
oldRSs: []*apps.ReplicaSet{
newRSWithStatus("foo-1", 0, 0, selector),
newRSWithStatus("foo-2", 0, 0, selector),
},
@ -379,7 +379,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) {
expectedDeletions: 2,
},
{
oldRSs: []*extensions.ReplicaSet{
oldRSs: []*apps.ReplicaSet{
newRSWithStatus("foo-1", 1, 1, selector),
newRSWithStatus("foo-2", 1, 1, selector),
},
@ -387,7 +387,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) {
expectedDeletions: 0,
},
{
oldRSs: []*extensions.ReplicaSet{
oldRSs: []*apps.ReplicaSet{
alreadyDeleted,
},
revisionHistoryLimit: 0,
@ -401,7 +401,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) {

fake := &fake.Clientset{}
informers := informers.NewSharedInformerFactory(fake, controller.NoResyncPeriodFunc())
controller, err := NewDeploymentController(informers.Extensions().V1beta1().Deployments(), informers.Extensions().V1beta1().ReplicaSets(), informers.Core().V1().Pods(), fake)
controller, err := NewDeploymentController(informers.Apps().V1().Deployments(), informers.Apps().V1().ReplicaSets(), informers.Core().V1().Pods(), fake)
if err != nil {
t.Fatalf("error creating Deployment controller: %v", err)
}
@ -411,7 +411,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) {
controller.rsListerSynced = alwaysReady
controller.podListerSynced = alwaysReady
for _, rs := range test.oldRSs {
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
}

stopCh := make(chan struct{})
9
vendor/k8s.io/kubernetes/pkg/controller/deployment/util/BUILD
generated
vendored
@ -19,8 +19,8 @@ go_library(
"//pkg/controller:go_default_library",
"//pkg/util/labels:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@ -29,11 +29,10 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/listers/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/util/integer:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
],
@ -49,8 +48,8 @@ go_test(
deps = [
"//pkg/controller:go_default_library",
"//pkg/util/hash:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
212
vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go
generated
vendored
@ -25,20 +25,16 @@ import (

"github.com/golang/glog"

apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/errors"
intstrutil "k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
corelisters "k8s.io/client-go/listers/core/v1"
extensionslisters "k8s.io/client-go/listers/extensions/v1beta1"
appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
"k8s.io/client-go/util/integer"
internalextensions "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/controller"
@ -102,8 +98,8 @@ const (
)

// NewDeploymentCondition creates a new deployment condition.
func NewDeploymentCondition(condType extensions.DeploymentConditionType, status v1.ConditionStatus, reason, message string) *extensions.DeploymentCondition {
return &extensions.DeploymentCondition{
func NewDeploymentCondition(condType apps.DeploymentConditionType, status v1.ConditionStatus, reason, message string) *apps.DeploymentCondition {
return &apps.DeploymentCondition{
Type: condType,
Status: status,
LastUpdateTime: metav1.Now(),
@ -114,7 +110,7 @@ func NewDeploymentCondition(condType extensions.DeploymentConditionType, status
}

// GetDeploymentCondition returns the condition with the provided type.
func GetDeploymentCondition(status extensions.DeploymentStatus, condType extensions.DeploymentConditionType) *extensions.DeploymentCondition {
func GetDeploymentCondition(status apps.DeploymentStatus, condType apps.DeploymentConditionType) *apps.DeploymentCondition {
for i := range status.Conditions {
c := status.Conditions[i]
if c.Type == condType {
@ -126,7 +122,7 @@ func GetDeploymentCondition(status extensions.DeploymentStatus, condType extensi

// SetDeploymentCondition updates the deployment to include the provided condition. If the condition that
// we are about to add already exists and has the same status and reason then we are not going to update.
func SetDeploymentCondition(status *extensions.DeploymentStatus, condition extensions.DeploymentCondition) {
func SetDeploymentCondition(status *apps.DeploymentStatus, condition apps.DeploymentCondition) {
currentCond := GetDeploymentCondition(*status, condition.Type)
if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason {
return
@ -140,13 +136,13 @@ func SetDeploymentCondition(status *extensions.DeploymentStatus, condition exten
}

// RemoveDeploymentCondition removes the deployment condition with the provided type.
func RemoveDeploymentCondition(status *extensions.DeploymentStatus, condType extensions.DeploymentConditionType) {
func RemoveDeploymentCondition(status *apps.DeploymentStatus, condType apps.DeploymentConditionType) {
status.Conditions = filterOutCondition(status.Conditions, condType)
}

// filterOutCondition returns a new slice of deployment conditions without conditions with the provided type.
func filterOutCondition(conditions []extensions.DeploymentCondition, condType extensions.DeploymentConditionType) []extensions.DeploymentCondition {
var newConditions []extensions.DeploymentCondition
func filterOutCondition(conditions []apps.DeploymentCondition, condType apps.DeploymentConditionType) []apps.DeploymentCondition {
var newConditions []apps.DeploymentCondition
for _, c := range conditions {
if c.Type == condType {
continue
@ -158,9 +154,9 @@ func filterOutCondition(conditions []extensions.DeploymentCondition, condType ex

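Together, Set/Get/Remove keep the invariant that a status holds at most one condition per type. A self-contained sketch of that invariant, assuming a simplified condition type (deploymentCondition and the reason strings are illustrative):

package main

import "fmt"

type deploymentCondition struct {
	Type, Status, Reason string
}

// setCondition keeps at most one condition per type, replacing any
// previous entry, in the spirit of SetDeploymentCondition above.
func setCondition(conds []deploymentCondition, c deploymentCondition) []deploymentCondition {
	out := conds[:0]
	for _, existing := range conds {
		if existing.Type != c.Type {
			out = append(out, existing)
		}
	}
	return append(out, c)
}

func main() {
	conds := setCondition(nil, deploymentCondition{"Progressing", "True", "NewReplicaSetAvailable"})
	conds = setCondition(conds, deploymentCondition{"Available", "True", "MinimumReplicasAvailable"})
	conds = setCondition(conds, deploymentCondition{"Progressing", "False", "ProgressDeadlineExceeded"})
	fmt.Println(conds) // one Progressing entry (the latest) plus Available
}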
// ReplicaSetToDeploymentCondition converts a replica set condition into a deployment condition.
|
||||
// Useful for promoting replica set failure conditions into deployments.
|
||||
func ReplicaSetToDeploymentCondition(cond extensions.ReplicaSetCondition) extensions.DeploymentCondition {
|
||||
return extensions.DeploymentCondition{
|
||||
Type: extensions.DeploymentConditionType(cond.Type),
|
||||
func ReplicaSetToDeploymentCondition(cond apps.ReplicaSetCondition) apps.DeploymentCondition {
|
||||
return apps.DeploymentCondition{
|
||||
Type: apps.DeploymentConditionType(cond.Type),
|
||||
Status: cond.Status,
|
||||
LastTransitionTime: cond.LastTransitionTime,
|
||||
LastUpdateTime: cond.LastTransitionTime,
|
||||
@ -170,7 +166,7 @@ func ReplicaSetToDeploymentCondition(cond extensions.ReplicaSetCondition) extens
|
||||
}
|
||||
|
||||
// SetDeploymentRevision updates the revision for a deployment.
|
||||
func SetDeploymentRevision(deployment *extensions.Deployment, revision string) bool {
|
||||
func SetDeploymentRevision(deployment *apps.Deployment, revision string) bool {
|
||||
updated := false
|
||||
|
||||
if deployment.Annotations == nil {
|
||||
@ -185,7 +181,7 @@ func SetDeploymentRevision(deployment *extensions.Deployment, revision string) b
|
||||
}
|
||||
|
||||
// MaxRevision finds the highest revision in the replica sets
|
||||
func MaxRevision(allRSs []*extensions.ReplicaSet) int64 {
|
||||
func MaxRevision(allRSs []*apps.ReplicaSet) int64 {
|
||||
max := int64(0)
|
||||
for _, rs := range allRSs {
|
||||
if v, err := Revision(rs); err != nil {
|
||||
@ -199,7 +195,7 @@ func MaxRevision(allRSs []*extensions.ReplicaSet) int64 {
|
||||
}
|
||||
|
||||
// LastRevision finds the second max revision number in all replica sets (the last revision)
|
||||
func LastRevision(allRSs []*extensions.ReplicaSet) int64 {
|
||||
func LastRevision(allRSs []*apps.ReplicaSet) int64 {
|
||||
max, secMax := int64(0), int64(0)
|
||||
for _, rs := range allRSs {
|
||||
if v, err := Revision(rs); err != nil {
|
||||
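The revision helpers above read and compare the revision recorded on each replica set. As a hedged, self-contained sketch of that bookkeeping (the annotation key is the one the deployment controller uses; the helper name and its zero-value fallback are illustrative stand-ins, not the vendored implementation):

package main

import (
	"fmt"
	"strconv"

	apps "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// revisionOf parses the revision annotation from a ReplicaSet, mirroring in
// spirit what Revision/MaxRevision do above. Unannotated replica sets sort
// as revision 0, which is how a "highest revision" scan can skip them.
func revisionOf(rs *apps.ReplicaSet) (int64, error) {
	v, ok := rs.Annotations["deployment.kubernetes.io/revision"]
	if !ok {
		return 0, nil
	}
	return strconv.ParseInt(v, 10, 64)
}

func main() {
	rs := &apps.ReplicaSet{ObjectMeta: metav1.ObjectMeta{
		Annotations: map[string]string{"deployment.kubernetes.io/revision": "3"},
	}}
	r, err := revisionOf(rs)
	fmt.Println(r, err) // 3 <nil>
}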
@@ -230,7 +226,7 @@ func Revision(obj runtime.Object) (int64, error) {
 
 // SetNewReplicaSetAnnotations sets new replica set's annotations appropriately by updating its revision and
 // copying required deployment annotations to it; it returns true if replica set's annotation is changed.
-func SetNewReplicaSetAnnotations(deployment *extensions.Deployment, newRS *extensions.ReplicaSet, newRevision string, exists bool) bool {
+func SetNewReplicaSetAnnotations(deployment *apps.Deployment, newRS *apps.ReplicaSet, newRevision string, exists bool) bool {
 	// First, copy deployment's annotations (except for apply and revision annotations)
 	annotationChanged := copyDeploymentAnnotationsToReplicaSet(deployment, newRS)
 	// Then, update replica set's revision annotation
@@ -287,6 +283,7 @@ var annotationsToSkip = map[string]bool{
 	RevisionHistoryAnnotation: true,
 	DesiredReplicasAnnotation: true,
 	MaxReplicasAnnotation:     true,
+	apps.DeprecatedRollbackTo: true,
 }
 
 // skipCopyAnnotation returns true if we should skip copying the annotation with the given annotation key
@@ -299,7 +296,7 @@ func skipCopyAnnotation(key string) bool {
 // copyDeploymentAnnotationsToReplicaSet copies deployment's annotations to replica set's annotations,
 // and returns true if replica set's annotation is changed.
 // Note that apply and revision annotations are not copied.
-func copyDeploymentAnnotationsToReplicaSet(deployment *extensions.Deployment, rs *extensions.ReplicaSet) bool {
+func copyDeploymentAnnotationsToReplicaSet(deployment *apps.Deployment, rs *apps.ReplicaSet) bool {
 	rsAnnotationsChanged := false
 	if rs.Annotations == nil {
 		rs.Annotations = make(map[string]string)
@@ -320,7 +317,7 @@ func copyDeploymentAnnotationsToReplicaSet(deployment *extensions.Deployment, rs
 // SetDeploymentAnnotationsTo sets deployment's annotations as given RS's annotations.
 // This action should be done if and only if the deployment is rolling back to this rs.
 // Note that apply and revision annotations are not changed.
-func SetDeploymentAnnotationsTo(deployment *extensions.Deployment, rollbackToRS *extensions.ReplicaSet) {
+func SetDeploymentAnnotationsTo(deployment *apps.Deployment, rollbackToRS *apps.ReplicaSet) {
 	deployment.Annotations = getSkippedAnnotations(deployment.Annotations)
 	for k, v := range rollbackToRS.Annotations {
 		if !skipCopyAnnotation(k) {
@@ -341,7 +338,7 @@ func getSkippedAnnotations(annotations map[string]string) map[string]string {
 
 // FindActiveOrLatest returns the only active or the latest replica set in case there is at most one active
 // replica set. If there are more active replica sets, then we should proportionally scale them.
-func FindActiveOrLatest(newRS *extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet) *extensions.ReplicaSet {
+func FindActiveOrLatest(newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet) *apps.ReplicaSet {
 	if newRS == nil && len(oldRSs) == 0 {
 		return nil
 	}
@@ -364,15 +361,15 @@ func FindActiveOrLatest(newRS *extensions.ReplicaSet, oldRSs []*extensions.Repli
 }
 
 // GetDesiredReplicasAnnotation returns the number of desired replicas
-func GetDesiredReplicasAnnotation(rs *extensions.ReplicaSet) (int32, bool) {
+func GetDesiredReplicasAnnotation(rs *apps.ReplicaSet) (int32, bool) {
 	return getIntFromAnnotation(rs, DesiredReplicasAnnotation)
 }
 
-func getMaxReplicasAnnotation(rs *extensions.ReplicaSet) (int32, bool) {
+func getMaxReplicasAnnotation(rs *apps.ReplicaSet) (int32, bool) {
 	return getIntFromAnnotation(rs, MaxReplicasAnnotation)
 }
 
-func getIntFromAnnotation(rs *extensions.ReplicaSet, annotationKey string) (int32, bool) {
+func getIntFromAnnotation(rs *apps.ReplicaSet, annotationKey string) (int32, bool) {
 	annotationValue, ok := rs.Annotations[annotationKey]
 	if !ok {
 		return int32(0), false
@@ -386,7 +383,7 @@ func getIntFromAnnotation(rs *extensions.ReplicaSet, annotationKey string) (int3
 }
 
 // SetReplicasAnnotations sets the desiredReplicas and maxReplicas into the annotations
-func SetReplicasAnnotations(rs *extensions.ReplicaSet, desiredReplicas, maxReplicas int32) bool {
+func SetReplicasAnnotations(rs *apps.ReplicaSet, desiredReplicas, maxReplicas int32) bool {
 	updated := false
 	if rs.Annotations == nil {
 		rs.Annotations = make(map[string]string)
@@ -404,8 +401,24 @@ func SetReplicasAnnotations(rs *extensions.ReplicaSet, desiredReplicas, maxRepli
 	return updated
 }
 
+// ReplicasAnnotationsNeedUpdate returns true if ReplicasAnnotations need to be updated
+func ReplicasAnnotationsNeedUpdate(rs *apps.ReplicaSet, desiredReplicas, maxReplicas int32) bool {
+	if rs.Annotations == nil {
+		return true
+	}
+	desiredString := fmt.Sprintf("%d", desiredReplicas)
+	if hasString := rs.Annotations[DesiredReplicasAnnotation]; hasString != desiredString {
+		return true
+	}
+	maxString := fmt.Sprintf("%d", maxReplicas)
+	if hasString := rs.Annotations[MaxReplicasAnnotation]; hasString != maxString {
+		return true
+	}
+	return false
+}
+
 // MaxUnavailable returns the maximum unavailable pods a rolling deployment can take.
-func MaxUnavailable(deployment extensions.Deployment) int32 {
+func MaxUnavailable(deployment apps.Deployment) int32 {
 	if !IsRollingUpdate(&deployment) || *(deployment.Spec.Replicas) == 0 {
 		return int32(0)
 	}
@@ -418,7 +431,7 @@ func MaxUnavailable(deployment extensions.Deployment) int32 {
 }
 
 // MinAvailable returns the minimum available pods of a given deployment
-func MinAvailable(deployment *extensions.Deployment) int32 {
+func MinAvailable(deployment *apps.Deployment) int32 {
 	if !IsRollingUpdate(deployment) {
 		return int32(0)
 	}
@@ -426,7 +439,7 @@ func MinAvailable(deployment *extensions.Deployment) int32 {
 }
 
 // MaxSurge returns the maximum surge pods a rolling deployment can take.
-func MaxSurge(deployment extensions.Deployment) int32 {
+func MaxSurge(deployment apps.Deployment) int32 {
 	if !IsRollingUpdate(&deployment) {
 		return int32(0)
 	}
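MaxUnavailable, MinAvailable and MaxSurge all reduce to resolving an IntOrString (an absolute count or a percentage of desired replicas) into a pod count. A minimal sketch of that arithmetic, assuming apimachinery's intstr package and the usual rolling-update rounding convention (surge rounds up, unavailable rounds down):

package main

import (
	"fmt"

	intstrutil "k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	desired := 10
	maxSurge := intstrutil.FromString("25%")
	maxUnavailable := intstrutil.FromString("25%")

	// 25% of 10 = 2.5: surge rounds up to 3, unavailable rounds down to 2.
	surge, err := intstrutil.GetValueFromIntOrPercent(&maxSurge, desired, true)
	if err != nil {
		panic(err)
	}
	unavailable, err := intstrutil.GetValueFromIntOrPercent(&maxUnavailable, desired, false)
	if err != nil {
		panic(err)
	}
	// A rollout may therefore run up to 13 pods and must keep
	// at least 8 available at any moment.
	fmt.Println(surge, unavailable) // 3 2
}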
@@ -438,7 +451,7 @@ func MaxSurge(deployment extensions.Deployment) int32 {
 // GetProportion will estimate the proportion for the provided replica set using 1. the current size
 // of the parent deployment, 2. the replica count that needs be added on the replica sets of the
 // deployment, and 3. the total replicas added in the replica sets of the deployment so far.
-func GetProportion(rs *extensions.ReplicaSet, d extensions.Deployment, deploymentReplicasToAdd, deploymentReplicasAdded int32) int32 {
+func GetProportion(rs *apps.ReplicaSet, d apps.Deployment, deploymentReplicasToAdd, deploymentReplicasAdded int32) int32 {
 	if rs == nil || *(rs.Spec.Replicas) == 0 || deploymentReplicasToAdd == 0 || deploymentReplicasToAdd == deploymentReplicasAdded {
 		return int32(0)
 	}
@@ -460,7 +473,7 @@ func GetProportion(rs *extensions.ReplicaSet, d extensions.Deployment, deploymen
 
 // getReplicaSetFraction estimates the fraction of replicas a replica set can have in
 // 1. a scaling event during a rollout or 2. when scaling a paused deployment.
-func getReplicaSetFraction(rs extensions.ReplicaSet, d extensions.Deployment) int32 {
+func getReplicaSetFraction(rs apps.ReplicaSet, d apps.Deployment) int32 {
 	// If we are scaling down to zero then the fraction of this replica set is its whole size (negative)
 	if *(d.Spec.Replicas) == int32(0) {
 		return -*(rs.Spec.Replicas)
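The fraction that getReplicaSetFraction estimates is plain proportional arithmetic: scale each replica set by newDeploymentSize/annotatedDeploymentSize and round. A hedged sketch with simplified plain ints (the vendored code works on int32 pointers and reads the old size from the max-replicas annotation):

package main

import (
	"fmt"
	"math"
)

// fraction estimates how many replicas a replica set gains (or loses) when
// its deployment is scaled, keeping every replica set's share proportional.
// rsReplicas is the set's current size, deploymentReplicas the new desired
// size (plus any surge), annotatedReplicas the size the annotation recorded.
func fraction(rsReplicas, deploymentReplicas, annotatedReplicas int32) int32 {
	newSize := float64(rsReplicas*deploymentReplicas) / float64(annotatedReplicas)
	return int32(math.Round(newSize)) - rsReplicas
}

func main() {
	// Deployment scaled 10 -> 15; one RS holds 6 of the original 10 replicas.
	// Its proportional share of the new size is round(6*15/10) = 9, i.e. +3.
	fmt.Println(fraction(6, 15, 10)) // 3
	// The other RS holds 4: round(4*15/10) = 6, i.e. +2. Together: +5.
	fmt.Println(fraction(4, 15, 10)) // 2
}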
@@ -485,7 +498,7 @@ func getReplicaSetFraction(rs extensions.ReplicaSet, d extensions.Deployment) in
 // GetAllReplicaSets returns the old and new replica sets targeted by the given Deployment. It gets PodList and ReplicaSetList from client interface.
 // Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets.
 // The third returned value is the new replica set, and it may be nil if it doesn't exist yet.
-func GetAllReplicaSets(deployment *extensions.Deployment, c extensionsv1beta1.ExtensionsV1beta1Interface) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, *extensions.ReplicaSet, error) {
+func GetAllReplicaSets(deployment *apps.Deployment, c appsclient.AppsV1Interface) ([]*apps.ReplicaSet, []*apps.ReplicaSet, *apps.ReplicaSet, error) {
 	rsList, err := ListReplicaSets(deployment, RsListFromClient(c))
 	if err != nil {
 		return nil, nil, nil, err
@@ -497,7 +510,7 @@ func GetAllReplicaSets(deployment *extensions.Deployment, c extensionsv1beta1.Ex
 
 // GetOldReplicaSets returns the old replica sets targeted by the given Deployment; get PodList and ReplicaSetList from client interface.
 // Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets.
-func GetOldReplicaSets(deployment *extensions.Deployment, c extensionsv1beta1.ExtensionsV1beta1Interface) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, error) {
+func GetOldReplicaSets(deployment *apps.Deployment, c appsclient.AppsV1Interface) ([]*apps.ReplicaSet, []*apps.ReplicaSet, error) {
 	rsList, err := ListReplicaSets(deployment, RsListFromClient(c))
 	if err != nil {
 		return nil, nil, err
@@ -508,7 +521,7 @@ func GetOldReplicaSets(deployment *extensions.Deployment, c extensionsv1beta1.Ex
 
 // GetNewReplicaSet returns a replica set that matches the intent of the given deployment; get ReplicaSetList from client interface.
 // Returns nil if the new replica set doesn't exist yet.
-func GetNewReplicaSet(deployment *extensions.Deployment, c extensionsv1beta1.ExtensionsV1beta1Interface) (*extensions.ReplicaSet, error) {
+func GetNewReplicaSet(deployment *apps.Deployment, c appsclient.AppsV1Interface) (*apps.ReplicaSet, error) {
 	rsList, err := ListReplicaSets(deployment, RsListFromClient(c))
 	if err != nil {
 		return nil, err
@@ -517,13 +530,13 @@ func GetNewReplicaSet(deployment *extensions.Deployment, c extensionsv1beta1.Ext
 }
 
 // RsListFromClient returns an rsListFunc that wraps the given client.
-func RsListFromClient(c extensionsv1beta1.ExtensionsV1beta1Interface) RsListFunc {
-	return func(namespace string, options metav1.ListOptions) ([]*extensions.ReplicaSet, error) {
+func RsListFromClient(c appsclient.AppsV1Interface) RsListFunc {
+	return func(namespace string, options metav1.ListOptions) ([]*apps.ReplicaSet, error) {
 		rsList, err := c.ReplicaSets(namespace).List(options)
 		if err != nil {
 			return nil, err
 		}
-		var ret []*extensions.ReplicaSet
+		var ret []*apps.ReplicaSet
 		for i := range rsList.Items {
 			ret = append(ret, &rsList.Items[i])
 		}
@@ -532,14 +545,14 @@ func RsListFromClient(c extensionsv1beta1.ExtensionsV1beta1Interface) RsListFunc
 }
 
 // TODO: switch this to full namespacers
-type RsListFunc func(string, metav1.ListOptions) ([]*extensions.ReplicaSet, error)
+type RsListFunc func(string, metav1.ListOptions) ([]*apps.ReplicaSet, error)
 type podListFunc func(string, metav1.ListOptions) (*v1.PodList, error)
 
 // ListReplicaSets returns a slice of RSes the given deployment targets.
 // Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan),
 // because only the controller itself should do that.
 // However, it does filter out anything whose ControllerRef doesn't match.
-func ListReplicaSets(deployment *extensions.Deployment, getRSList RsListFunc) ([]*extensions.ReplicaSet, error) {
+func ListReplicaSets(deployment *apps.Deployment, getRSList RsListFunc) ([]*apps.ReplicaSet, error) {
 	// TODO: Right now we list replica sets by their labels. We should list them by selector, i.e. the replica set's selector
 	// should be a superset of the deployment's selector, see https://github.com/kubernetes/kubernetes/issues/19830.
 	namespace := deployment.Namespace
@@ -553,7 +566,7 @@ func ListReplicaSets(deployment *extensions.Deployment, getRSList RsListFunc) ([
 		return nil, err
 	}
 	// Only include those whose ControllerRef matches the Deployment.
-	owned := make([]*extensions.ReplicaSet, 0, len(all))
+	owned := make([]*apps.ReplicaSet, 0, len(all))
 	for _, rs := range all {
 		if metav1.IsControlledBy(rs, deployment) {
 			owned = append(owned, rs)
@@ -591,7 +604,7 @@ func ListReplicaSetsInternal(deployment *internalextensions.Deployment, getRSLis
 // Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan),
 // because only the controller itself should do that.
 // However, it does filter out anything whose ControllerRef doesn't match.
-func ListPods(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet, getPodList podListFunc) (*v1.PodList, error) {
+func ListPods(deployment *apps.Deployment, rsList []*apps.ReplicaSet, getPodList podListFunc) (*v1.PodList, error) {
 	namespace := deployment.Namespace
 	selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
 	if err != nil {
@@ -620,30 +633,21 @@ func ListPods(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet
 }
 
 // EqualIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash]
-// We ignore pod-template-hash because the hash result would be different upon podTemplateSpec API changes
-// (e.g. the addition of a new field will cause the hash code to change)
-// Note that we assume input podTemplateSpecs contain non-empty labels
+// We ignore pod-template-hash because:
+// 1. The hash result would be different upon podTemplateSpec API changes
+//    (e.g. the addition of a new field will cause the hash code to change)
+// 2. The deployment template won't have hash labels
 func EqualIgnoreHash(template1, template2 *v1.PodTemplateSpec) bool {
 	t1Copy := template1.DeepCopy()
 	t2Copy := template2.DeepCopy()
-	// First, compare template.Labels (ignoring hash)
-	labels1, labels2 := t1Copy.Labels, t2Copy.Labels
-	if len(labels1) > len(labels2) {
-		labels1, labels2 = labels2, labels1
-	}
-	// We make sure len(labels2) >= len(labels1)
-	for k, v := range labels2 {
-		if labels1[k] != v && k != extensions.DefaultDeploymentUniqueLabelKey {
-			return false
-		}
-	}
-	// Then, compare the templates without comparing their labels
-	t1Copy.Labels, t2Copy.Labels = nil, nil
+	// Remove hash labels from template.Labels before comparing
+	delete(t1Copy.Labels, apps.DefaultDeploymentUniqueLabelKey)
+	delete(t2Copy.Labels, apps.DefaultDeploymentUniqueLabelKey)
 	return apiequality.Semantic.DeepEqual(t1Copy, t2Copy)
 }
 
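A short usage sketch for the new EqualIgnoreHash semantics: templates that differ only in the pod-template-hash label compare equal, while any other difference makes them unequal. The label key is written out here instead of referencing apps.DefaultDeploymentUniqueLabelKey, and the helper is a local stand-in for the vendored one:

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
)

const hashLabel = "pod-template-hash" // value of apps.DefaultDeploymentUniqueLabelKey

// equalIgnoreHash mirrors the vendored helper: strip the hash label from
// deep copies of both templates, then compare semantically.
func equalIgnoreHash(t1, t2 *v1.PodTemplateSpec) bool {
	c1, c2 := t1.DeepCopy(), t2.DeepCopy()
	delete(c1.Labels, hashLabel)
	delete(c2.Labels, hashLabel)
	return apiequality.Semantic.DeepEqual(c1, c2)
}

func main() {
	a := &v1.PodTemplateSpec{}
	a.Labels = map[string]string{hashLabel: "v1", "app": "web"}
	b := a.DeepCopy()
	b.Labels[hashLabel] = "v2" // only the hash differs
	fmt.Println(equalIgnoreHash(a, b)) // true
	b.Labels["app"] = "api" // a real label differs
	fmt.Println(equalIgnoreHash(a, b)) // false
}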
 // FindNewReplicaSet returns the new RS this given deployment targets (the one with the same pod template).
-func FindNewReplicaSet(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet) *extensions.ReplicaSet {
+func FindNewReplicaSet(deployment *apps.Deployment, rsList []*apps.ReplicaSet) *apps.ReplicaSet {
 	sort.Sort(controller.ReplicaSetsByCreationTimestamp(rsList))
 	for i := range rsList {
 		if EqualIgnoreHash(&rsList[i].Spec.Template, &deployment.Spec.Template) {
@@ -660,9 +664,9 @@ func FindNewReplicaSet(deployment *extensions.Deployment, rsList []*extensions.R
 
 // FindOldReplicaSets returns the old replica sets targeted by the given Deployment, with the given slice of RSes.
 // Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets.
-func FindOldReplicaSets(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet) {
-	var requiredRSs []*extensions.ReplicaSet
-	var allRSs []*extensions.ReplicaSet
+func FindOldReplicaSets(deployment *apps.Deployment, rsList []*apps.ReplicaSet) ([]*apps.ReplicaSet, []*apps.ReplicaSet) {
+	var requiredRSs []*apps.ReplicaSet
+	var allRSs []*apps.ReplicaSet
 	newRS := FindNewReplicaSet(deployment, rsList)
 	for _, rs := range rsList {
 		// Filter out new replica set
@@ -677,68 +681,18 @@ func FindOldReplicaSets(deployment *extensions.Deployment, rsList []*extensions.
 	return requiredRSs, allRSs
 }
 
-// WaitForReplicaSetUpdated polls the replica set until it is updated.
-func WaitForReplicaSetUpdated(c extensionslisters.ReplicaSetLister, desiredGeneration int64, namespace, name string) error {
-	return wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
-		rs, err := c.ReplicaSets(namespace).Get(name)
-		if err != nil {
-			return false, err
-		}
-		return rs.Status.ObservedGeneration >= desiredGeneration, nil
-	})
-}
-
-// WaitForPodsHashPopulated polls the replica set until updated and fully labeled.
-func WaitForPodsHashPopulated(c extensionslisters.ReplicaSetLister, desiredGeneration int64, namespace, name string) error {
-	return wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
-		rs, err := c.ReplicaSets(namespace).Get(name)
-		if err != nil {
-			return false, err
-		}
-		return rs.Status.ObservedGeneration >= desiredGeneration &&
-			rs.Status.FullyLabeledReplicas == *(rs.Spec.Replicas), nil
-	})
-}
-
-// LabelPodsWithHash labels all pods in the given podList with the new hash label.
-func LabelPodsWithHash(podList *v1.PodList, c clientset.Interface, podLister corelisters.PodLister, namespace, name, hash string) error {
-	for _, pod := range podList.Items {
-		// Ignore inactive Pods.
-		if !controller.IsPodActive(&pod) {
-			continue
-		}
-		// Only label the pod that doesn't already have the new hash
-		if pod.Labels[extensions.DefaultDeploymentUniqueLabelKey] != hash {
-			_, err := UpdatePodWithRetries(c.CoreV1().Pods(namespace), podLister, pod.Namespace, pod.Name,
-				func(podToUpdate *v1.Pod) error {
-					// Precondition: the pod doesn't contain the new hash in its label.
-					if podToUpdate.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash {
-						return errors.ErrPreconditionViolated
-					}
-					podToUpdate.Labels = labelsutil.AddLabel(podToUpdate.Labels, extensions.DefaultDeploymentUniqueLabelKey, hash)
-					return nil
-				})
-			if err != nil {
-				return fmt.Errorf("error in adding template hash label %s to pod %q: %v", hash, pod.Name, err)
-			}
-			glog.V(4).Infof("Labeled pod %s/%s of ReplicaSet %s/%s with hash %s.", pod.Namespace, pod.Name, namespace, name, hash)
-		}
-	}
-	return nil
-}
-
 // SetFromReplicaSetTemplate sets the desired PodTemplateSpec from a replica set template to the given deployment.
-func SetFromReplicaSetTemplate(deployment *extensions.Deployment, template v1.PodTemplateSpec) *extensions.Deployment {
+func SetFromReplicaSetTemplate(deployment *apps.Deployment, template v1.PodTemplateSpec) *apps.Deployment {
 	deployment.Spec.Template.ObjectMeta = template.ObjectMeta
 	deployment.Spec.Template.Spec = template.Spec
 	deployment.Spec.Template.ObjectMeta.Labels = labelsutil.CloneAndRemoveLabel(
 		deployment.Spec.Template.ObjectMeta.Labels,
-		extensions.DefaultDeploymentUniqueLabelKey)
+		apps.DefaultDeploymentUniqueLabelKey)
 	return deployment
 }
 
 // GetReplicaCountForReplicaSets returns the sum of Replicas of the given replica sets.
-func GetReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 {
+func GetReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 {
 	totalReplicas := int32(0)
 	for _, rs := range replicaSets {
 		if rs != nil {
@@ -749,7 +703,7 @@ func GetReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 {
 }
 
 // GetActualReplicaCountForReplicaSets returns the sum of actual replicas of the given replica sets.
-func GetActualReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 {
+func GetActualReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 {
 	totalActualReplicas := int32(0)
 	for _, rs := range replicaSets {
 		if rs != nil {
@@ -760,7 +714,7 @@ func GetActualReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) i
 }
 
 // GetReadyReplicaCountForReplicaSets returns the number of ready pods corresponding to the given replica sets.
-func GetReadyReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 {
+func GetReadyReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 {
 	totalReadyReplicas := int32(0)
 	for _, rs := range replicaSets {
 		if rs != nil {
@@ -771,7 +725,7 @@ func GetReadyReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) in
 }
 
 // GetAvailableReplicaCountForReplicaSets returns the number of available pods corresponding to the given replica sets.
-func GetAvailableReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 {
+func GetAvailableReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 {
 	totalAvailableReplicas := int32(0)
 	for _, rs := range replicaSets {
 		if rs != nil {
@@ -782,13 +736,13 @@ func GetAvailableReplicaCountForReplicaSets(replicaSet
 }
 
 // IsRollingUpdate returns true if the strategy type is a rolling update.
-func IsRollingUpdate(deployment *extensions.Deployment) bool {
-	return deployment.Spec.Strategy.Type == extensions.RollingUpdateDeploymentStrategyType
+func IsRollingUpdate(deployment *apps.Deployment) bool {
+	return deployment.Spec.Strategy.Type == apps.RollingUpdateDeploymentStrategyType
 }
 
 // DeploymentComplete considers a deployment to be complete once all of its desired replicas
 // are updated and available, and no old pods are running.
-func DeploymentComplete(deployment *extensions.Deployment, newStatus *extensions.DeploymentStatus) bool {
+func DeploymentComplete(deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool {
 	return newStatus.UpdatedReplicas == *(deployment.Spec.Replicas) &&
 		newStatus.Replicas == *(deployment.Spec.Replicas) &&
 		newStatus.AvailableReplicas == *(deployment.Spec.Replicas) &&
@@ -799,7 +753,7 @@ func DeploymentComplete(deployment *extensions.Deployment, newStatus *extensions
 // current with the new status of the deployment that the controller is observing. More specifically,
 // when new pods are scaled up or become ready or available, or old pods are scaled down, then we
 // consider the deployment is progressing.
-func DeploymentProgressing(deployment *extensions.Deployment, newStatus *extensions.DeploymentStatus) bool {
+func DeploymentProgressing(deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool {
 	oldStatus := deployment.Status
 
 	// Old replicas that need to be scaled down
@@ -818,7 +772,7 @@ var nowFn = func() time.Time { return time.Now() }
 // DeploymentTimedOut considers a deployment to have timed out once its condition that reports progress
 // is older than progressDeadlineSeconds or a Progressing condition with a TimedOutReason reason already
 // exists.
-func DeploymentTimedOut(deployment *extensions.Deployment, newStatus *extensions.DeploymentStatus) bool {
+func DeploymentTimedOut(deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool {
 	if deployment.Spec.ProgressDeadlineSeconds == nil {
 		return false
 	}
@@ -826,7 +780,7 @@ func DeploymentTimedOut(deployment *extensions.Deployment, newStatus *extensions
 	// Look for the Progressing condition. If it doesn't exist, we have no base to estimate progress.
 	// If it's already set with a TimedOutReason reason, we have already timed out, no need to check
 	// again.
-	condition := GetDeploymentCondition(*newStatus, extensions.DeploymentProgressing)
+	condition := GetDeploymentCondition(*newStatus, apps.DeploymentProgressing)
 	if condition == nil {
 		return false
 	}
@@ -864,9 +818,9 @@ func DeploymentTimedOut(deployment *extensions.Deployment, newStatus *extensions
 // When one of the following is true, we're rolling out the deployment; otherwise, we're scaling it.
 // 1) The new RS is saturated: newRS's replicas == deployment's replicas
 // 2) Max number of pods allowed is reached: deployment's replicas + maxSurge == all RSs' replicas
-func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) (int32, error) {
+func NewRSNewReplicas(deployment *apps.Deployment, allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet) (int32, error) {
 	switch deployment.Spec.Strategy.Type {
-	case extensions.RollingUpdateDeploymentStrategyType:
+	case apps.RollingUpdateDeploymentStrategyType:
 		// Check if we can scale up.
 		maxSurge, err := intstrutil.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), true)
 		if err != nil {
@@ -884,7 +838,7 @@ func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.Re
 		// Do not exceed the number of desired replicas.
 		scaleUpCount = int32(integer.IntMin(int(scaleUpCount), int(*(deployment.Spec.Replicas)-*(newRS.Spec.Replicas))))
 		return *(newRS.Spec.Replicas) + scaleUpCount, nil
-	case extensions.RecreateDeploymentStrategyType:
+	case apps.RecreateDeploymentStrategyType:
 		return *(deployment.Spec.Replicas), nil
 	default:
 		return 0, fmt.Errorf("deployment type %v isn't supported", deployment.Spec.Strategy.Type)
|
||||
// Both the deployment and the replica set have to believe this replica set can own all of the desired
|
||||
// replicas in the deployment and the annotation helps in achieving that. All pods of the ReplicaSet
|
||||
// need to be available.
|
||||
func IsSaturated(deployment *extensions.Deployment, rs *extensions.ReplicaSet) bool {
|
||||
func IsSaturated(deployment *apps.Deployment, rs *apps.ReplicaSet) bool {
|
||||
if rs == nil {
|
||||
return false
|
||||
}
|
||||
@ -911,7 +865,7 @@ func IsSaturated(deployment *extensions.Deployment, rs *extensions.ReplicaSet) b
|
||||
|
||||
// WaitForObservedDeployment polls for deployment to be updated so that deployment.Status.ObservedGeneration >= desiredGeneration.
|
||||
// Returns error if polling timesout.
|
||||
func WaitForObservedDeployment(getDeploymentFunc func() (*extensions.Deployment, error), desiredGeneration int64, interval, timeout time.Duration) error {
|
||||
func WaitForObservedDeployment(getDeploymentFunc func() (*apps.Deployment, error), desiredGeneration int64, interval, timeout time.Duration) error {
|
||||
// TODO: This should take clientset.Interface when all code is updated to use clientset. Keeping it this way allows the function to be used by callers who have client.Interface.
|
||||
return wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||
deployment, err := getDeploymentFunc()
|
||||
|
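Before the test file, a hedged sketch of what the migration means for callers: the helpers now take an AppsV1 interface rather than ExtensionsV1beta1, and a fake clientset satisfies the same interface in tests (the call shown in the comment is indicative, not lifted from this diff):

package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// After the migration a caller wires the helpers up through AppsV1, e.g.:
	//
	//   newRS, err := deploymentutil.GetNewReplicaSet(d, client.AppsV1())
	//
	// A fake clientset exposes the same interface, which is what the
	// updated tests below rely on.
	client := fake.NewSimpleClientset()
	fmt.Println(client.AppsV1() != nil) // true
}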
370
vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util_test.go
generated
vendored
@ -25,8 +25,8 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
apiequality "k8s.io/apimachinery/pkg/api/equality"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@ -53,7 +53,7 @@ func addListPodsReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Cl
|
||||
}
|
||||
|
||||
func addGetRSReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Clientset {
|
||||
rsList, ok := obj.(*extensions.ReplicaSetList)
|
||||
rsList, ok := obj.(*apps.ReplicaSetList)
|
||||
fakeClient.AddReactor("get", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
name := action.(core.GetAction).GetName()
|
||||
if ok {
|
||||
@ -71,7 +71,7 @@ func addGetRSReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Clien
|
||||
|
||||
func addUpdateRSReactor(fakeClient *fake.Clientset) *fake.Clientset {
|
||||
fakeClient.AddReactor("update", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
obj := action.(core.UpdateAction).GetObject().(*extensions.ReplicaSet)
|
||||
obj := action.(core.UpdateAction).GetObject().(*apps.ReplicaSet)
|
||||
return true, obj, nil
|
||||
})
|
||||
return fakeClient
|
||||
@ -85,13 +85,13 @@ func addUpdatePodsReactor(fakeClient *fake.Clientset) *fake.Clientset {
|
||||
return fakeClient
|
||||
}
|
||||
|
||||
func generateRSWithLabel(labels map[string]string, image string) extensions.ReplicaSet {
|
||||
return extensions.ReplicaSet{
|
||||
func generateRSWithLabel(labels map[string]string, image string) apps.ReplicaSet {
|
||||
return apps.ReplicaSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: names.SimpleNameGenerator.GenerateName("replicaset"),
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: extensions.ReplicaSetSpec{
|
||||
Spec: apps.ReplicaSetSpec{
|
||||
Replicas: func(i int32) *int32 { return &i }(1),
|
||||
Selector: &metav1.LabelSelector{MatchLabels: labels},
|
||||
Template: v1.PodTemplateSpec{
|
||||
@ -113,10 +113,10 @@ func generateRSWithLabel(labels map[string]string, image string) extensions.Repl
|
||||
}
|
||||
}
|
||||
|
||||
func newDControllerRef(d *extensions.Deployment) *metav1.OwnerReference {
|
||||
func newDControllerRef(d *apps.Deployment) *metav1.OwnerReference {
|
||||
isController := true
|
||||
return &metav1.OwnerReference{
|
||||
APIVersion: "extensions/v1beta1",
|
||||
APIVersion: "apps/v1",
|
||||
Kind: "Deployment",
|
||||
Name: d.GetName(),
|
||||
UID: d.GetUID(),
|
||||
@ -125,16 +125,16 @@ func newDControllerRef(d *extensions.Deployment) *metav1.OwnerReference {
|
||||
}
|
||||
|
||||
// generateRS creates a replica set, with the input deployment's template as its template
|
||||
func generateRS(deployment extensions.Deployment) extensions.ReplicaSet {
|
||||
func generateRS(deployment apps.Deployment) apps.ReplicaSet {
|
||||
template := deployment.Spec.Template.DeepCopy()
|
||||
return extensions.ReplicaSet{
|
||||
return apps.ReplicaSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: randomUID(),
|
||||
Name: names.SimpleNameGenerator.GenerateName("replicaset"),
|
||||
Labels: template.Labels,
|
||||
OwnerReferences: []metav1.OwnerReference{*newDControllerRef(&deployment)},
|
||||
},
|
||||
Spec: extensions.ReplicaSetSpec{
|
||||
Spec: apps.ReplicaSetSpec{
|
||||
Replicas: new(int32),
|
||||
Template: *template,
|
||||
Selector: &metav1.LabelSelector{MatchLabels: template.Labels},
|
||||
@ -147,15 +147,15 @@ func randomUID() types.UID {
|
||||
}
|
||||
|
||||
// generateDeployment creates a deployment, with the input image as its template
|
||||
func generateDeployment(image string) extensions.Deployment {
|
||||
func generateDeployment(image string) apps.Deployment {
|
||||
podLabels := map[string]string{"name": image}
|
||||
terminationSec := int64(30)
|
||||
return extensions.Deployment{
|
||||
return apps.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: image,
|
||||
Annotations: make(map[string]string),
|
||||
},
|
||||
Spec: extensions.DeploymentSpec{
|
||||
Spec: apps.DeploymentSpec{
|
||||
Replicas: func(i int32) *int32 { return &i }(1),
|
||||
Selector: &metav1.LabelSelector{MatchLabels: podLabels},
|
||||
Template: v1.PodTemplateSpec{
|
||||
@ -188,14 +188,14 @@ func TestGetNewRS(t *testing.T) {
|
||||
tests := []struct {
|
||||
Name string
|
||||
objs []runtime.Object
|
||||
expected *extensions.ReplicaSet
|
||||
expected *apps.ReplicaSet
|
||||
}{
|
||||
{
|
||||
"No new ReplicaSet",
|
||||
[]runtime.Object{
|
||||
&v1.PodList{},
|
||||
&extensions.ReplicaSetList{
|
||||
Items: []extensions.ReplicaSet{
|
||||
&apps.ReplicaSetList{
|
||||
Items: []apps.ReplicaSet{
|
||||
generateRS(generateDeployment("foo")),
|
||||
generateRS(generateDeployment("bar")),
|
||||
},
|
||||
@ -207,8 +207,8 @@ func TestGetNewRS(t *testing.T) {
|
||||
"Has new ReplicaSet",
|
||||
[]runtime.Object{
|
||||
&v1.PodList{},
|
||||
&extensions.ReplicaSetList{
|
||||
Items: []extensions.ReplicaSet{
|
||||
&apps.ReplicaSetList{
|
||||
Items: []apps.ReplicaSet{
|
||||
generateRS(generateDeployment("foo")),
|
||||
generateRS(generateDeployment("bar")),
|
||||
generateRS(generateDeployment("abc")),
|
||||
@ -228,7 +228,7 @@ func TestGetNewRS(t *testing.T) {
|
||||
fakeClient = addListRSReactor(fakeClient, test.objs[1])
|
||||
fakeClient = addUpdatePodsReactor(fakeClient)
|
||||
fakeClient = addUpdateRSReactor(fakeClient)
|
||||
rs, err := GetNewReplicaSet(&newDeployment, fakeClient.ExtensionsV1beta1())
|
||||
rs, err := GetNewReplicaSet(&newDeployment, fakeClient.AppsV1())
|
||||
if err != nil {
|
||||
t.Errorf("In test case %s, got unexpected error %v", test.Name, err)
|
||||
}
|
||||
@ -262,13 +262,13 @@ func TestGetOldRSs(t *testing.T) {
|
||||
tests := []struct {
|
||||
Name string
|
||||
objs []runtime.Object
|
||||
expected []*extensions.ReplicaSet
|
||||
expected []*apps.ReplicaSet
|
||||
}{
|
||||
{
|
||||
"No old ReplicaSets",
|
||||
[]runtime.Object{
|
||||
&extensions.ReplicaSetList{
|
||||
Items: []extensions.ReplicaSet{
|
||||
&apps.ReplicaSetList{
|
||||
Items: []apps.ReplicaSet{
|
||||
generateRS(generateDeployment("foo")),
|
||||
newRS,
|
||||
generateRS(generateDeployment("bar")),
|
||||
@ -280,8 +280,8 @@ func TestGetOldRSs(t *testing.T) {
|
||||
{
|
||||
"Has old ReplicaSet",
|
||||
[]runtime.Object{
|
||||
&extensions.ReplicaSetList{
|
||||
Items: []extensions.ReplicaSet{
|
||||
&apps.ReplicaSetList{
|
||||
Items: []apps.ReplicaSet{
|
||||
oldRS2,
|
||||
oldRS,
|
||||
existedRS,
|
||||
@ -291,7 +291,7 @@ func TestGetOldRSs(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
[]*extensions.ReplicaSet{&oldRS, &oldRS2},
|
||||
[]*apps.ReplicaSet{&oldRS, &oldRS2},
|
||||
},
|
||||
}
|
||||
|
||||
@ -301,7 +301,7 @@ func TestGetOldRSs(t *testing.T) {
|
||||
fakeClient = addListRSReactor(fakeClient, test.objs[0])
|
||||
fakeClient = addGetRSReactor(fakeClient, test.objs[0])
|
||||
fakeClient = addUpdateRSReactor(fakeClient)
|
||||
_, rss, err := GetOldReplicaSets(&newDeployment, fakeClient.ExtensionsV1beta1())
|
||||
_, rss, err := GetOldReplicaSets(&newDeployment, fakeClient.AppsV1())
|
||||
if err != nil {
|
||||
t.Errorf("In test case %s, got unexpected error %v", test.Name, err)
|
||||
}
|
||||
@ -340,44 +340,56 @@ func TestEqualIgnoreHash(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
"Same spec, same labels",
|
||||
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}),
|
||||
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}),
|
||||
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}),
|
||||
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}),
|
||||
true,
|
||||
},
|
||||
{
|
||||
"Same spec, only pod-template-hash label value is different",
|
||||
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}),
|
||||
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}),
|
||||
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}),
|
||||
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}),
|
||||
true,
|
||||
},
|
||||
{
|
||||
"Same spec, the former doesn't have pod-template-hash label",
|
||||
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{"something": "else"}),
|
||||
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}),
|
||||
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}),
|
||||
true,
|
||||
},
|
||||
{
|
||||
"Same spec, the label is different, the former doesn't have pod-template-hash label, same number of labels",
|
||||
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{"something": "else"}),
|
||||
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-2"}),
|
||||
false,
|
||||
},
|
||||
{
|
||||
"Same spec, the label is different, the latter doesn't have pod-template-hash label, same number of labels",
|
||||
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1"}),
|
||||
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{"something": "else"}),
|
||||
false,
|
||||
},
|
||||
{
|
||||
"Same spec, the label is different, and the pod-template-hash label value is the same",
|
||||
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1"}),
|
||||
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}),
|
||||
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1"}),
|
||||
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}),
|
||||
false,
|
||||
},
|
||||
{
|
||||
"Different spec, same labels",
|
||||
generatePodTemplateSpec("foo", "foo-node", map[string]string{"former": "value"}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}),
|
||||
generatePodTemplateSpec("foo", "foo-node", map[string]string{"latter": "value"}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}),
|
||||
generatePodTemplateSpec("foo", "foo-node", map[string]string{"former": "value"}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}),
|
||||
generatePodTemplateSpec("foo", "foo-node", map[string]string{"latter": "value"}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}),
|
||||
false,
|
||||
},
|
||||
{
|
||||
"Different spec, different pod-template-hash label value",
|
||||
generatePodTemplateSpec("foo-1", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}),
|
||||
generatePodTemplateSpec("foo-2", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}),
|
||||
generatePodTemplateSpec("foo-1", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}),
|
||||
generatePodTemplateSpec("foo-2", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}),
|
||||
false,
|
||||
},
|
||||
{
|
||||
"Different spec, the former doesn't have pod-template-hash label",
|
||||
generatePodTemplateSpec("foo-1", "foo-node-1", map[string]string{}, map[string]string{"something": "else"}),
|
||||
generatePodTemplateSpec("foo-2", "foo-node-2", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}),
|
||||
generatePodTemplateSpec("foo-2", "foo-node-2", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}),
|
||||
false,
|
||||
},
|
||||
{
|
||||
@ -419,11 +431,11 @@ func TestFindNewReplicaSet(t *testing.T) {
|
||||
|
||||
deployment := generateDeployment("nginx")
|
||||
newRS := generateRS(deployment)
|
||||
newRS.Labels[extensions.DefaultDeploymentUniqueLabelKey] = "hash"
|
||||
newRS.Labels[apps.DefaultDeploymentUniqueLabelKey] = "hash"
|
||||
newRS.CreationTimestamp = later
|
||||
|
||||
newRSDup := generateRS(deployment)
|
||||
newRSDup.Labels[extensions.DefaultDeploymentUniqueLabelKey] = "different-hash"
|
||||
newRSDup.Labels[apps.DefaultDeploymentUniqueLabelKey] = "different-hash"
|
||||
newRSDup.CreationTimestamp = now
|
||||
|
||||
oldDeployment := generateDeployment("nginx")
|
||||
@ -433,26 +445,26 @@ func TestFindNewReplicaSet(t *testing.T) {
|
||||
|
||||
tests := []struct {
|
||||
Name string
|
||||
deployment extensions.Deployment
|
||||
rsList []*extensions.ReplicaSet
|
||||
expected *extensions.ReplicaSet
|
||||
deployment apps.Deployment
|
||||
rsList []*apps.ReplicaSet
|
||||
expected *apps.ReplicaSet
|
||||
}{
|
||||
{
|
||||
Name: "Get new ReplicaSet with the same template as Deployment spec but different pod-template-hash value",
|
||||
deployment: deployment,
|
||||
rsList: []*extensions.ReplicaSet{&newRS, &oldRS},
|
||||
rsList: []*apps.ReplicaSet{&newRS, &oldRS},
|
||||
expected: &newRS,
|
||||
},
|
||||
{
|
||||
Name: "Get the oldest new ReplicaSet when there are more than one ReplicaSet with the same template",
|
||||
deployment: deployment,
|
||||
rsList: []*extensions.ReplicaSet{&newRS, &oldRS, &newRSDup},
|
||||
rsList: []*apps.ReplicaSet{&newRS, &oldRS, &newRSDup},
|
||||
expected: &newRSDup,
|
||||
},
|
||||
{
|
||||
Name: "Get nil new ReplicaSet",
|
||||
deployment: deployment,
|
||||
rsList: []*extensions.ReplicaSet{&oldRS},
|
||||
rsList: []*apps.ReplicaSet{&oldRS},
|
||||
expected: nil,
|
||||
},
|
||||
}
|
||||
@ -474,11 +486,11 @@ func TestFindOldReplicaSets(t *testing.T) {
|
||||
deployment := generateDeployment("nginx")
|
||||
newRS := generateRS(deployment)
|
||||
*(newRS.Spec.Replicas) = 1
|
||||
newRS.Labels[extensions.DefaultDeploymentUniqueLabelKey] = "hash"
|
||||
newRS.Labels[apps.DefaultDeploymentUniqueLabelKey] = "hash"
|
||||
newRS.CreationTimestamp = later
|
||||
|
||||
newRSDup := generateRS(deployment)
|
||||
newRSDup.Labels[extensions.DefaultDeploymentUniqueLabelKey] = "different-hash"
|
||||
newRSDup.Labels[apps.DefaultDeploymentUniqueLabelKey] = "different-hash"
|
||||
newRSDup.CreationTimestamp = now
|
||||
|
||||
oldDeployment := generateDeployment("nginx")
|
||||
@ -489,37 +501,37 @@ func TestFindOldReplicaSets(t *testing.T) {
|
||||
|
||||
tests := []struct {
|
||||
Name string
|
||||
deployment extensions.Deployment
|
||||
rsList []*extensions.ReplicaSet
|
||||
deployment apps.Deployment
|
||||
rsList []*apps.ReplicaSet
|
||||
podList *v1.PodList
|
||||
expected []*extensions.ReplicaSet
|
||||
expectedRequire []*extensions.ReplicaSet
|
||||
expected []*apps.ReplicaSet
|
||||
expectedRequire []*apps.ReplicaSet
|
||||
}{
|
||||
{
|
||||
Name: "Get old ReplicaSets",
|
||||
deployment: deployment,
|
||||
rsList: []*extensions.ReplicaSet{&newRS, &oldRS},
|
||||
expected: []*extensions.ReplicaSet{&oldRS},
|
||||
rsList: []*apps.ReplicaSet{&newRS, &oldRS},
|
||||
expected: []*apps.ReplicaSet{&oldRS},
|
||||
expectedRequire: nil,
|
||||
},
|
||||
{
|
||||
Name: "Get old ReplicaSets with no new ReplicaSet",
|
||||
deployment: deployment,
|
||||
rsList: []*extensions.ReplicaSet{&oldRS},
|
||||
expected: []*extensions.ReplicaSet{&oldRS},
|
||||
rsList: []*apps.ReplicaSet{&oldRS},
|
||||
expected: []*apps.ReplicaSet{&oldRS},
|
||||
expectedRequire: nil,
|
||||
},
|
||||
{
|
||||
Name: "Get old ReplicaSets with two new ReplicaSets, only the oldest new ReplicaSet is seen as new ReplicaSet",
|
||||
deployment: deployment,
|
||||
rsList: []*extensions.ReplicaSet{&oldRS, &newRS, &newRSDup},
|
||||
expected: []*extensions.ReplicaSet{&oldRS, &newRS},
|
||||
expectedRequire: []*extensions.ReplicaSet{&newRS},
|
||||
rsList: []*apps.ReplicaSet{&oldRS, &newRS, &newRSDup},
|
||||
expected: []*apps.ReplicaSet{&oldRS, &newRS},
|
||||
expectedRequire: []*apps.ReplicaSet{&newRS},
|
||||
},
|
||||
{
|
||||
Name: "Get empty old ReplicaSets",
|
||||
deployment: deployment,
|
||||
rsList: []*extensions.ReplicaSet{&newRS},
|
||||
rsList: []*apps.ReplicaSet{&newRS},
|
||||
expected: nil,
|
||||
expectedRequire: nil,
|
||||
},
|
||||
@ -542,7 +554,7 @@ func TestFindOldReplicaSets(t *testing.T) {
|
||||
}
|
||||
|
||||
// equal compares the equality of two ReplicaSet slices regardless of their ordering
|
||||
func equal(rss1, rss2 []*extensions.ReplicaSet) bool {
|
||||
func equal(rss1, rss2 []*apps.ReplicaSet) bool {
|
||||
if reflect.DeepEqual(rss1, rss2) {
|
||||
return true
|
||||
}
|
||||
@ -571,19 +583,19 @@ func TestGetReplicaCountForReplicaSets(t *testing.T) {
|
||||
|
||||
tests := []struct {
|
||||
Name string
|
||||
sets []*extensions.ReplicaSet
|
||||
sets []*apps.ReplicaSet
|
||||
expectedCount int32
|
||||
expectedActual int32
|
||||
}{
|
||||
{
|
||||
"1:2 Replicas",
|
||||
[]*extensions.ReplicaSet{&rs1},
|
||||
[]*apps.ReplicaSet{&rs1},
|
||||
1,
|
||||
2,
|
||||
},
|
||||
{
|
||||
"3:5 Replicas",
|
||||
[]*extensions.ReplicaSet{&rs1, &rs2},
|
||||
[]*apps.ReplicaSet{&rs1, &rs2},
|
||||
3,
|
||||
5,
|
||||
},
|
||||
@ -667,7 +679,7 @@ func TestResolveFenceposts(t *testing.T) {
|
||||
func TestNewRSNewReplicas(t *testing.T) {
|
||||
tests := []struct {
|
||||
Name string
|
||||
strategyType extensions.DeploymentStrategyType
|
||||
strategyType apps.DeploymentStrategyType
|
||||
depReplicas int32
|
||||
newRSReplicas int32
|
||||
maxSurge int
|
||||
@ -675,17 +687,17 @@ func TestNewRSNewReplicas(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
"can not scale up - to newRSReplicas",
|
||||
extensions.RollingUpdateDeploymentStrategyType,
|
||||
apps.RollingUpdateDeploymentStrategyType,
|
||||
1, 5, 1, 5,
|
||||
},
|
||||
{
|
||||
"scale up - to depReplicas",
|
||||
extensions.RollingUpdateDeploymentStrategyType,
|
||||
apps.RollingUpdateDeploymentStrategyType,
|
||||
6, 2, 10, 6,
|
||||
},
|
||||
{
|
||||
"recreate - to depReplicas",
|
||||
extensions.RecreateDeploymentStrategyType,
|
||||
apps.RecreateDeploymentStrategyType,
|
||||
3, 1, 1, 3,
|
||||
},
|
||||
}
|
||||
@ -697,8 +709,8 @@ func TestNewRSNewReplicas(t *testing.T) {
|
||||
for _, test := range tests {
|
||||
t.Run(test.Name, func(t *testing.T) {
|
||||
*(newDeployment.Spec.Replicas) = test.depReplicas
|
||||
newDeployment.Spec.Strategy = extensions.DeploymentStrategy{Type: test.strategyType}
|
||||
newDeployment.Spec.Strategy.RollingUpdate = &extensions.RollingUpdateDeployment{
|
||||
newDeployment.Spec.Strategy = apps.DeploymentStrategy{Type: test.strategyType}
|
||||
newDeployment.Spec.Strategy.RollingUpdate = &apps.RollingUpdateDeployment{
|
||||
MaxUnavailable: func(i int) *intstr.IntOrString {
|
||||
x := intstr.FromInt(i)
|
||||
return &x
|
||||
@ -709,7 +721,7 @@ func TestNewRSNewReplicas(t *testing.T) {
|
||||
}(test.maxSurge),
|
||||
}
|
||||
*(newRC.Spec.Replicas) = test.newRSReplicas
|
||||
rs, err := NewRSNewReplicas(&newDeployment, []*extensions.ReplicaSet{&rs5}, &newRC)
|
||||
rs, err := NewRSNewReplicas(&newDeployment, []*apps.ReplicaSet{&rs5}, &newRC)
|
||||
if err != nil {
|
||||
t.Errorf("In test case %s, got unexpected error %v", test.Name, err)
|
||||
}
|
||||
@ -721,33 +733,33 @@ func TestNewRSNewReplicas(t *testing.T) {
|
||||
}
|
||||
|
||||
var (
|
||||
condProgressing = func() extensions.DeploymentCondition {
|
||||
return extensions.DeploymentCondition{
|
||||
Type: extensions.DeploymentProgressing,
|
||||
condProgressing = func() apps.DeploymentCondition {
|
||||
return apps.DeploymentCondition{
|
||||
Type: apps.DeploymentProgressing,
|
||||
Status: v1.ConditionFalse,
|
||||
Reason: "ForSomeReason",
|
||||
}
|
||||
}
|
||||
|
||||
condProgressing2 = func() extensions.DeploymentCondition {
|
||||
return extensions.DeploymentCondition{
|
||||
Type: extensions.DeploymentProgressing,
|
||||
condProgressing2 = func() apps.DeploymentCondition {
|
||||
return apps.DeploymentCondition{
|
||||
Type: apps.DeploymentProgressing,
|
||||
Status: v1.ConditionTrue,
|
||||
Reason: "BecauseItIs",
|
||||
}
|
||||
}
|
||||
|
||||
condAvailable = func() extensions.DeploymentCondition {
|
||||
return extensions.DeploymentCondition{
|
||||
Type: extensions.DeploymentAvailable,
|
||||
condAvailable = func() apps.DeploymentCondition {
|
||||
return apps.DeploymentCondition{
|
||||
Type: apps.DeploymentAvailable,
|
||||
Status: v1.ConditionTrue,
|
||||
Reason: "AwesomeController",
|
||||
}
|
||||
}
|
||||
|
||||
status = func() *extensions.DeploymentStatus {
|
||||
return &extensions.DeploymentStatus{
|
||||
Conditions: []extensions.DeploymentCondition{condProgressing(), condAvailable()},
|
||||
status = func() *apps.DeploymentStatus {
|
||||
return &apps.DeploymentStatus{
|
||||
Conditions: []apps.DeploymentCondition{condProgressing(), condAvailable()},
|
||||
}
|
||||
}
|
||||
)
|
||||
@ -758,8 +770,8 @@ func TestGetCondition(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
|
||||
status extensions.DeploymentStatus
|
||||
condType extensions.DeploymentConditionType
|
||||
status apps.DeploymentStatus
|
||||
condType apps.DeploymentConditionType
|
||||
|
||||
expected bool
|
||||
}{
|
||||
@ -767,7 +779,7 @@ func TestGetCondition(t *testing.T) {
|
||||
name: "condition exists",
|
||||
|
||||
status: *exampleStatus,
|
||||
condType: extensions.DeploymentAvailable,
|
||||
condType: apps.DeploymentAvailable,
|
||||
|
||||
expected: true,
|
||||
},
|
||||
@ -775,7 +787,7 @@ func TestGetCondition(t *testing.T) {
|
||||
name: "condition does not exist",
|
||||
|
||||
status: *exampleStatus,
|
||||
condType: extensions.DeploymentReplicaFailure,
|
||||
condType: apps.DeploymentReplicaFailure,
|
||||
|
||||
expected: false,
|
||||
},
|
||||
@ -796,23 +808,23 @@ func TestSetCondition(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
|
||||
status *extensions.DeploymentStatus
|
||||
cond extensions.DeploymentCondition
|
||||
status *apps.DeploymentStatus
|
||||
cond apps.DeploymentCondition
|
||||
|
||||
expectedStatus *extensions.DeploymentStatus
|
||||
expectedStatus *apps.DeploymentStatus
|
||||
}{
|
||||
{
|
||||
name: "set for the first time",
|
||||
|
||||
status: &extensions.DeploymentStatus{},
|
||||
status: &apps.DeploymentStatus{},
|
||||
cond: condAvailable(),
|
||||
|
||||
expectedStatus: &extensions.DeploymentStatus{Conditions: []extensions.DeploymentCondition{condAvailable()}},
expectedStatus: &apps.DeploymentStatus{Conditions: []apps.DeploymentCondition{condAvailable()}},
},
{
name: "simple set",

status: &extensions.DeploymentStatus{Conditions: []extensions.DeploymentCondition{condProgressing()}},
status: &apps.DeploymentStatus{Conditions: []apps.DeploymentCondition{condProgressing()}},
cond: condAvailable(),

expectedStatus: status(),
@ -820,10 +832,10 @@ func TestSetCondition(t *testing.T) {
{
name: "overwrite",

status: &extensions.DeploymentStatus{Conditions: []extensions.DeploymentCondition{condProgressing()}},
status: &apps.DeploymentStatus{Conditions: []apps.DeploymentCondition{condProgressing()}},
cond: condProgressing2(),

expectedStatus: &extensions.DeploymentStatus{Conditions: []extensions.DeploymentCondition{condProgressing2()}},
expectedStatus: &apps.DeploymentStatus{Conditions: []apps.DeploymentCondition{condProgressing2()}},
},
}

@ -841,32 +853,32 @@ func TestRemoveCondition(t *testing.T) {
tests := []struct {
name string

status *extensions.DeploymentStatus
condType extensions.DeploymentConditionType
status *apps.DeploymentStatus
condType apps.DeploymentConditionType

expectedStatus *extensions.DeploymentStatus
expectedStatus *apps.DeploymentStatus
}{
{
name: "remove from empty status",

status: &extensions.DeploymentStatus{},
condType: extensions.DeploymentProgressing,
status: &apps.DeploymentStatus{},
condType: apps.DeploymentProgressing,

expectedStatus: &extensions.DeploymentStatus{},
expectedStatus: &apps.DeploymentStatus{},
},
{
name: "simple remove",

status: &extensions.DeploymentStatus{Conditions: []extensions.DeploymentCondition{condProgressing()}},
condType: extensions.DeploymentProgressing,
status: &apps.DeploymentStatus{Conditions: []apps.DeploymentCondition{condProgressing()}},
condType: apps.DeploymentProgressing,

expectedStatus: &extensions.DeploymentStatus{},
expectedStatus: &apps.DeploymentStatus{},
},
{
name: "doesn't remove anything",

status: status(),
condType: extensions.DeploymentReplicaFailure,
condType: apps.DeploymentReplicaFailure,

expectedStatus: status(),
},
@ -883,19 +895,19 @@ func TestRemoveCondition(t *testing.T) {
}

func TestDeploymentComplete(t *testing.T) {
deployment := func(desired, current, updated, available, maxUnavailable, maxSurge int32) *extensions.Deployment {
return &extensions.Deployment{
Spec: extensions.DeploymentSpec{
deployment := func(desired, current, updated, available, maxUnavailable, maxSurge int32) *apps.Deployment {
return &apps.Deployment{
Spec: apps.DeploymentSpec{
Replicas: &desired,
Strategy: extensions.DeploymentStrategy{
RollingUpdate: &extensions.RollingUpdateDeployment{
Strategy: apps.DeploymentStrategy{
RollingUpdate: &apps.RollingUpdateDeployment{
MaxUnavailable: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(maxUnavailable)),
MaxSurge: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(maxSurge)),
},
Type: extensions.RollingUpdateDeploymentStrategyType,
Type: apps.RollingUpdateDeploymentStrategyType,
},
},
Status: extensions.DeploymentStatus{
Status: apps.DeploymentStatus{
Replicas: current,
UpdatedReplicas: updated,
AvailableReplicas: available,
@ -906,7 +918,7 @@ func TestDeploymentComplete(t *testing.T) {
tests := []struct {
name string

d *extensions.Deployment
d *apps.Deployment

expected bool
}{
@ -960,9 +972,9 @@ func TestDeploymentComplete(t *testing.T) {
}

func TestDeploymentProgressing(t *testing.T) {
deployment := func(current, updated, ready, available int32) *extensions.Deployment {
return &extensions.Deployment{
Status: extensions.DeploymentStatus{
deployment := func(current, updated, ready, available int32) *apps.Deployment {
return &apps.Deployment{
Status: apps.DeploymentStatus{
Replicas: current,
UpdatedReplicas: updated,
ReadyReplicas: ready,
@ -970,8 +982,8 @@ func TestDeploymentProgressing(t *testing.T) {
},
}
}
newStatus := func(current, updated, ready, available int32) extensions.DeploymentStatus {
return extensions.DeploymentStatus{
newStatus := func(current, updated, ready, available int32) apps.DeploymentStatus {
return apps.DeploymentStatus{
Replicas: current,
UpdatedReplicas: updated,
ReadyReplicas: ready,
@ -982,8 +994,8 @@ func TestDeploymentProgressing(t *testing.T) {
tests := []struct {
name string

d *extensions.Deployment
newStatus extensions.DeploymentStatus
d *apps.Deployment
newStatus apps.DeploymentStatus

expected bool
}{
@ -1063,13 +1075,13 @@ func TestDeploymentTimedOut(t *testing.T) {
timeFn := func(min, sec int) time.Time {
return time.Date(2016, 1, 1, 0, min, sec, 0, time.UTC)
}
deployment := func(condType extensions.DeploymentConditionType, status v1.ConditionStatus, reason string, pds *int32, from time.Time) extensions.Deployment {
return extensions.Deployment{
Spec: extensions.DeploymentSpec{
deployment := func(condType apps.DeploymentConditionType, status v1.ConditionStatus, reason string, pds *int32, from time.Time) apps.Deployment {
return apps.Deployment{
Spec: apps.DeploymentSpec{
ProgressDeadlineSeconds: pds,
},
Status: extensions.DeploymentStatus{
Conditions: []extensions.DeploymentCondition{
Status: apps.DeploymentStatus{
Conditions: []apps.DeploymentCondition{
{
Type: condType,
Status: status,
@ -1084,7 +1096,7 @@ func TestDeploymentTimedOut(t *testing.T) {
tests := []struct {
name string

d extensions.Deployment
d apps.Deployment
nowFn func() time.Time

expected bool
@ -1092,28 +1104,28 @@ func TestDeploymentTimedOut(t *testing.T) {
{
name: "no progressDeadlineSeconds specified - no timeout",

d: deployment(extensions.DeploymentProgressing, v1.ConditionTrue, "", null, timeFn(1, 9)),
|
||||
d: deployment(apps.DeploymentProgressing, v1.ConditionTrue, "", null, timeFn(1, 9)),
|
||||
nowFn: func() time.Time { return timeFn(1, 20) },
expected: false,
},
{
name: "progressDeadlineSeconds: 10s, now - started => 00:01:20 - 00:01:09 => 11s",

d: deployment(extensions.DeploymentProgressing, v1.ConditionTrue, "", &ten, timeFn(1, 9)),
d: deployment(apps.DeploymentProgressing, v1.ConditionTrue, "", &ten, timeFn(1, 9)),
nowFn: func() time.Time { return timeFn(1, 20) },
expected: true,
},
{
name: "progressDeadlineSeconds: 10s, now - started => 00:01:20 - 00:01:11 => 9s",

d: deployment(extensions.DeploymentProgressing, v1.ConditionTrue, "", &ten, timeFn(1, 11)),
d: deployment(apps.DeploymentProgressing, v1.ConditionTrue, "", &ten, timeFn(1, 11)),
nowFn: func() time.Time { return timeFn(1, 20) },
expected: false,
},
{
name: "previous status was a complete deployment",

d: deployment(extensions.DeploymentProgressing, v1.ConditionTrue, NewRSAvailableReason, nil, time.Time{}),
d: deployment(apps.DeploymentProgressing, v1.ConditionTrue, NewRSAvailableReason, nil, time.Time{}),
expected: false,
},
}
@ -1129,23 +1141,23 @@ func TestDeploymentTimedOut(t *testing.T) {
}

func TestMaxUnavailable(t *testing.T) {
deployment := func(replicas int32, maxUnavailable intstr.IntOrString) extensions.Deployment {
return extensions.Deployment{
Spec: extensions.DeploymentSpec{
deployment := func(replicas int32, maxUnavailable intstr.IntOrString) apps.Deployment {
return apps.Deployment{
Spec: apps.DeploymentSpec{
Replicas: func(i int32) *int32 { return &i }(replicas),
Strategy: extensions.DeploymentStrategy{
RollingUpdate: &extensions.RollingUpdateDeployment{
Strategy: apps.DeploymentStrategy{
RollingUpdate: &apps.RollingUpdateDeployment{
MaxSurge: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(1)),
MaxUnavailable: &maxUnavailable,
},
Type: extensions.RollingUpdateDeploymentStrategyType,
Type: apps.RollingUpdateDeploymentStrategyType,
},
},
}
}
tests := []struct {
name string
deployment extensions.Deployment
deployment apps.Deployment
expected int32
}{
{
@ -1170,10 +1182,10 @@ func TestMaxUnavailable(t *testing.T) {
},
{
name: "maxUnavailable with Recreate deployment strategy",
deployment: extensions.Deployment{
Spec: extensions.DeploymentSpec{
Strategy: extensions.DeploymentStrategy{
Type: extensions.RecreateDeploymentStrategyType,
deployment: apps.Deployment{
Spec: apps.DeploymentSpec{
Strategy: apps.DeploymentStrategy{
Type: apps.RecreateDeploymentStrategyType,
},
},
},
@ -1265,3 +1277,77 @@ func TestAnnotationUtils(t *testing.T) {
})
//Tear Down
}

func TestReplicasAnnotationsNeedUpdate(t *testing.T) {

desiredReplicas := fmt.Sprintf("%d", int32(10))
maxReplicas := fmt.Sprintf("%d", int32(20))

tests := []struct {
name string
replicaSet *apps.ReplicaSet
expected bool
}{
{
name: "test Annotations nil",
replicaSet: &apps.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{Name: "hello", Namespace: "test"},
Spec: apps.ReplicaSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
},
},
expected: true,
},
{
name: "test desiredReplicas update",
replicaSet: &apps.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: "hello",
Namespace: "test",
Annotations: map[string]string{DesiredReplicasAnnotation: "8", MaxReplicasAnnotation: maxReplicas},
},
Spec: apps.ReplicaSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
},
},
expected: true,
},
{
name: "test maxReplicas update",
replicaSet: &apps.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: "hello",
Namespace: "test",
Annotations: map[string]string{DesiredReplicasAnnotation: desiredReplicas, MaxReplicasAnnotation: "16"},
},
Spec: apps.ReplicaSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
},
},
expected: true,
},
{
name: "test needn't update",
replicaSet: &apps.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: "hello",
Namespace: "test",
Annotations: map[string]string{DesiredReplicasAnnotation: desiredReplicas, MaxReplicasAnnotation: maxReplicas},
},
Spec: apps.ReplicaSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
},
},
expected: false,
},
}

for i, test := range tests {
t.Run(test.name, func(t *testing.T) {
result := ReplicasAnnotationsNeedUpdate(test.replicaSet, 10, 20)
if result != test.expected {
t.Errorf("case[%d]:%s Expected %v, Got: %v", i, test.name, test.expected, result)
}
})
}
}
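The change running through this whole test file is mechanical: the Deployment types moved from the extensions/v1beta1 API group to apps/v1. A minimal sketch of the swap at the import site (illustrative only, not part of the diff):

import (
	apps "k8s.io/api/apps/v1" // replaces: extensions "k8s.io/api/extensions/v1beta1"
)

var _ apps.DeploymentStatus // every extensions.X reference becomes apps.X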
23
vendor/k8s.io/kubernetes/pkg/controller/deployment/util/replicaset_util.go
generated
vendored
@ -17,27 +17,23 @@ limitations under the License.
package util

import (
"fmt"

"github.com/golang/glog"

extensions "k8s.io/api/extensions/v1beta1"
apps "k8s.io/api/apps/v1"
errorsutil "k8s.io/apimachinery/pkg/util/errors"
unversionedextensions "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
extensionslisters "k8s.io/client-go/listers/extensions/v1beta1"
appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
appslisters "k8s.io/client-go/listers/apps/v1"
"k8s.io/client-go/util/retry"
"k8s.io/kubernetes/pkg/controller"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
)

// TODO: use client library instead when it starts to support update retries
// see https://github.com/kubernetes/kubernetes/issues/21479
type updateRSFunc func(rs *extensions.ReplicaSet) error
type updateRSFunc func(rs *apps.ReplicaSet) error

// UpdateRSWithRetries updates a RS with given applyUpdate function. Note that RS not found error is ignored.
// The returned bool value can be used to tell if the RS is actually updated.
func UpdateRSWithRetries(rsClient unversionedextensions.ReplicaSetInterface, rsLister extensionslisters.ReplicaSetLister, namespace, name string, applyUpdate updateRSFunc) (*extensions.ReplicaSet, error) {
var rs *extensions.ReplicaSet
func UpdateRSWithRetries(rsClient appsclient.ReplicaSetInterface, rsLister appslisters.ReplicaSetLister, namespace, name string, applyUpdate updateRSFunc) (*apps.ReplicaSet, error) {
var rs *apps.ReplicaSet

retryErr := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
var err error
@ -62,10 +58,3 @@ func UpdateRSWithRetries(rsClient unversionedextensions.ReplicaSetInterface, rsL

return rs, retryErr
}

// GetReplicaSetHash returns the pod template hash of a ReplicaSet's pod template space
func GetReplicaSetHash(rs *extensions.ReplicaSet, uniquifier *int32) (string, error) {
rsTemplate := rs.Spec.Template.DeepCopy()
rsTemplate.Labels = labelsutil.CloneAndRemoveLabel(rsTemplate.Labels, extensions.DefaultDeploymentUniqueLabelKey)
return fmt.Sprintf("%d", controller.ComputeHash(rsTemplate, uniquifier)), nil
}
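For orientation, the conflict-retry pattern that UpdateRSWithRetries builds on looks roughly like this. A minimal sketch, assuming the client-go vintage vendored here (Get/Update take no context) and reusing the rsClient, rsLister, namespace, and name parameters from the function above; the annotation mutation is hypothetical:

err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
	rs, getErr := rsLister.ReplicaSets(namespace).Get(name) // fresh read on every attempt
	if getErr != nil {
		return getErr
	}
	rs = rs.DeepCopy() // never mutate the lister's shared cache copy
	if rs.Annotations == nil {
		rs.Annotations = map[string]string{}
	}
	rs.Annotations["example"] = "updated" // hypothetical change
	_, updateErr := rsClient.Update(rs)   // a 409 Conflict makes RetryOnConflict try again
	return updateErr
})
_ = err // a real caller would return or log this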
1
vendor/k8s.io/kubernetes/pkg/controller/disruption/BUILD
generated
vendored
@ -47,7 +47,6 @@ go_test(
srcs = ["disruption_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/apis/core/install:go_default_library",
"//pkg/controller:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
4
vendor/k8s.io/kubernetes/pkg/controller/disruption/disruption.go
generated
vendored
@ -294,7 +294,7 @@ func (dc *DisruptionController) Run(stopCh <-chan struct{}) {

if dc.kubeClient != nil {
glog.Infof("Sending events to api server.")
dc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(dc.kubeClient.CoreV1().RESTClient()).Events("")})
dc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: dc.kubeClient.CoreV1().Events("")})
} else {
glog.Infof("No api server defined - no events will be sent to API server.")
}
@ -471,7 +471,7 @@ func (dc *DisruptionController) processNextRecheckWorkItem() bool {
func (dc *DisruptionController) sync(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing PodDisruptionBudget %q (%v)", key, time.Now().Sub(startTime))
glog.V(4).Infof("Finished syncing PodDisruptionBudget %q (%v)", key, time.Since(startTime))
}()

namespace, name, err := cache.SplitMetaNamespaceKey(key)
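The time.Since swap here (repeated in the endpoints controller below) is behavior-preserving: in the standard library, time.Since(t) is defined as shorthand for time.Now().Sub(t). A tiny illustration (doWork is a placeholder):

start := time.Now()
doWork()                     // placeholder for the sync body
elapsed := time.Since(start) // identical value to time.Now().Sub(start)
_ = elapsed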
15
vendor/k8s.io/kubernetes/pkg/controller/disruption/disruption_test.go
generated
vendored
@ -33,7 +33,6 @@ import (
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/api/legacyscheme"
_ "k8s.io/kubernetes/pkg/apis/core/install"
"k8s.io/kubernetes/pkg/controller"

@ -142,7 +141,7 @@ func newSelFooBar() *metav1.LabelSelector {
func newMinAvailablePodDisruptionBudget(t *testing.T, minAvailable intstr.IntOrString) (*policy.PodDisruptionBudget, string) {

pdb := &policy.PodDisruptionBudget{
TypeMeta: metav1.TypeMeta{APIVersion: legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
@ -165,7 +164,7 @@ func newMinAvailablePodDisruptionBudget(t *testing.T, minAvailable intstr.IntOrS

func newMaxUnavailablePodDisruptionBudget(t *testing.T, maxUnavailable intstr.IntOrString) (*policy.PodDisruptionBudget, string) {
pdb := &policy.PodDisruptionBudget{
TypeMeta: metav1.TypeMeta{APIVersion: legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
@ -210,7 +209,7 @@ func updatePodOwnerToSs(t *testing.T, pod *v1.Pod, ss *apps.StatefulSet) {

func newPod(t *testing.T, name string) (*v1.Pod, string) {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{APIVersion: legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Annotations: make(map[string]string),
@ -237,7 +236,7 @@ func newPod(t *testing.T, name string) (*v1.Pod, string) {

func newReplicationController(t *testing.T, size int32) (*v1.ReplicationController, string) {
rc := &v1.ReplicationController{
TypeMeta: metav1.TypeMeta{APIVersion: legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
@ -261,7 +260,7 @@ func newReplicationController(t *testing.T, size int32) (*v1.ReplicationControll

func newDeployment(t *testing.T, size int32) (*extensions.Deployment, string) {
d := &extensions.Deployment{
TypeMeta: metav1.TypeMeta{APIVersion: legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
@ -285,7 +284,7 @@ func newDeployment(t *testing.T, size int32) (*extensions.Deployment, string) {

func newReplicaSet(t *testing.T, size int32) (*extensions.ReplicaSet, string) {
rs := &extensions.ReplicaSet{
TypeMeta: metav1.TypeMeta{APIVersion: legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
@ -309,7 +308,7 @@ func newReplicaSet(t *testing.T, size int32) (*extensions.ReplicaSet, string) {

func newStatefulSet(t *testing.T, size int32) (*apps.StatefulSet, string) {
ss := &apps.StatefulSet{
TypeMeta: metav1.TypeMeta{APIVersion: legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
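The repeated fixture change above replaces a registry lookup with its constant result: for the legacy core group, legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String() evaluates to "v1", so the tests now spell it out. A minimal equivalent, assuming the usual metav1 alias:

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// Same TypeMeta as before, with the API version written out directly.
var podTypeMeta = metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}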
2
vendor/k8s.io/kubernetes/pkg/controller/endpoint/BUILD
generated
vendored
@ -42,7 +42,6 @@ go_test(
srcs = ["endpoints_controller_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/api/testapi:go_default_library",
"//pkg/api/v1/endpoints:go_default_library",
"//pkg/apis/core:go_default_library",
@ -50,6 +49,7 @@ go_test(
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
9
vendor/k8s.io/kubernetes/pkg/controller/endpoint/OWNERS
generated
vendored
@ -1,5 +1,10 @@
reviewers:
- bowei
approvers:
- bowei
- MrHohn
- thockin
- matchstick
reviewers:
- bowei
- MrHohn
- thockin
- matchstick
27
vendor/k8s.io/kubernetes/pkg/controller/endpoint/endpoints_controller.go
generated
vendored
@ -64,7 +64,7 @@ const (
// containers in the pod and marks it "Running", till the kubelet stops all
// containers and deletes the pod from the apiserver.
// This field is deprecated. v1.Service.PublishNotReadyAddresses will replace it
// in subsequent releases.
// in subsequent releases. It will be removed no sooner than 1.13.
TolerateUnreadyEndpointsAnnotation = "service.alpha.kubernetes.io/tolerate-unready-endpoints"
)

@ -372,7 +372,7 @@ func (e *EndpointController) handleErr(err error, key interface{}) {
}

if e.queue.NumRequeues(key) < maxRetries {
glog.V(2).Infof("Error syncing endpoints for service %q: %v", key, err)
glog.V(2).Infof("Error syncing endpoints for service %q, retrying. Error: %v", key, err)
e.queue.AddRateLimited(key)
return
}
@ -385,7 +385,7 @@ func (e *EndpointController) handleErr(err error, key interface{}) {
func (e *EndpointController) syncService(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Now().Sub(startTime))
glog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Since(startTime))
}()

namespace, name, err := cache.SplitMetaNamespaceKey(key)
@ -420,7 +420,8 @@ func (e *EndpointController) syncService(key string) error {
return err
}

var tolerateUnreadyEndpoints bool
// If the user specified the older (deprecated) annotation, we have to respect it.
tolerateUnreadyEndpoints := service.Spec.PublishNotReadyAddresses
if v, ok := service.Annotations[TolerateUnreadyEndpointsAnnotation]; ok {
b, err := strconv.ParseBool(v)
if err == nil {
@ -454,8 +455,8 @@ func (e *EndpointController) syncService(key string) error {
// Allow headless service not to have ports.
if len(service.Spec.Ports) == 0 {
if service.Spec.ClusterIP == api.ClusterIPNone {
epp := v1.EndpointPort{Port: 0, Protocol: v1.ProtocolTCP}
subsets, totalReadyEps, totalNotReadyEps = addEndpointSubset(subsets, pod, epa, epp, tolerateUnreadyEndpoints)
subsets, totalReadyEps, totalNotReadyEps = addEndpointSubset(subsets, pod, epa, nil, tolerateUnreadyEndpoints)
// No need to repack subsets for headless service without ports.
}
} else {
for i := range service.Spec.Ports {
@ -470,14 +471,14 @@ func (e *EndpointController) syncService(key string) error {
}

var readyEps, notReadyEps int
epp := v1.EndpointPort{Name: portName, Port: int32(portNum), Protocol: portProto}
epp := &v1.EndpointPort{Name: portName, Port: int32(portNum), Protocol: portProto}
subsets, readyEps, notReadyEps = addEndpointSubset(subsets, pod, epa, epp, tolerateUnreadyEndpoints)
totalReadyEps = totalReadyEps + readyEps
totalNotReadyEps = totalNotReadyEps + notReadyEps
}
subsets = endpoints.RepackSubsets(subsets)
}
}
subsets = endpoints.RepackSubsets(subsets)

// See if there's actually an update here.
currentEndpoints, err := e.endpointsLister.Endpoints(service.Namespace).Get(service.Name)
@ -561,20 +562,24 @@ func (e *EndpointController) checkLeftoverEndpoints() {
}

func addEndpointSubset(subsets []v1.EndpointSubset, pod *v1.Pod, epa v1.EndpointAddress,
epp v1.EndpointPort, tolerateUnreadyEndpoints bool) ([]v1.EndpointSubset, int, int) {
epp *v1.EndpointPort, tolerateUnreadyEndpoints bool) ([]v1.EndpointSubset, int, int) {
var readyEps int = 0
var notReadyEps int = 0
ports := []v1.EndpointPort{}
if epp != nil {
ports = append(ports, *epp)
}
if tolerateUnreadyEndpoints || podutil.IsPodReady(pod) {
subsets = append(subsets, v1.EndpointSubset{
Addresses: []v1.EndpointAddress{epa},
Ports: []v1.EndpointPort{epp},
Ports: ports,
})
readyEps++
} else if shouldPodBeInEndpoints(pod) {
glog.V(5).Infof("Pod is out of service: %s/%s", pod.Namespace, pod.Name)
subsets = append(subsets, v1.EndpointSubset{
NotReadyAddresses: []v1.EndpointAddress{epa},
Ports: []v1.EndpointPort{epp},
Ports: ports,
})
notReadyEps++
}
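The net effect of making epp a pointer: a headless Service with no ports now produces an empty Ports list instead of a placeholder port 0 entry, which is exactly what the updated TestSyncEndpointsHeadlessService below expects. Roughly (address value illustrative):

// Before: headless Services always carried a dummy port.
before := v1.EndpointSubset{
	Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
	Ports:     []v1.EndpointPort{{Port: 0, Protocol: v1.ProtocolTCP}},
}
// After: passing epp == nil yields no ports at all.
after := v1.EndpointSubset{
	Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
	Ports:     []v1.EndpointPort{},
}
_, _ = before, after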
15
vendor/k8s.io/kubernetes/pkg/controller/endpoint/endpoints_controller_test.go
generated
vendored
@ -26,6 +26,7 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
@ -34,7 +35,6 @@ import (
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/api/testapi"
endptspkg "k8s.io/kubernetes/pkg/api/v1/endpoints"
api "k8s.io/kubernetes/pkg/apis/core"
@ -48,7 +48,7 @@ var emptyNodeName string
func addPods(store cache.Store, namespace string, nPods int, nPorts int, nNotReady int) {
for i := 0; i < nPods+nNotReady; i++ {
p := &v1.Pod{
TypeMeta: metav1.TypeMeta{APIVersion: legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: fmt.Sprintf("pod%d", i),
@ -81,7 +81,7 @@ func addPods(store cache.Store, namespace string, nPods int, nPorts int, nNotRea
func addNotReadyPodsWithSpecifiedRestartPolicyAndPhase(store cache.Store, namespace string, nPods int, nPorts int, restartPolicy v1.RestartPolicy, podPhase v1.PodPhase) {
for i := 0; i < nPods; i++ {
p := &v1.Pod{
TypeMeta: metav1.TypeMeta{APIVersion: legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: fmt.Sprintf("pod%d", i),
@ -110,11 +110,6 @@ func addNotReadyPodsWithSpecifiedRestartPolicyAndPhase(store cache.Store, namesp
}
}

type serverResponse struct {
statusCode int
obj interface{}
}

func makeTestServer(t *testing.T, namespace string) (*httptest.Server, *utiltesting.FakeHandler) {
fakeEndpointsHandler := utiltesting.FakeHandler{
StatusCode: http.StatusOK,
@ -138,7 +133,7 @@ type endpointController struct {
}

func newController(url string) *endpointController {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: url, ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: url, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
endpoints := NewEndpointController(informerFactory.Core().V1().Pods(), informerFactory.Core().V1().Services(),
informerFactory.Core().V1().Endpoints(), client)
@ -753,7 +748,7 @@ func TestSyncEndpointsHeadlessService(t *testing.T) {
},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
Ports: []v1.EndpointPort{{Port: 0, Protocol: "TCP"}},
Ports: []v1.EndpointPort{},
}},
})
endpointsHandler.ValidateRequestCount(t, 1)
4
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/BUILD
generated
vendored
@ -54,13 +54,12 @@ go_test(
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/apis/core/install:go_default_library",
"//pkg/controller/garbagecollector/metaonly:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
@ -68,6 +67,7 @@ go_test(
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/dynamic:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
161
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/garbagecollector.go
generated
vendored
@ -32,6 +32,7 @@ import (
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
@ -59,10 +60,8 @@ const ResourceResyncTime time.Duration = 0
// ensures that the garbage collector operates with a graph that is at least as
// up to date as the notification is sent.
type GarbageCollector struct {
restMapper resettableRESTMapper
// clientPool uses the regular dynamicCodec. We need it to update
// finalizers. It can be removed if we support patching finalizers.
clientPool dynamic.ClientPool
restMapper resettableRESTMapper
dynamicClient dynamic.Interface
// garbage collector attempts to delete the items in attemptToDelete queue when the time is ripe.
attemptToDelete workqueue.RateLimitingInterface
// garbage collector attempts to orphan the dependents of the items in the attemptToOrphan queue, then deletes the items.
@ -76,8 +75,7 @@ type GarbageCollector struct {
}

func NewGarbageCollector(
metaOnlyClientPool dynamic.ClientPool,
clientPool dynamic.ClientPool,
dynamicClient dynamic.Interface,
mapper resettableRESTMapper,
deletableResources map[schema.GroupVersionResource]struct{},
ignoredResources map[schema.GroupResource]struct{},
@ -88,17 +86,17 @@ func NewGarbageCollector(
attemptToOrphan := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_attempt_to_orphan")
absentOwnerCache := NewUIDCache(500)
gc := &GarbageCollector{
clientPool: clientPool,
dynamicClient: dynamicClient,
restMapper: mapper,
attemptToDelete: attemptToDelete,
attemptToOrphan: attemptToOrphan,
absentOwnerCache: absentOwnerCache,
}
gb := &GraphBuilder{
metaOnlyClientPool: metaOnlyClientPool,
informersStarted: informersStarted,
restMapper: mapper,
graphChanges: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_graph_changes"),
dynamicClient: dynamicClient,
informersStarted: informersStarted,
restMapper: mapper,
graphChanges: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_graph_changes"),
uidToNode: &concurrentUIDToNode{
uidToNode: make(map[types.UID]*node),
},
@ -166,51 +164,79 @@ type resettableRESTMapper interface {
// Note that discoveryClient should NOT be shared with gc.restMapper, otherwise
// the mapper's underlying discovery client will be unnecessarily reset during
// the course of detecting new resources.
func (gc *GarbageCollector) Sync(discoveryClient discovery.DiscoveryInterface, period time.Duration, stopCh <-chan struct{}) {
func (gc *GarbageCollector) Sync(discoveryClient discovery.ServerResourcesInterface, period time.Duration, stopCh <-chan struct{}) {
oldResources := make(map[schema.GroupVersionResource]struct{})
wait.Until(func() {
// Get the current resource list from discovery.
newResources := GetDeletableResources(discoveryClient)

// This can occur if there is an internal error in GetDeletableResources.
if len(newResources) == 0 {
glog.V(2).Infof("no resources reported by discovery, skipping garbage collector sync")
return
}

// Decide whether discovery has reported a change.
if reflect.DeepEqual(oldResources, newResources) {
glog.V(5).Infof("no resource updates from discovery, skipping garbage collector sync")
return
}

// Something has changed, time to sync.
glog.V(2).Infof("syncing garbage collector with updated resources from discovery: %v", newResources)

// Ensure workers are paused to avoid processing events before informers
// have resynced.
gc.workerLock.Lock()
defer gc.workerLock.Unlock()

// Resetting the REST mapper will also invalidate the underlying discovery
// client. This is a leaky abstraction and assumes behavior about the REST
// mapper, but we'll deal with it for now.
gc.restMapper.Reset()
// Once we get here, we should not unpause workers until we've successfully synced
attempt := 0
wait.PollImmediateUntil(100*time.Millisecond, func() (bool, error) {
attempt++

// Perform the monitor resync and wait for controllers to report cache sync.
//
// NOTE: It's possible that newResources will diverge from the resources
// discovered by restMapper during the call to Reset, since they are
// distinct discovery clients invalidated at different times. For example,
// newResources may contain resources not returned in the restMapper's
// discovery call if the resources appeared in-between the calls. In that
// case, the restMapper will fail to map some of newResources until the next
// sync period.
if err := gc.resyncMonitors(newResources); err != nil {
utilruntime.HandleError(fmt.Errorf("failed to sync resource monitors: %v", err))
return
}
// TODO: WaitForCacheSync can block forever during normal operation. Could
// pass a timeout channel, but we have to consider the implications of
// un-pausing the GC with a partially synced graph builder.
if !controller.WaitForCacheSync("garbage collector", stopCh, gc.dependencyGraphBuilder.IsSynced) {
utilruntime.HandleError(fmt.Errorf("timed out waiting for dependency graph builder sync during GC sync"))
return
}
// On a reattempt, check if available resources have changed
if attempt > 1 {
newResources = GetDeletableResources(discoveryClient)
if len(newResources) == 0 {
glog.V(2).Infof("no resources reported by discovery (attempt %d)", attempt)
return false, nil
}
}

glog.V(2).Infof("syncing garbage collector with updated resources from discovery (attempt %d): %s", attempt, printDiff(oldResources, newResources))

// Resetting the REST mapper will also invalidate the underlying discovery
// client. This is a leaky abstraction and assumes behavior about the REST
// mapper, but we'll deal with it for now.
gc.restMapper.Reset()
glog.V(4).Infof("reset restmapper")

// Perform the monitor resync and wait for controllers to report cache sync.
//
// NOTE: It's possible that newResources will diverge from the resources
// discovered by restMapper during the call to Reset, since they are
// distinct discovery clients invalidated at different times. For example,
// newResources may contain resources not returned in the restMapper's
// discovery call if the resources appeared in-between the calls. In that
// case, the restMapper will fail to map some of newResources until the next
// attempt.
if err := gc.resyncMonitors(newResources); err != nil {
utilruntime.HandleError(fmt.Errorf("failed to sync resource monitors (attempt %d): %v", attempt, err))
return false, nil
}
glog.V(4).Infof("resynced monitors")

// wait for caches to fill for a while (our sync period) before attempting to rediscover resources and retry syncing.
// this protects us from deadlocks where available resources changed and one of our informer caches will never fill.
// informers keep attempting to sync in the background, so retrying doesn't interrupt them.
// the call to resyncMonitors on the reattempt will no-op for resources that still exist.
// note that workers stay paused until we successfully resync.
if !controller.WaitForCacheSync("garbage collector", waitForStopOrTimeout(stopCh, period), gc.dependencyGraphBuilder.IsSynced) {
utilruntime.HandleError(fmt.Errorf("timed out waiting for dependency graph builder sync during GC sync (attempt %d)", attempt))
return false, nil
}

// success, break out of the loop
return true, nil
}, stopCh)

// Finally, keep track of our new state. Do this after all preceding steps
// have succeeded to ensure we'll retry on subsequent syncs if an error
@ -220,6 +246,36 @@ func (gc *GarbageCollector) Sync(discoveryClient discovery.DiscoveryInterface, p
}, period, stopCh)
}

// printDiff returns a human-readable summary of what resources were added and removed
func printDiff(oldResources, newResources map[schema.GroupVersionResource]struct{}) string {
removed := sets.NewString()
for oldResource := range oldResources {
if _, ok := newResources[oldResource]; !ok {
removed.Insert(fmt.Sprintf("%+v", oldResource))
}
}
added := sets.NewString()
for newResource := range newResources {
if _, ok := oldResources[newResource]; !ok {
added.Insert(fmt.Sprintf("%+v", newResource))
}
}
return fmt.Sprintf("added: %v, removed: %v", added.List(), removed.List())
}

// waitForStopOrTimeout returns a stop channel that closes when the provided stop channel closes or when the specified timeout is reached
func waitForStopOrTimeout(stopCh <-chan struct{}, timeout time.Duration) <-chan struct{} {
stopChWithTimeout := make(chan struct{})
go func() {
select {
case <-stopCh:
case <-time.After(timeout):
}
close(stopChWithTimeout)
}()
return stopChWithTimeout
}

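waitForStopOrTimeout is what lets each sync attempt above bound its cache wait without ignoring shutdown: WaitForCacheSync receives a channel that closes on whichever comes first, the outer stop signal or the per-attempt deadline. A minimal usage sketch (duration illustrative):

stop := make(chan struct{})
timeoutCh := waitForStopOrTimeout(stop, 30*time.Second)
<-timeoutCh // unblocks after roughly 30s here, since stop is never closed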
func (gc *GarbageCollector) IsSynced() bool {
return gc.dependencyGraphBuilder.IsSynced()
}
@ -283,19 +339,15 @@ func (gc *GarbageCollector) isDangling(reference metav1.OwnerReference, item *no
// ii) should update the object to remove such references. This is to
// prevent objects having references to an old resource from being
// deleted during a cluster upgrade.
fqKind := schema.FromAPIVersionAndKind(reference.APIVersion, reference.Kind)
client, err := gc.clientPool.ClientForGroupVersionKind(fqKind)
if err != nil {
return false, nil, err
}
resource, err := gc.apiResource(reference.APIVersion, reference.Kind)
resource, namespaced, err := gc.apiResource(reference.APIVersion, reference.Kind)
if err != nil {
return false, nil, err
}

// TODO: It's only necessary to talk to the API server if the owner node
// is a "virtual" node. The local graph could lag behind the real
// status, but in practice, the difference is small.
owner, err = client.Resource(resource, item.identity.Namespace).Get(reference.Name, metav1.GetOptions{})
owner, err = gc.dynamicClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.identity.Namespace)).Get(reference.Name, metav1.GetOptions{})
switch {
case errors.IsNotFound(err):
gc.absentOwnerCache.Add(reference.UID)
@ -416,8 +468,11 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
// waitingForDependentsDeletion needs to be deleted from the
// ownerReferences, otherwise the referenced objects will be stuck with
// the FinalizerDeletingDependents and never get deleted.
patch := deleteOwnerRefPatch(item.identity.UID, append(ownerRefsToUIDs(dangling), ownerRefsToUIDs(waitingForDependentsDeletion)...)...)
_, err = gc.patchObject(item.identity, patch)
ownerUIDs := append(ownerRefsToUIDs(dangling), ownerRefsToUIDs(waitingForDependentsDeletion)...)
patch := deleteOwnerRefStrategicMergePatch(item.identity.UID, ownerUIDs...)
_, err = gc.patch(item, patch, func(n *node) ([]byte, error) {
return gc.deleteOwnerRefJSONMergePatch(n, ownerUIDs...)
})
return err
case len(waitingForDependentsDeletion) != 0 && item.dependentsLength() != 0:
deps := item.getDependents()
@ -429,11 +484,11 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
// there are multiple workers run attemptToDeleteItem in
// parallel, the circle detection can fail in a race condition.
glog.V(2).Infof("processing object %s, some of its owners and its dependent [%s] have FinalizerDeletingDependents, to prevent potential cycle, its ownerReferences are going to be modified to be non-blocking, then the object is going to be deleted with Foreground", item.identity, dep.identity)
patch, err := item.patchToUnblockOwnerReferences()
patch, err := item.unblockOwnerReferencesStrategicMergePatch()
if err != nil {
return err
}
if _, err := gc.patchObject(item.identity, patch); err != nil {
if _, err := gc.patch(item, patch, gc.unblockOwnerReferencesJSONMergePatch); err != nil {
return err
}
break
@ -493,8 +548,10 @@ func (gc *GarbageCollector) orphanDependents(owner objectReference, dependents [
go func(dependent *node) {
defer wg.Done()
// the dependent.identity.UID is used as precondition
patch := deleteOwnerRefPatch(dependent.identity.UID, owner.UID)
_, err := gc.patchObject(dependent.identity, patch)
patch := deleteOwnerRefStrategicMergePatch(dependent.identity.UID, owner.UID)
_, err := gc.patch(dependent, patch, func(n *node) ([]byte, error) {
return gc.deleteOwnerRefJSONMergePatch(n, owner.UID)
})
// note that if the target ownerReference doesn't exist in the
// dependent, strategic merge patch will NOT return an error.
if err != nil && !errors.IsNotFound(err) {
242
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/garbagecollector_test.go
generated
vendored
@ -24,6 +24,7 @@ import (
"strings"
"sync"
"testing"
"time"

"github.com/stretchr/testify/assert"

@ -31,9 +32,9 @@ import (

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/apimachinery/pkg/util/sets"
@ -41,11 +42,11 @@ import (
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
)

type testRESTMapper struct {
@ -56,12 +57,13 @@ func (_ *testRESTMapper) Reset() {}

func TestGarbageCollectorConstruction(t *testing.T) {
config := &restclient.Config{}
config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
tweakableRM := meta.NewDefaultRESTMapper(nil, nil)
rm := &testRESTMapper{meta.MultiRESTMapper{tweakableRM, legacyscheme.Registry.RESTMapper()}}
metaOnlyClientPool := dynamic.NewClientPool(config, rm, dynamic.LegacyAPIPathResolverFunc)
config.ContentConfig.NegotiatedSerializer = nil
clientPool := dynamic.NewClientPool(config, rm, dynamic.LegacyAPIPathResolverFunc)
tweakableRM := meta.NewDefaultRESTMapper(nil)
rm := &testRESTMapper{meta.MultiRESTMapper{tweakableRM, testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}}
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
t.Fatal(err)
}

podResource := map[schema.GroupVersionResource]struct{}{
{Version: "v1", Resource: "pods"}: {},
}
@ -76,7 +78,7 @@ func TestGarbageCollectorConstruction(t *testing.T) {
// construction will not fail.
alwaysStarted := make(chan struct{})
close(alwaysStarted)
gc, err := NewGarbageCollector(metaOnlyClientPool, clientPool, rm, twoResources, map[schema.GroupResource]struct{}{}, sharedInformers, alwaysStarted)
gc, err := NewGarbageCollector(dynamicClient, rm, twoResources, map[schema.GroupResource]struct{}{}, sharedInformers, alwaysStarted)
if err != nil {
t.Fatal(err)
}
@ -142,18 +144,34 @@ type fakeActionHandler struct {

// ServeHTTP logs the action that occurred and always returns the associated status code
func (f *fakeActionHandler) ServeHTTP(response http.ResponseWriter, request *http.Request) {
f.lock.Lock()
defer f.lock.Unlock()
func() {
f.lock.Lock()
defer f.lock.Unlock()

f.actions = append(f.actions, fakeAction{method: request.Method, path: request.URL.Path, query: request.URL.RawQuery})
fakeResponse, ok := f.response[request.Method+request.URL.Path]
if !ok {
fakeResponse.statusCode = 200
fakeResponse.content = []byte("{\"kind\": \"List\"}")
f.actions = append(f.actions, fakeAction{method: request.Method, path: request.URL.Path, query: request.URL.RawQuery})
fakeResponse, ok := f.response[request.Method+request.URL.Path]
if !ok {
fakeResponse.statusCode = 200
fakeResponse.content = []byte("{\"kind\": \"List\"}")
}
response.Header().Set("Content-Type", "application/json")
response.WriteHeader(fakeResponse.statusCode)
response.Write(fakeResponse.content)
}()

// This is to allow the fakeActionHandler to simulate a watch being opened
if strings.Contains(request.URL.RawQuery, "watch=true") {
hijacker, ok := response.(http.Hijacker)
if !ok {
return
}
connection, _, err := hijacker.Hijack()
if err != nil {
return
}
defer connection.Close()
time.Sleep(30 * time.Second)
}
response.Header().Set("Content-Type", "application/json")
response.WriteHeader(fakeResponse.statusCode)
response.Write(fakeResponse.content)
}

// testServerAndClientConfig returns a server that listens and a config that can reference it
@ -171,16 +189,17 @@ type garbageCollector struct {
}

func setupGC(t *testing.T, config *restclient.Config) garbageCollector {
config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
metaOnlyClientPool := dynamic.NewClientPool(config, legacyscheme.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
config.ContentConfig.NegotiatedSerializer = nil
clientPool := dynamic.NewClientPool(config, legacyscheme.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
t.Fatal(err)
}

podResource := map[schema.GroupVersionResource]struct{}{{Version: "v1", Resource: "pods"}: {}}
client := fake.NewSimpleClientset()
sharedInformers := informers.NewSharedInformerFactory(client, 0)
alwaysStarted := make(chan struct{})
close(alwaysStarted)
gc, err := NewGarbageCollector(metaOnlyClientPool, clientPool, &testRESTMapper{legacyscheme.Registry.RESTMapper()}, podResource, ignoredResources, sharedInformers, alwaysStarted)
gc, err := NewGarbageCollector(dynamicClient, &testRESTMapper{testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}, podResource, ignoredResources, sharedInformers, alwaysStarted)
if err != nil {
t.Fatal(err)
}
@ -415,13 +434,13 @@ func TestGCListWatcher(t *testing.T) {
testHandler := &fakeActionHandler{}
srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
defer srv.Close()
clientPool := dynamic.NewClientPool(clientConfig, legacyscheme.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
podResource := schema.GroupVersionResource{Version: "v1", Resource: "pods"}
client, err := clientPool.ClientForGroupVersionResource(podResource)
dynamicClient, err := dynamic.NewForConfig(clientConfig)
if err != nil {
t.Fatal(err)
}
lw := listWatcher(client, podResource)

lw := listWatcher(dynamicClient, podResource)
lw.DisableChunking = true
if _, err := lw.Watch(metav1.ListOptions{ResourceVersion: "1"}); err != nil {
t.Fatal(err)
@ -574,7 +593,7 @@ func TestDeleteOwnerRefPatch(t *testing.T) {
},
},
}
patch := deleteOwnerRefPatch("100", "2", "3")
patch := deleteOwnerRefStrategicMergePatch("100", "2", "3")
patched, err := strategicpatch.StrategicMergePatch(originalData, patch, v1.Pod{})
if err != nil {
t.Fatal(err)
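For context, the renamed helper builds a strategic-merge-patch body of approximately this shape; the UIDs ("100", "2", "3") are the ones exercised by the test above:

// Approximate body of deleteOwnerRefStrategicMergePatch("100", "2", "3"):
// each listed owner UID is dropped from metadata.ownerReferences, and the
// dependent's own UID acts as a precondition on the patch.
patch := []byte(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"2"},{"$patch":"delete","uid":"3"}],"uid":"100"}}`)
_ = patch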
@ -619,7 +638,7 @@ func TestUnblockOwnerReference(t *testing.T) {
|
||||
n := node{
|
||||
owners: accessor.GetOwnerReferences(),
|
||||
}
|
||||
patch, err := n.patchToUnblockOwnerReferences()
|
||||
patch, err := n.unblockOwnerReferencesStrategicMergePatch()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -668,9 +687,13 @@ func TestOrphanDependentsFailure(t *testing.T) {
|
||||
},
|
||||
}
|
||||
err := gc.orphanDependents(objectReference{}, dependents)
|
||||
expected := `the server reported a conflict (patch pods pod)`
|
||||
expected := `the server reported a conflict`
|
||||
if err == nil || !strings.Contains(err.Error(), expected) {
|
||||
t.Errorf("expected error contains text %s, got %v", expected, err)
|
||||
if err != nil {
|
||||
t.Errorf("expected error contains text %q, got %q", expected, err.Error())
|
||||
} else {
|
||||
t.Errorf("expected error contains text %q, got nil", expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -766,9 +789,145 @@ func TestGetDeletableResources(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestGarbageCollectorSync ensures that a discovery client error
|
||||
// will not cause the garbage collector to block infinitely.
|
||||
func TestGarbageCollectorSync(t *testing.T) {
|
||||
serverResources := []*metav1.APIResourceList{
|
||||
{
|
||||
GroupVersion: "v1",
|
||||
APIResources: []metav1.APIResource{
|
||||
{Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
|
||||
},
|
||||
},
|
||||
}
|
||||
unsyncableServerResources := []*metav1.APIResourceList{
|
||||
{
|
||||
GroupVersion: "v1",
|
||||
APIResources: []metav1.APIResource{
|
||||
{Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
|
||||
{Name: "secrets", Namespaced: true, Kind: "Secret", Verbs: metav1.Verbs{"delete", "list", "watch"}},
|
||||
},
|
||||
},
|
||||
}
|
||||
fakeDiscoveryClient := &fakeServerResources{
|
||||
PreferredResources: serverResources,
|
||||
Error: nil,
|
||||
Lock: sync.Mutex{},
|
||||
InterfaceUsedCount: 0,
|
||||
}
|
||||
|
||||
testHandler := &fakeActionHandler{
|
||||
response: map[string]FakeResponse{
|
||||
"GET" + "/api/v1/pods": {
|
||||
200,
|
||||
[]byte("{}"),
|
||||
},
|
||||
"GET" + "/api/v1/secrets": {
|
||||
404,
|
||||
[]byte("{}"),
|
||||
},
|
||||
},
|
||||
}
|
||||
srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
|
||||
defer srv.Close()
|
||||
clientConfig.ContentConfig.NegotiatedSerializer = nil
|
||||
client, err := kubernetes.NewForConfig(clientConfig)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
rm := &testRESTMapper{testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}
|
||||
dynamicClient, err := dynamic.NewForConfig(clientConfig)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
podResource := map[schema.GroupVersionResource]struct{}{
|
||||
{Group: "", Version: "v1", Resource: "pods"}: {},
|
||||
}
|
||||
sharedInformers := informers.NewSharedInformerFactory(client, 0)
|
||||
alwaysStarted := make(chan struct{})
|
||||
close(alwaysStarted)
gc, err := NewGarbageCollector(dynamicClient, rm, podResource, map[schema.GroupResource]struct{}{}, sharedInformers, alwaysStarted)
if err != nil {
t.Fatal(err)
}

stopCh := make(chan struct{})
defer close(stopCh)
go gc.Run(1, stopCh)
go gc.Sync(fakeDiscoveryClient, 10*time.Millisecond, stopCh)

// Wait until the sync discovers the initial resources
fmt.Printf("Test output")
time.Sleep(1 * time.Second)

err = expectSyncNotBlocked(fakeDiscoveryClient, &gc.workerLock)
if err != nil {
t.Fatalf("Expected garbagecollector.Sync to be running but it is blocked: %v", err)
}

// Simulate the discovery client returning an error
fakeDiscoveryClient.setPreferredResources(nil)
fakeDiscoveryClient.setError(fmt.Errorf("Error calling discoveryClient.ServerPreferredResources()"))

// Wait until sync discovers the change
time.Sleep(1 * time.Second)

// Remove the error from being returned and see if the garbage collector sync is still working
fakeDiscoveryClient.setPreferredResources(serverResources)
fakeDiscoveryClient.setError(nil)

err = expectSyncNotBlocked(fakeDiscoveryClient, &gc.workerLock)
if err != nil {
t.Fatalf("Expected garbagecollector.Sync to still be running but it is blocked: %v", err)
}

// Simulate the discovery client returning a resource the restmapper can resolve, but will not sync caches
fakeDiscoveryClient.setPreferredResources(unsyncableServerResources)
fakeDiscoveryClient.setError(nil)

// Wait until sync discovers the change
time.Sleep(1 * time.Second)

// Put the resources back to normal and ensure garbage collector sync recovers
fakeDiscoveryClient.setPreferredResources(serverResources)
fakeDiscoveryClient.setError(nil)

err = expectSyncNotBlocked(fakeDiscoveryClient, &gc.workerLock)
if err != nil {
t.Fatalf("Expected garbagecollector.Sync to still be running but it is blocked: %v", err)
}
}

func expectSyncNotBlocked(fakeDiscoveryClient *fakeServerResources, workerLock *sync.RWMutex) error {
before := fakeDiscoveryClient.getInterfaceUsedCount()
t := 1 * time.Second
time.Sleep(t)
after := fakeDiscoveryClient.getInterfaceUsedCount()
if before == after {
return fmt.Errorf("discoveryClient.ServerPreferredResources() called %d times over %v", after-before, t)
}

workerLockAcquired := make(chan struct{})
go func() {
workerLock.Lock()
workerLock.Unlock()
close(workerLockAcquired)
}()
select {
case <-workerLockAcquired:
return nil
case <-time.After(t):
return fmt.Errorf("workerLock blocked for at least %v", t)
}
}

type fakeServerResources struct {
PreferredResources []*metav1.APIResourceList
Error error
Lock sync.Mutex
InterfaceUsedCount int
}

func (_ *fakeServerResources) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) {
@ -780,9 +939,30 @@ func (_ *fakeServerResources) ServerResources() ([]*metav1.APIResourceList, erro
}

func (f *fakeServerResources) ServerPreferredResources() ([]*metav1.APIResourceList, error) {
f.Lock.Lock()
defer f.Lock.Unlock()
f.InterfaceUsedCount++
return f.PreferredResources, f.Error
}

func (f *fakeServerResources) setPreferredResources(resources []*metav1.APIResourceList) {
f.Lock.Lock()
defer f.Lock.Unlock()
f.PreferredResources = resources
}

func (f *fakeServerResources) setError(err error) {
f.Lock.Lock()
defer f.Lock.Unlock()
f.Error = err
}

func (f *fakeServerResources) getInterfaceUsedCount() int {
f.Lock.Lock()
defer f.Lock.Unlock()
return f.InterfaceUsedCount
}

func (_ *fakeServerResources) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) {
return nil, nil
}
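An aside on the test helper above: expectSyncNotBlocked treats "the worker lock can be acquired promptly" as a proxy for "gc.Sync is not wedged". A minimal, self-contained sketch of that lock-probe idiom (all names below are illustrative, not part of the vendored code):

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    // tryLockWithTimeout reports whether mu can be acquired within d, using the
    // same select-on-channel pattern as expectSyncNotBlocked above.
    func tryLockWithTimeout(mu *sync.RWMutex, d time.Duration) bool {
        acquired := make(chan struct{})
        go func() {
            mu.Lock()
            mu.Unlock()
            close(acquired)
        }()
        select {
        case <-acquired:
            return true
        case <-time.After(d):
            // The goroutine may still take the lock later; this only bounds
            // how long the caller waits before declaring the lock blocked.
            return false
        }
    }

    func main() {
        var mu sync.RWMutex
        fmt.Println(tryLockWithTimeout(&mu, time.Second)) // true: lock is free

        mu.Lock()
        fmt.Println(tryLockWithTimeout(&mu, 100*time.Millisecond)) // false: lock is held
        mu.Unlock()
    }
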
51
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/graph_builder.go
generated
vendored
51
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/graph_builder.go
generated
vendored
@ -78,7 +78,7 @@ type GraphBuilder struct {
// each monitor list/watches a resource, the results are funneled to the
// dependencyGraphBuilder
monitors monitors
monitorLock sync.Mutex
monitorLock sync.RWMutex
// informersStarted is closed after all of the controllers have been initialized and are running.
// After that it is safe to start them here, before that it is not.
informersStarted <-chan struct{}
@ -91,9 +91,7 @@ type GraphBuilder struct {
// it is protected by monitorLock.
running bool

// metaOnlyClientPool uses a special codec, which removes fields except for
// apiVersion, kind, and metadata during decoding.
metaOnlyClientPool dynamic.ClientPool
dynamicClient dynamic.Interface
// monitors are the producer of the graphChanges queue, graphBuilder alters
// the in-memory graph according to the changes.
graphChanges workqueue.RateLimitingInterface
@ -113,6 +111,7 @@ type GraphBuilder struct {
// monitor runs a Controller with a local stop channel.
type monitor struct {
controller cache.Controller
store cache.Store

// stopCh stops Controller. If stopCh is nil, the monitor is considered to be
// not yet started.
@ -130,29 +129,17 @@ type monitors map[schema.GroupVersionResource]*monitor
func listWatcher(client dynamic.Interface, resource schema.GroupVersionResource) *cache.ListWatch {
return &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
// APIResource.Kind is not used by the dynamic client, so
// leave it empty. We want to list this resource in all
// namespaces if it's namespace scoped, so leave
// APIResource.Namespaced as false is all right.
apiResource := metav1.APIResource{Name: resource.Resource}
return client.ParameterCodec(dynamic.VersionedParameterEncoderWithV1Fallback).
Resource(&apiResource, metav1.NamespaceAll).
List(options)
// We want to list this resource in all namespaces if it's namespace scoped, so not passing namespace is ok.
return client.Resource(resource).List(options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
// APIResource.Kind is not used by the dynamic client, so
// leave it empty. We want to list this resource in all
// namespaces if it's namespace scoped, so leave
// APIResource.Namespaced as false is all right.
apiResource := metav1.APIResource{Name: resource.Resource}
return client.ParameterCodec(dynamic.VersionedParameterEncoderWithV1Fallback).
Resource(&apiResource, metav1.NamespaceAll).
Watch(options)
// We want to list this resource in all namespaces if it's namespace scoped, so not passing namespace is ok.
return client.Resource(resource).Watch(options)
},
}
}

func (gb *GraphBuilder) controllerFor(resource schema.GroupVersionResource, kind schema.GroupVersionKind) (cache.Controller, error) {
func (gb *GraphBuilder) controllerFor(resource schema.GroupVersionResource, kind schema.GroupVersionKind) (cache.Controller, cache.Store, error) {
handlers := cache.ResourceEventHandlerFuncs{
// add the event to the dependencyGraphBuilder's graphChanges.
AddFunc: func(obj interface{}) {
@ -192,25 +179,21 @@ func (gb *GraphBuilder) controllerFor(resource schema.GroupVersionResource, kind
glog.V(4).Infof("using a shared informer for resource %q, kind %q", resource.String(), kind.String())
// need to clone because it's from a shared cache
shared.Informer().AddEventHandlerWithResyncPeriod(handlers, ResourceResyncTime)
return shared.Informer().GetController(), nil
return shared.Informer().GetController(), shared.Informer().GetStore(), nil
} else {
glog.V(4).Infof("unable to use a shared informer for resource %q, kind %q: %v", resource.String(), kind.String(), err)
}

// TODO: consider store in one storage.
glog.V(5).Infof("create storage for resource %s", resource)
client, err := gb.metaOnlyClientPool.ClientForGroupVersionKind(kind)
if err != nil {
return nil, err
}
_, monitor := cache.NewInformer(
listWatcher(client, resource),
store, monitor := cache.NewInformer(
listWatcher(gb.dynamicClient, resource),
nil,
ResourceResyncTime,
// don't need to clone because it's not from shared cache
handlers,
)
return monitor, nil
return monitor, store, nil
}

// syncMonitors rebuilds the monitor set according to the supplied resources,
@ -246,12 +229,12 @@ func (gb *GraphBuilder) syncMonitors(resources map[schema.GroupVersionResource]s
errs = append(errs, fmt.Errorf("couldn't look up resource %q: %v", resource, err))
continue
}
c, err := gb.controllerFor(resource, kind)
c, s, err := gb.controllerFor(resource, kind)
if err != nil {
errs = append(errs, fmt.Errorf("couldn't start monitor for resource %q: %v", resource, err))
continue
}
current[resource] = &monitor{controller: c}
current[resource] = &monitor{store: s, controller: c}
added++
}
gb.monitors = current
@ -306,11 +289,13 @@ func (gb *GraphBuilder) IsSynced() bool {
defer gb.monitorLock.Unlock()

if len(gb.monitors) == 0 {
glog.V(4).Info("garbage controller monitor not synced: no monitors")
return false
}

for _, monitor := range gb.monitors {
for resource, monitor := range gb.monitors {
if !monitor.controller.HasSynced() {
glog.V(4).Infof("garbage controller monitor not yet synced: %+v", resource)
return false
}
}
@ -361,8 +346,6 @@ var ignoredResources = map[schema.GroupResource]struct{}{
{Group: "authorization.k8s.io", Resource: "selfsubjectaccessreviews"}: {},
{Group: "authorization.k8s.io", Resource: "localsubjectaccessreviews"}: {},
{Group: "authorization.k8s.io", Resource: "selfsubjectrulesreviews"}: {},
{Group: "apiregistration.k8s.io", Resource: "apiservices"}: {},
{Group: "apiextensions.k8s.io", Resource: "customresourcedefinitions"}: {},
}

// DefaultIgnoredResources returns the default set of resources that the garbage collector controller
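The listWatcher rewrite above is the heart of this hunk: the graph builder no longer needs a per-kind client from a pool, just one dynamic.Interface keyed by GroupVersionResource. A hedged sketch of the same generic list/watch construction, assuming the List/Watch signatures of the client-go vintage vendored here (newer releases add context and namespace parameters):

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/runtime/schema"
        "k8s.io/apimachinery/pkg/watch"
        "k8s.io/client-go/dynamic"
        "k8s.io/client-go/tools/cache"
    )

    // genericListWatcher builds a cache.ListWatch for any GroupVersionResource,
    // mirroring the simplified listWatcher above. Leaving the namespace unset
    // lists across all namespaces for namespaced resources.
    func genericListWatcher(client dynamic.Interface, gvr schema.GroupVersionResource) *cache.ListWatch {
        return &cache.ListWatch{
            ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
                return client.Resource(gvr).List(options)
            },
            WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
                return client.Resource(gvr).Watch(options)
            },
        }
    }
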
20
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/BUILD
generated
vendored
20
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/BUILD
generated
vendored
@ -3,38 +3,18 @@ package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)

go_library(
name = "go_default_library",
srcs = [
"metaonly.go",
"types.go",
"zz_generated.deepcopy.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly",
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
],
)

go_test(
name = "go_default_test",
srcs = ["metaonly_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core/install:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
],
)
66
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/metaonly.go
generated
vendored
66
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/metaonly.go
generated
vendored
@ -1,66 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metaonly

import (
"fmt"
"strings"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/kubernetes/pkg/api/legacyscheme"
)

type metaOnlyJSONScheme struct{}

// This function can be extended to mapping different gvk to different MetadataOnlyObject,
// which embedded with different version of ObjectMeta. Currently the system
// only supports metav1.ObjectMeta.
func gvkToMetadataOnlyObject(gvk schema.GroupVersionKind) runtime.Object {
if strings.HasSuffix(gvk.Kind, "List") {
return &MetadataOnlyObjectList{}
} else {
return &MetadataOnlyObject{}
}
}

func NewMetadataCodecFactory() serializer.CodecFactory {
// populating another scheme from legacyscheme.Scheme, registering every kind with
// MetadataOnlyObject (or MetadataOnlyObjectList).
scheme := runtime.NewScheme()
allTypes := legacyscheme.Scheme.AllKnownTypes()
for kind := range allTypes {
if kind.Version == runtime.APIVersionInternal {
continue
}
if kind == metav1.Unversioned.WithKind("Status") {
// this is added below as unversioned
continue
}
metaOnlyObject := gvkToMetadataOnlyObject(kind)
scheme.AddKnownTypeWithName(kind, metaOnlyObject)
}
scheme.AddUnversionedTypes(metav1.Unversioned, &metav1.Status{})
return serializer.NewCodecFactory(scheme)
}

// String converts a MetadataOnlyObject to a human-readable string.
func (metaOnly MetadataOnlyObject) String() string {
return fmt.Sprintf("%s/%s, name: %s, DeletionTimestamp:%v", metaOnly.TypeMeta.APIVersion, metaOnly.TypeMeta.Kind, metaOnly.ObjectMeta.Name, metaOnly.ObjectMeta.DeletionTimestamp)
}
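With metaonly.go deleted, the garbage collector reads metadata off unstructured objects instead of decoding into a dedicated metadata-only type. A small illustrative program (not part of the diff) showing the replacement idiom, meta.Accessor over an unstructured object:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/api/meta"
        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    )

    func main() {
        // A minimal unstructured object; only apiVersion/kind/metadata matter here.
        obj := &unstructured.Unstructured{Object: map[string]interface{}{
            "apiVersion": "v1",
            "kind":       "Pod",
            "metadata": map[string]interface{}{
                "name":      "pod",
                "namespace": "default",
            },
        }}
        accessor, err := meta.Accessor(obj) // generic metav1.Object view
        if err != nil {
            panic(err)
        }
        fmt.Println(accessor.GetNamespace(), accessor.GetName()) // default pod
    }
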
164
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/metaonly_test.go
generated
vendored
164
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/metaonly_test.go
generated
vendored
@ -1,164 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metaonly

import (
"encoding/json"
"reflect"
"testing"

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
_ "k8s.io/kubernetes/pkg/apis/core/install"
)

func getPod() *v1.Pod {
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "pod",
OwnerReferences: []metav1.OwnerReference{
{UID: "1234"},
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "fake-name",
Image: "fakeimage",
},
},
},
}
}

func getPodJson(t *testing.T) []byte {
data, err := json.Marshal(getPod())
if err != nil {
t.Fatal(err)
}
return data
}

func getPodListJson(t *testing.T) []byte {
data, err := json.Marshal(&v1.PodList{
TypeMeta: metav1.TypeMeta{
Kind: "PodList",
APIVersion: "v1",
},
Items: []v1.Pod{
*getPod(),
*getPod(),
},
})
if err != nil {
t.Fatal(err)
}
return data
}

func verfiyMetadata(description string, t *testing.T, in *MetadataOnlyObject) {
pod := getPod()
if e, a := pod.ObjectMeta, in.ObjectMeta; !reflect.DeepEqual(e, a) {
t.Errorf("%s: expected %#v, got %#v", description, e, a)
}
}

func TestDecodeToMetadataOnlyObject(t *testing.T) {
data := getPodJson(t)
cf := serializer.DirectCodecFactory{CodecFactory: NewMetadataCodecFactory()}
info, ok := runtime.SerializerInfoForMediaType(cf.SupportedMediaTypes(), runtime.ContentTypeJSON)
if !ok {
t.Fatalf("expected to get a JSON serializer")
}
codec := cf.DecoderToVersion(info.Serializer, schema.GroupVersion{Group: "SOMEGROUP", Version: "SOMEVERSION"})
// decode with into
into := &MetadataOnlyObject{}
ret, _, err := codec.Decode(data, nil, into)
if err != nil {
t.Fatal(err)
}
metaOnly, ok := ret.(*MetadataOnlyObject)
if !ok {
t.Fatalf("expected ret to be *runtime.MetadataOnlyObject")
}
verfiyMetadata("check returned metaonly with into", t, metaOnly)
verfiyMetadata("check into", t, into)
// decode without into
ret, _, err = codec.Decode(data, nil, nil)
if err != nil {
t.Fatal(err)
}
metaOnly, ok = ret.(*MetadataOnlyObject)
if !ok {
t.Fatalf("expected ret to be *runtime.MetadataOnlyObject")
}
verfiyMetadata("check returned metaonly without into", t, metaOnly)
}

func verifyListMetadata(t *testing.T, metaOnlyList *MetadataOnlyObjectList) {
items, err := meta.ExtractList(metaOnlyList)
if err != nil {
t.Fatal(err)
}
for _, item := range items {
metaOnly, ok := item.(*MetadataOnlyObject)
if !ok {
t.Fatalf("expected item to be *MetadataOnlyObject")
}
verfiyMetadata("check list", t, metaOnly)
}
}

func TestDecodeToMetadataOnlyObjectList(t *testing.T) {
data := getPodListJson(t)
cf := serializer.DirectCodecFactory{CodecFactory: NewMetadataCodecFactory()}
info, ok := runtime.SerializerInfoForMediaType(cf.SupportedMediaTypes(), runtime.ContentTypeJSON)
if !ok {
t.Fatalf("expected to get a JSON serializer")
}
codec := cf.DecoderToVersion(info.Serializer, schema.GroupVersion{Group: "SOMEGROUP", Version: "SOMEVERSION"})
// decode with into
into := &MetadataOnlyObjectList{}
ret, _, err := codec.Decode(data, nil, into)
if err != nil {
t.Fatal(err)
}
metaOnlyList, ok := ret.(*MetadataOnlyObjectList)
if !ok {
t.Fatalf("expected ret to be *runtime.UnstructuredList")
}
verifyListMetadata(t, metaOnlyList)
verifyListMetadata(t, into)
// decode without into
ret, _, err = codec.Decode(data, nil, nil)
if err != nil {
t.Fatal(err)
}
metaOnlyList, ok = ret.(*MetadataOnlyObjectList)
if !ok {
t.Fatalf("expected ret to be *runtime.UnstructuredList")
}
verifyListMetadata(t, metaOnlyList)
}
@ -1,7 +1,7 @@
// +build !ignore_autogenerated

/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
58
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/operations.go
generated
vendored
58
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/operations.go
generated
vendored
@ -30,76 +30,58 @@ import (
"k8s.io/client-go/util/retry"
)

// cluster scoped resources don't have namespaces. Default to the item's namespace, but clear it for cluster scoped resources
func resourceDefaultNamespace(namespaced bool, defaultNamespace string) string {
if namespaced {
return defaultNamespace
}
return ""
}

// apiResource consults the REST mapper to translate an <apiVersion, kind,
// namespace> tuple to a unversioned.APIResource struct.
func (gc *GarbageCollector) apiResource(apiVersion, kind string) (*metav1.APIResource, error) {
func (gc *GarbageCollector) apiResource(apiVersion, kind string) (schema.GroupVersionResource, bool, error) {
fqKind := schema.FromAPIVersionAndKind(apiVersion, kind)
mapping, err := gc.restMapper.RESTMapping(fqKind.GroupKind(), fqKind.Version)
if err != nil {
return nil, newRESTMappingError(kind, apiVersion)
return schema.GroupVersionResource{}, false, newRESTMappingError(kind, apiVersion)
}
glog.V(5).Infof("map kind %s, version %s to resource %s", kind, apiVersion, mapping.Resource)
resource := metav1.APIResource{
Name: mapping.Resource,
Namespaced: mapping.Scope == meta.RESTScopeNamespace,
Kind: kind,
}
return &resource, nil
return mapping.Resource, mapping.Scope == meta.RESTScopeNamespace, nil
}

func (gc *GarbageCollector) deleteObject(item objectReference, policy *metav1.DeletionPropagation) error {
fqKind := schema.FromAPIVersionAndKind(item.APIVersion, item.Kind)
client, err := gc.clientPool.ClientForGroupVersionKind(fqKind)
if err != nil {
return err
}
resource, err := gc.apiResource(item.APIVersion, item.Kind)
resource, namespaced, err := gc.apiResource(item.APIVersion, item.Kind)
if err != nil {
return err
}
uid := item.UID
preconditions := metav1.Preconditions{UID: &uid}
deleteOptions := metav1.DeleteOptions{Preconditions: &preconditions, PropagationPolicy: policy}
return client.Resource(resource, item.Namespace).Delete(item.Name, &deleteOptions)
return gc.dynamicClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.Namespace)).Delete(item.Name, &deleteOptions)
}

func (gc *GarbageCollector) getObject(item objectReference) (*unstructured.Unstructured, error) {
fqKind := schema.FromAPIVersionAndKind(item.APIVersion, item.Kind)
client, err := gc.clientPool.ClientForGroupVersionKind(fqKind)
resource, namespaced, err := gc.apiResource(item.APIVersion, item.Kind)
if err != nil {
return nil, err
}
resource, err := gc.apiResource(item.APIVersion, item.Kind)
if err != nil {
return nil, err
}
return client.Resource(resource, item.Namespace).Get(item.Name, metav1.GetOptions{})
return gc.dynamicClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.Namespace)).Get(item.Name, metav1.GetOptions{})
}

func (gc *GarbageCollector) updateObject(item objectReference, obj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
fqKind := schema.FromAPIVersionAndKind(item.APIVersion, item.Kind)
client, err := gc.clientPool.ClientForGroupVersionKind(fqKind)
resource, namespaced, err := gc.apiResource(item.APIVersion, item.Kind)
if err != nil {
return nil, err
}
resource, err := gc.apiResource(item.APIVersion, item.Kind)
if err != nil {
return nil, err
}
return client.Resource(resource, item.Namespace).Update(obj)
return gc.dynamicClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.Namespace)).Update(obj)
}

func (gc *GarbageCollector) patchObject(item objectReference, patch []byte) (*unstructured.Unstructured, error) {
fqKind := schema.FromAPIVersionAndKind(item.APIVersion, item.Kind)
client, err := gc.clientPool.ClientForGroupVersionKind(fqKind)
func (gc *GarbageCollector) patchObject(item objectReference, patch []byte, pt types.PatchType) (*unstructured.Unstructured, error) {
resource, namespaced, err := gc.apiResource(item.APIVersion, item.Kind)
if err != nil {
return nil, err
}
resource, err := gc.apiResource(item.APIVersion, item.Kind)
if err != nil {
return nil, err
}
return client.Resource(resource, item.Namespace).Patch(item.Name, types.StrategicMergePatchType, patch)
return gc.dynamicClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.Namespace)).Patch(item.Name, pt, patch)
}

// TODO: Using Patch when strategicmerge supports deleting an entry from a
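One subtlety in the operations.go rewrite: every call site now routes the namespace through resourceDefaultNamespace, because a cluster-scoped resource must be addressed with an empty namespace even when the referencing object carried one. A sketch of the call-site shape (dynamicClient, gvr, namespaced, and item are assumed to be in scope; they are not vendored names):

    // namespaceFor restates resourceDefaultNamespace: cluster-scoped resources
    // are addressed with the empty namespace, namespaced ones keep the owner's.
    func namespaceFor(namespaced bool, ns string) string {
        if namespaced {
            return ns
        }
        return ""
    }

    // Call-site shape, matching the Get in the hunk above:
    // obj, err := dynamicClient.Resource(gvr).
    //     Namespace(namespaceFor(namespaced, item.Namespace)).
    //     Get(item.Name, metav1.GetOptions{})
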
117
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/patch.go
generated
vendored
117
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/patch.go
generated
vendored
@ -21,12 +21,16 @@ import (
"fmt"
"strings"

"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
)

func deleteOwnerRefPatch(dependentUID types.UID, ownerUIDs ...types.UID) []byte {
func deleteOwnerRefStrategicMergePatch(dependentUID types.UID, ownerUIDs ...types.UID) []byte {
var pieces []string
for _, ownerUID := range ownerUIDs {
pieces = append(pieces, fmt.Sprintf(`{"$patch":"delete","uid":"%s"}`, ownerUID))
@ -35,9 +39,97 @@ func deleteOwnerRefPatch(dependentUID types.UID, ownerUIDs ...types.UID) []byte
return []byte(patch)
}

// generate a patch that unsets the BlockOwnerDeletion field of all
// getMetadata tries getting object metadata from local cache, and sends GET request to apiserver when
// local cache is not available or not latest.
func (gc *GarbageCollector) getMetadata(apiVersion, kind, namespace, name string) (metav1.Object, error) {
apiResource, _, err := gc.apiResource(apiVersion, kind)
if err != nil {
return nil, err
}
gc.dependencyGraphBuilder.monitorLock.RLock()
defer gc.dependencyGraphBuilder.monitorLock.RUnlock()
m, ok := gc.dependencyGraphBuilder.monitors[apiResource]
if !ok || m == nil {
// If local cache doesn't exist for mapping.Resource, send a GET request to API server
return gc.dynamicClient.Resource(apiResource).Namespace(namespace).Get(name, metav1.GetOptions{})
}
key := name
if len(namespace) != 0 {
key = namespace + "/" + name
}
raw, exist, err := m.store.GetByKey(key)
if err != nil {
return nil, err
}
if !exist {
// If local cache doesn't contain the object, send a GET request to API server
return gc.dynamicClient.Resource(apiResource).Namespace(namespace).Get(name, metav1.GetOptions{})
}
obj, ok := raw.(runtime.Object)
if !ok {
return nil, fmt.Errorf("expect a runtime.Object, got %v", raw)
}
return meta.Accessor(obj)
}

type objectForPatch struct {
ObjectMetaForPatch `json:"metadata"`
}

type ObjectMetaForPatch struct {
ResourceVersion string `json:"resourceVersion"`
OwnerReferences []metav1.OwnerReference `json:"ownerReferences"`
}

// jsonMergePatchFunc defines the interface for functions that construct json merge patches that manipulate
// owner reference array.
type jsonMergePatchFunc func(*node) ([]byte, error)

// patch tries strategic merge patch on item first, and if SMP is not supported, it falls back to JSON merge
// patch.
func (gc *GarbageCollector) patch(item *node, smp []byte, jmp jsonMergePatchFunc) (*unstructured.Unstructured, error) {
smpResult, err := gc.patchObject(item.identity, smp, types.StrategicMergePatchType)
if err == nil {
return smpResult, nil
}
if !errors.IsUnsupportedMediaType(err) {
return nil, err
}
// StrategicMergePatch is not supported, use JSON merge patch instead
patch, err := jmp(item)
if err != nil {
return nil, err
}
return gc.patchObject(item.identity, patch, types.MergePatchType)
}

// Returns JSON merge patch that removes the ownerReferences matching ownerUIDs.
func (gc *GarbageCollector) deleteOwnerRefJSONMergePatch(item *node, ownerUIDs ...types.UID) ([]byte, error) {
accessor, err := gc.getMetadata(item.identity.APIVersion, item.identity.Kind, item.identity.Namespace, item.identity.Name)
if err != nil {
return nil, err
}
expectedObjectMeta := ObjectMetaForPatch{}
expectedObjectMeta.ResourceVersion = accessor.GetResourceVersion()
refs := accessor.GetOwnerReferences()
for _, ref := range refs {
var skip bool
for _, ownerUID := range ownerUIDs {
if ref.UID == ownerUID {
skip = true
break
}
}
if !skip {
expectedObjectMeta.OwnerReferences = append(expectedObjectMeta.OwnerReferences, ref)
}
}
return json.Marshal(objectForPatch{expectedObjectMeta})
}

// Generate a patch that unsets the BlockOwnerDeletion field of all
// ownerReferences of node.
func (n *node) patchToUnblockOwnerReferences() ([]byte, error) {
func (n *node) unblockOwnerReferencesStrategicMergePatch() ([]byte, error) {
var dummy metaonly.MetadataOnlyObject
var blockingRefs []metav1.OwnerReference
falseVar := false
@ -52,3 +144,22 @@ func (n *node) patchToUnblockOwnerReferences() ([]byte, error) {
dummy.ObjectMeta.UID = n.identity.UID
return json.Marshal(dummy)
}

// Generate a JSON merge patch that unsets the BlockOwnerDeletion field of all
// ownerReferences of node.
func (gc *GarbageCollector) unblockOwnerReferencesJSONMergePatch(n *node) ([]byte, error) {
accessor, err := gc.getMetadata(n.identity.APIVersion, n.identity.Kind, n.identity.Namespace, n.identity.Name)
if err != nil {
return nil, err
}
expectedObjectMeta := ObjectMetaForPatch{}
expectedObjectMeta.ResourceVersion = accessor.GetResourceVersion()
var expectedOwners []metav1.OwnerReference
falseVar := false
for _, owner := range n.owners {
owner.BlockOwnerDeletion = &falseVar
expectedOwners = append(expectedOwners, owner)
}
expectedObjectMeta.OwnerReferences = expectedOwners
return json.Marshal(objectForPatch{expectedObjectMeta})
}
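The new gc.patch helper implements a capability probe: try a strategic merge patch first, and only when the server answers 415 Unsupported Media Type (as it does for custom resources) fall back to an equivalent JSON merge patch built from the current object state. A standalone sketch of that control flow (applyFn and buildJMP are stand-ins, not vendored names):

    import (
        "k8s.io/apimachinery/pkg/api/errors"
        "k8s.io/apimachinery/pkg/types"
    )

    // applyFn stands in for a client patch call such as gc.patchObject above.
    type applyFn func(pt types.PatchType, body []byte) error

    // patchWithFallback probes for strategic-merge-patch support and falls back
    // to a JSON merge patch on 415 Unsupported Media Type.
    func patchWithFallback(apply applyFn, smp []byte, buildJMP func() ([]byte, error)) error {
        err := apply(types.StrategicMergePatchType, smp)
        if err == nil || !errors.IsUnsupportedMediaType(err) {
            return err // success, or a real failure we should surface
        }
        jmp, err := buildJMP()
        if err != nil {
            return err
        }
        return apply(types.MergePatchType, jmp)
    }

Note the design choice the JSON merge patch forces: because a merge patch replaces the whole ownerReferences array, the patch body must carry the object's resourceVersion (see ObjectMetaForPatch above) so a concurrent writer causes a conflict instead of a silent overwrite.
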
1
vendor/k8s.io/kubernetes/pkg/controller/history/BUILD
generated
vendored
1
vendor/k8s.io/kubernetes/pkg/controller/history/BUILD
generated
vendored
@ -11,6 +11,7 @@ go_test(
srcs = ["controller_history_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/api/testapi:go_default_library",
"//pkg/controller:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
5
vendor/k8s.io/kubernetes/pkg/controller/history/controller_history_test.go
generated
vendored
5
vendor/k8s.io/kubernetes/pkg/controller/history/controller_history_test.go
generated
vendored
@ -27,6 +27,7 @@ import (
"k8s.io/api/core/v1"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/controller"

@ -966,7 +967,7 @@ func TestRealHistory_AdoptControllerRevision(t *testing.T) {
if err != nil {
return true, nil, err
}
patched, err := testapi.Apps.Converter().ConvertToVersion(obj, apps.SchemeGroupVersion)
patched, err := legacyscheme.Scheme.ConvertToVersion(obj, apps.SchemeGroupVersion)
if err != nil {
return true, nil, err
}
@ -1217,7 +1218,7 @@ func TestRealHistory_ReleaseControllerRevision(t *testing.T) {
if err != nil {
return true, nil, err
}
patched, err := testapi.Apps.Converter().ConvertToVersion(obj, apps.SchemeGroupVersion)
patched, err := legacyscheme.Scheme.ConvertToVersion(obj, apps.SchemeGroupVersion)
if err != nil {
return true, nil, err
}
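The change above swaps the deprecated testapi converter for the shared legacyscheme.Scheme; the conversion itself is ordinary scheme plumbing. A hedged fragment of the same call shape (function name invented for illustration):

    import (
        appsv1 "k8s.io/api/apps/v1"
        "k8s.io/apimachinery/pkg/runtime"
    )

    // convertToAppsV1 sketches the call the updated tests make through
    // legacyscheme.Scheme: convert any registered object to apps/v1.
    func convertToAppsV1(scheme *runtime.Scheme, obj runtime.Object) (runtime.Object, error) {
        return scheme.ConvertToVersion(obj, appsv1.SchemeGroupVersion)
    }
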
3
vendor/k8s.io/kubernetes/pkg/controller/job/BUILD
generated
vendored
3
vendor/k8s.io/kubernetes/pkg/controller/job/BUILD
generated
vendored
@ -47,13 +47,13 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/apis/core/install:go_default_library",
"//pkg/controller:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
@ -64,6 +64,7 @@ go_test(
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)
100
vendor/k8s.io/kubernetes/pkg/controller/job/job_controller.go
generated
vendored
100
vendor/k8s.io/kubernetes/pkg/controller/job/job_controller.go
generated
vendored
@ -48,6 +48,8 @@ import (
"github.com/golang/glog"
)

const statusUpdateRetries = 3

// controllerKind contains the schema.GroupVersionKind for this controller type.
var controllerKind = batch.SchemeGroupVersion.WithKind("Job")

@ -90,8 +92,7 @@ type JobController struct {
func NewJobController(podInformer coreinformers.PodInformer, jobInformer batchinformers.JobInformer, kubeClient clientset.Interface) *JobController {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
// TODO: remove the wrapper when every clients have moved to use the clientset.
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})

if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("job_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
@ -109,9 +110,13 @@ func NewJobController(podInformer coreinformers.PodInformer, jobInformer batchin
}

jobInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: jm.enqueueController,
AddFunc: func(obj interface{}) {
jm.enqueueController(obj, true)
},
UpdateFunc: jm.updateJob,
DeleteFunc: jm.enqueueController,
DeleteFunc: func(obj interface{}) {
jm.enqueueController(obj, true)
},
})
jm.jobLister = jobInformer.Lister()
jm.jobStoreSynced = jobInformer.Informer().HasSynced
@ -209,7 +214,7 @@ func (jm *JobController) addPod(obj interface{}) {
return
}
jm.expectations.CreationObserved(jobKey)
jm.enqueueController(job)
jm.enqueueController(job, true)
return
}

@ -218,7 +223,7 @@ func (jm *JobController) addPod(obj interface{}) {
// DO NOT observe creation because no controller should be waiting for an
// orphan.
for _, job := range jm.getPodJobs(pod) {
jm.enqueueController(job)
jm.enqueueController(job, true)
}
}

@ -242,7 +247,8 @@ func (jm *JobController) updatePod(old, cur interface{}) {
return
}

labelChanged := !reflect.DeepEqual(curPod.Labels, oldPod.Labels)
// the only time we want the backoff to kick-in, is when the pod failed
immediate := curPod.Status.Phase != v1.PodFailed

curControllerRef := metav1.GetControllerOf(curPod)
oldControllerRef := metav1.GetControllerOf(oldPod)
@ -250,7 +256,7 @@ func (jm *JobController) updatePod(old, cur interface{}) {
if controllerRefChanged && oldControllerRef != nil {
// The ControllerRef was changed. Sync the old controller, if any.
if job := jm.resolveControllerRef(oldPod.Namespace, oldControllerRef); job != nil {
jm.enqueueController(job)
jm.enqueueController(job, immediate)
}
}

@ -260,15 +266,16 @@ func (jm *JobController) updatePod(old, cur interface{}) {
if job == nil {
return
}
jm.enqueueController(job)
jm.enqueueController(job, immediate)
return
}

// Otherwise, it's an orphan. If anything changed, sync matching controllers
// to see if anyone wants to adopt it now.
labelChanged := !reflect.DeepEqual(curPod.Labels, oldPod.Labels)
if labelChanged || controllerRefChanged {
for _, job := range jm.getPodJobs(curPod) {
jm.enqueueController(job, immediate)
}
}
}
@ -309,7 +316,7 @@ func (jm *JobController) deletePod(obj interface{}) {
return
}
jm.expectations.DeletionObserved(jobKey)
jm.enqueueController(job)
jm.enqueueController(job, true)
}

func (jm *JobController) updateJob(old, cur interface{}) {
@ -321,7 +328,7 @@ func (jm *JobController) updateJob(old, cur interface{}) {
if err != nil {
return
}
jm.enqueueController(curJob)
jm.enqueueController(curJob, true)
// check if need to add a new rsync for ActiveDeadlineSeconds
if curJob.Status.StartTime != nil {
curADS := curJob.Spec.ActiveDeadlineSeconds
@ -341,16 +348,20 @@ func (jm *JobController) updateJob(old, cur interface{}) {
}
}

// obj could be an *batch.Job, or a DeletionFinalStateUnknown marker item.
func (jm *JobController) enqueueController(job interface{}) {
key, err := controller.KeyFunc(job)
// obj could be an *batch.Job, or a DeletionFinalStateUnknown marker item,
// immediate tells the controller to update the status right away, and should
// happen ONLY when there was a successful pod run.
func (jm *JobController) enqueueController(obj interface{}, immediate bool) {
key, err := controller.KeyFunc(obj)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", job, err))
utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
return
}

// Retrieves the backoff duration for this Job
backoff := getBackoff(jm.queue, key)
backoff := time.Duration(0)
if !immediate {
backoff = getBackoff(jm.queue, key)
}

// TODO: Handle overlapping controllers better. Either disallow them at admission time or
// deterministically avoid syncing controllers that fight over pods. Currently, we only
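The new immediate flag above chooses between two queueing behaviours: definite events (add, delete, successful pod runs) re-queue at once, while pod failures re-queue after a per-key delay. A simplified sketch of that decision against a rate-limiting workqueue (the vendored getBackoff derives the delay from the key's requeue count; the linear delay here is only illustrative):

    import (
        "time"

        "k8s.io/client-go/util/workqueue"
    )

    // enqueueWithOptionalBackoff mirrors the control flow above: immediate
    // events go on the queue with zero delay, failures wait out a backoff
    // proportional to how often this key has already been retried.
    func enqueueWithOptionalBackoff(q workqueue.RateLimitingInterface, key string, immediate bool, base time.Duration) {
        backoff := time.Duration(0)
        if !immediate {
            // NumRequeues grows until Forget(key) is called on success.
            backoff = base * time.Duration(q.NumRequeues(key))
        }
        q.AddAfter(key, backoff)
    }
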
@ -425,7 +436,7 @@ func (jm *JobController) getPodsForJob(j *batch.Job) ([]*v1.Pod, error) {
func (jm *JobController) syncJob(key string) (bool, error) {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing job %q (%v)", key, time.Now().Sub(startTime))
glog.V(4).Infof("Finished syncing job %q (%v)", key, time.Since(startTime))
}()

ns, name, err := cache.SplitMetaNamespaceKey(key)
@ -486,12 +497,18 @@ func (jm *JobController) syncJob(key string) (bool, error) {
var failureMessage string

jobHaveNewFailure := failed > job.Status.Failed
// new failures happen when status does not reflect the failures and active
// is different than parallelism, otherwise the previous controller loop
// failed updating status so even if we pick up failure it is not a new one
exceedsBackoffLimit := jobHaveNewFailure && (active != *job.Spec.Parallelism) &&
(int32(previousRetry)+1 > *job.Spec.BackoffLimit)

// check if the number of failed jobs increased since the last syncJob
if jobHaveNewFailure && (int32(previousRetry)+1 > *job.Spec.BackoffLimit) {
if exceedsBackoffLimit || pastBackoffLimitOnFailure(&job, pods) {
// check if the number of pod restart exceeds backoff (for restart OnFailure only)
// OR if the number of failed jobs increased since the last syncJob
jobFailed = true
failureReason = "BackoffLimitExceeded"
failureMessage = "Job has reach the specified backoff limit"
failureMessage = "Job has reached the specified backoff limit"
} else if pastActiveDeadline(&job) {
jobFailed = true
failureReason = "DeadlineExceeded"
@ -605,6 +622,30 @@ func (jm *JobController) deleteJobPods(job *batch.Job, pods []*v1.Pod, errCh cha
wait.Wait()
}

// pastBackoffLimitOnFailure checks if container restartCounts sum exceeds BackoffLimit
// this method applies only to pods with restartPolicy == OnFailure
func pastBackoffLimitOnFailure(job *batch.Job, pods []*v1.Pod) bool {
if job.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
return false
}
result := int32(0)
for i := range pods {
po := pods[i]
if po.Status.Phase != v1.PodRunning {
continue
}
for j := range po.Status.InitContainerStatuses {
stat := po.Status.InitContainerStatuses[j]
result += stat.RestartCount
}
for j := range po.Status.ContainerStatuses {
stat := po.Status.ContainerStatuses[j]
result += stat.RestartCount
}
}
return result >= *job.Spec.BackoffLimit
}
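A worked example of the counting rule in pastBackoffLimitOnFailure, with invented values (batchv1 and corev1 stand for k8s.io/api/batch/v1 and k8s.io/api/core/v1, which this controller imports as batch and v1):

    limit := int32(2)
    job := &batchv1.Job{Spec: batchv1.JobSpec{
        BackoffLimit: &limit,
        Template: corev1.PodTemplateSpec{
            Spec: corev1.PodSpec{RestartPolicy: corev1.RestartPolicyOnFailure},
        },
    }}
    pod := &corev1.Pod{Status: corev1.PodStatus{
        Phase:                 corev1.PodRunning,
        InitContainerStatuses: []corev1.ContainerStatus{{RestartCount: 1}},
        ContainerStatuses:     []corev1.ContainerStatus{{RestartCount: 1}},
    }}
    // pastBackoffLimitOnFailure(job, []*corev1.Pod{pod}) sums 1 + 1 = 2 restarts
    // across init and app containers; 2 >= *job.Spec.BackoffLimit, so the Job is
    // failed with reason "BackoffLimitExceeded".
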
// pastActiveDeadline checks if job has ActiveDeadlineSeconds field set and if it is exceeded.
func pastActiveDeadline(job *batch.Job) bool {
if job.Spec.ActiveDeadlineSeconds == nil || job.Status.StartTime == nil {
@ -778,7 +819,20 @@ func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *b
}

func (jm *JobController) updateJobStatus(job *batch.Job) error {
_, err := jm.kubeClient.BatchV1().Jobs(job.Namespace).UpdateStatus(job)
jobClient := jm.kubeClient.BatchV1().Jobs(job.Namespace)
var err error
for i := 0; i <= statusUpdateRetries; i = i + 1 {
var newJob *batch.Job
newJob, err = jobClient.Get(job.Name, metav1.GetOptions{})
if err != nil {
break
}
newJob.Status = job.Status
if _, err = jobClient.UpdateStatus(newJob); err == nil {
break
}
}

return err
}
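The retry loop added to updateJobStatus is a hand-rolled get-mutate-update cycle. client-go ships a helper with the same shape; a hedged alternative sketch for comparison (jobClient, jobName, and desiredStatus are assumptions, not vendored names):

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/util/retry"
    )

    // Same get-mutate-update cycle via client-go's conflict helper.
    err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
        latest, err := jobClient.Get(jobName, metav1.GetOptions{})
        if err != nil {
            return err
        }
        latest.Status = desiredStatus // overwrite with the status we computed
        _, err = jobClient.UpdateStatus(latest)
        return err
    })

Unlike the vendored loop, RetryOnConflict retries only on 409 Conflict; the hand-rolled version above also retries other update failures, bounded by statusUpdateRetries.
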
205
vendor/k8s.io/kubernetes/pkg/controller/job/job_controller_test.go
generated
vendored
205
vendor/k8s.io/kubernetes/pkg/controller/job/job_controller_test.go
generated
vendored
@ -26,6 +26,7 @@ import (
"k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
@ -36,7 +37,7 @@ import (
restclient "k8s.io/client-go/rest"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/client-go/util/workqueue"
_ "k8s.io/kubernetes/pkg/apis/core/install"
"k8s.io/kubernetes/pkg/controller"
)
@ -268,7 +269,7 @@ func TestControllerSyncJob(t *testing.T) {
nil, true, 0, 0, 0, 0,
10, 0, 10, 0, 0, nil, "",
},
"to many job sync failure": {
"too many job failures": {
2, 5, 0, true, 0,
nil, true, 0, 0, 0, 1,
0, 0, 0, 0, 1, &jobConditionFailed, "BackoffLimitExceeded",
@ -277,7 +278,7 @@ func TestControllerSyncJob(t *testing.T) {

for name, tc := range testCases {
// job manager setup
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{Err: tc.podControllerError, CreateLimit: tc.podLimit}
manager.podControl = &fakePodControl
@ -411,14 +412,14 @@ func TestSyncJobPastDeadline(t *testing.T) {
},
"activeDeadlineSeconds with backofflimit reach": {
1, 1, 1, 10, 0,
1, 0, 2,
true, 1, 0, 0, 3, "BackoffLimitExceeded",
0, 0, 1,
true, 0, 0, 0, 1, "BackoffLimitExceeded",
},
}

for name, tc := range testCases {
// job manager setup
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
@ -484,7 +485,7 @@ func getCondition(job *batch.Job, condition batch.JobConditionType, reason strin
}

func TestSyncPastDeadlineJobFinished(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
@ -522,7 +523,7 @@ func TestSyncPastDeadlineJobFinished(t *testing.T) {
}

func TestSyncJobComplete(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
@ -550,7 +551,7 @@ func TestSyncJobComplete(t *testing.T) {
}

func TestSyncJobDeleted(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
manager, _ := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
@ -574,7 +575,7 @@ func TestSyncJobDeleted(t *testing.T) {
}

func TestSyncJobUpdateRequeue(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
DefaultJobBackOff = time.Duration(0) // overwrite the default value for testing
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{}
@ -604,7 +605,7 @@ func TestSyncJobUpdateRequeue(t *testing.T) {
}

func TestJobPodLookup(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
manager.podStoreSynced = alwaysReady
manager.jobStoreSynced = alwaysReady
@ -686,7 +687,7 @@ func TestJobPodLookup(t *testing.T) {
}

func TestGetPodsForJob(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
jm, informer := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
jm.podStoreSynced = alwaysReady
jm.jobStoreSynced = alwaysReady
@ -822,7 +823,7 @@ func TestGetPodsForJobNoAdoptIfBeingDeletedRace(t *testing.T) {
}

func TestGetPodsForJobRelease(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
jm, informer := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
jm.podStoreSynced = alwaysReady
jm.jobStoreSynced = alwaysReady
@ -851,7 +852,7 @@ func TestGetPodsForJobRelease(t *testing.T) {
}

func TestAddPod(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
jm, informer := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
jm.podStoreSynced = alwaysReady
jm.jobStoreSynced = alwaysReady
@ -896,7 +897,7 @@ func TestAddPod(t *testing.T) {
}

func TestAddPodOrphan(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
jm, informer := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
jm.podStoreSynced = alwaysReady
jm.jobStoreSynced = alwaysReady
@ -924,7 +925,7 @@ func TestAddPodOrphan(t *testing.T) {
}

func TestUpdatePod(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
jm, informer := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
jm.podStoreSynced = alwaysReady
jm.jobStoreSynced = alwaysReady
@ -973,7 +974,7 @@ func TestUpdatePod(t *testing.T) {
}

func TestUpdatePodOrphanWithNewLabels(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
jm, informer := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
jm.podStoreSynced = alwaysReady
jm.jobStoreSynced = alwaysReady
@ -1000,7 +1001,7 @@ func TestUpdatePodOrphanWithNewLabels(t *testing.T) {
}

func TestUpdatePodChangeControllerRef(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
jm, informer := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
jm.podStoreSynced = alwaysReady
jm.jobStoreSynced = alwaysReady
@ -1026,7 +1027,7 @@ func TestUpdatePodChangeControllerRef(t *testing.T) {
}

func TestUpdatePodRelease(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
jm, informer := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
jm.podStoreSynced = alwaysReady
jm.jobStoreSynced = alwaysReady
@ -1052,7 +1053,7 @@ func TestUpdatePodRelease(t *testing.T) {
}

func TestDeletePod(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
jm, informer := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
jm.podStoreSynced = alwaysReady
jm.jobStoreSynced = alwaysReady
@ -1097,7 +1098,7 @@ func TestDeletePod(t *testing.T) {
}

func TestDeletePodOrphan(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
jm, informer := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
jm.podStoreSynced = alwaysReady
jm.jobStoreSynced = alwaysReady
@ -1137,7 +1138,7 @@ func (fe FakeJobExpectations) SatisfiedExpectations(controllerKey string) bool {
// TestSyncJobExpectations tests that a pod cannot sneak in between counting active pods
// and checking expectations.
func TestSyncJobExpectations(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
@ -1298,7 +1299,7 @@ func TestJobBackoffReset(t *testing.T) {
}

for name, tc := range testCases {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
DefaultJobBackOff = time.Duration(0) // overwrite the default value for testing
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{}
@ -1338,3 +1339,161 @@ func TestJobBackoffReset(t *testing.T) {
}
}
}

var _ workqueue.RateLimitingInterface = &fakeRateLimitingQueue{}

type fakeRateLimitingQueue struct {
workqueue.Interface
requeues int
item interface{}
duration time.Duration
}

func (f *fakeRateLimitingQueue) AddRateLimited(item interface{}) {}
func (f *fakeRateLimitingQueue) Forget(item interface{}) {
f.requeues = 0
}
func (f *fakeRateLimitingQueue) NumRequeues(item interface{}) int {
return f.requeues
}
func (f *fakeRateLimitingQueue) AddAfter(item interface{}, duration time.Duration) {
f.item = item
f.duration = duration
}
func TestJobBackoff(t *testing.T) {
|
||||
job := newJob(1, 1, 1)
|
||||
oldPod := newPod(fmt.Sprintf("pod-%v", rand.String(10)), job)
|
||||
oldPod.Status.Phase = v1.PodRunning
|
||||
oldPod.ResourceVersion = "1"
|
||||
newPod := oldPod.DeepCopy()
|
||||
newPod.ResourceVersion = "2"
|
||||
|
||||
testCases := map[string]struct {
|
||||
// inputs
|
||||
requeues int
|
||||
phase v1.PodPhase
|
||||
|
||||
// expectation
|
||||
backoff int
|
||||
}{
|
||||
"1st failure": {0, v1.PodFailed, 0},
|
||||
"2nd failure": {1, v1.PodFailed, 1},
|
||||
"3rd failure": {2, v1.PodFailed, 2},
|
||||
"1st success": {0, v1.PodSucceeded, 0},
|
||||
"2nd success": {1, v1.PodSucceeded, 0},
|
||||
"1st running": {0, v1.PodSucceeded, 0},
|
||||
"2nd running": {1, v1.PodSucceeded, 0},
|
||||
}
|
||||
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
|
||||
fakePodControl := controller.FakePodControl{}
|
||||
manager.podControl = &fakePodControl
|
||||
manager.podStoreSynced = alwaysReady
|
||||
manager.jobStoreSynced = alwaysReady
|
||||
queue := &fakeRateLimitingQueue{}
|
||||
manager.queue = queue
|
||||
sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
|
||||
|
||||
queue.requeues = tc.requeues
|
||||
newPod.Status.Phase = tc.phase
|
||||
manager.updatePod(oldPod, newPod)
|
||||
|
||||
if queue.duration.Nanoseconds() != int64(tc.backoff)*DefaultJobBackOff.Nanoseconds() {
|
||||
t.Errorf("unexpected backoff %v", queue.duration)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestJobBackoffForOnFailure(t *testing.T) {
|
||||
jobConditionFailed := batch.JobFailed
|
||||
|
||||
testCases := map[string]struct {
|
||||
// job setup
|
||||
parallelism int32
|
||||
completions int32
|
||||
backoffLimit int32
|
||||
|
||||
// pod setup
|
||||
jobKeyForget bool
|
||||
restartCounts []int32
|
||||
|
||||
// expectations
|
||||
expectedActive int32
|
||||
expectedSucceeded int32
|
||||
expectedFailed int32
|
||||
expectedCondition *batch.JobConditionType
|
||||
expectedConditionReason string
|
||||
}{
|
||||
"too many job failures - single pod": {
|
||||
1, 5, 2,
|
||||
true, []int32{2},
|
||||
0, 0, 1, &jobConditionFailed, "BackoffLimitExceeded",
|
||||
},
|
||||
"too many job failures - multiple pods": {
|
||||
2, 5, 2,
|
||||
true, []int32{1, 1},
|
||||
0, 0, 2, &jobConditionFailed, "BackoffLimitExceeded",
|
||||
},
|
||||
"not enough failures": {
|
||||
2, 5, 3,
|
||||
true, []int32{1, 1},
|
||||
2, 0, 0, nil, "",
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
// job manager setup
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
|
||||
fakePodControl := controller.FakePodControl{}
|
||||
manager.podControl = &fakePodControl
|
||||
manager.podStoreSynced = alwaysReady
|
||||
manager.jobStoreSynced = alwaysReady
|
||||
var actual *batch.Job
|
||||
manager.updateHandler = func(job *batch.Job) error {
|
||||
actual = job
|
||||
return nil
|
||||
}
|
||||
|
||||
// job & pods setup
|
||||
job := newJob(tc.parallelism, tc.completions, tc.backoffLimit)
|
||||
job.Spec.Template.Spec.RestartPolicy = v1.RestartPolicyOnFailure
|
||||
sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
|
||||
podIndexer := sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer()
|
||||
for i, pod := range newPodList(int32(len(tc.restartCounts)), v1.PodRunning, job) {
|
||||
pod.Status.ContainerStatuses = []v1.ContainerStatus{{RestartCount: tc.restartCounts[i]}}
|
||||
podIndexer.Add(&pod)
|
||||
}
|
||||
|
||||
// run
|
||||
forget, err := manager.syncJob(getKey(job, t))
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error syncing job. Got %#v", err)
|
||||
}
|
||||
if forget != tc.jobKeyForget {
|
||||
t.Errorf("unexpected forget value. Expected %v, saw %v\n", tc.jobKeyForget, forget)
|
||||
}
|
||||
// validate status
|
||||
if actual.Status.Active != tc.expectedActive {
|
||||
t.Errorf("unexpected number of active pods. Expected %d, saw %d\n", tc.expectedActive, actual.Status.Active)
|
||||
}
|
||||
if actual.Status.Succeeded != tc.expectedSucceeded {
|
||||
t.Errorf("unexpected number of succeeded pods. Expected %d, saw %d\n", tc.expectedSucceeded, actual.Status.Succeeded)
|
||||
}
|
||||
if actual.Status.Failed != tc.expectedFailed {
|
||||
t.Errorf("unexpected number of failed pods. Expected %d, saw %d\n", tc.expectedFailed, actual.Status.Failed)
|
||||
}
|
||||
// validate conditions
|
||||
if tc.expectedCondition != nil && !getCondition(actual, *tc.expectedCondition, tc.expectedConditionReason) {
|
||||
t.Errorf("expected completion condition. Got %#v", actual.Status.Conditions)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
1
vendor/k8s.io/kubernetes/pkg/controller/namespace/deletion/BUILD
generated
vendored
@ -31,7 +31,6 @@ go_test(
srcs = ["namespaced_resources_deleter_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/apis/core:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",

@ -43,13 +43,13 @@ type NamespacedResourcesDeleterInterface interface {
}

func NewNamespacedResourcesDeleter(nsClient v1clientset.NamespaceInterface,
clientPool dynamic.ClientPool, podsGetter v1clientset.PodsGetter,
dynamicClient dynamic.Interface, podsGetter v1clientset.PodsGetter,
discoverResourcesFn func() ([]*metav1.APIResourceList, error),
finalizerToken v1.FinalizerName, deleteNamespaceWhenDone bool) NamespacedResourcesDeleterInterface {
d := &namespacedResourcesDeleter{
nsClient: nsClient,
clientPool: clientPool,
podsGetter: podsGetter,
nsClient: nsClient,
dynamicClient: dynamicClient,
podsGetter: podsGetter,
opCache: &operationNotSupportedCache{
m: make(map[operationKey]bool),
},
@ -68,7 +68,7 @@ type namespacedResourcesDeleter struct {
// Client to manipulate the namespace.
nsClient v1clientset.NamespaceInterface
// Dynamic client to list and delete all namespaced resources.
clientPool dynamic.ClientPool
dynamicClient dynamic.Interface
// Interface to get PodInterface.
podsGetter v1clientset.PodsGetter
// Cache of what operations are not supported on each group version resource.
@ -328,9 +328,7 @@ func (d *namespacedResourcesDeleter) finalizeNamespace(namespace *v1.Namespace)
// deleteCollection is a helper function that will delete the collection of resources
// it returns true if the operation was supported on the server.
// it returns an error if the operation was supported on the server but was unable to complete.
func (d *namespacedResourcesDeleter) deleteCollection(
dynamicClient dynamic.Interface, gvr schema.GroupVersionResource,
namespace string) (bool, error) {
func (d *namespacedResourcesDeleter) deleteCollection(gvr schema.GroupVersionResource, namespace string) (bool, error) {
glog.V(5).Infof("namespace controller - deleteCollection - namespace: %s, gvr: %v", namespace, gvr)

key := operationKey{operation: operationDeleteCollection, gvr: gvr}
@ -339,14 +337,12 @@ func (d *namespacedResourcesDeleter) deleteCollection(
return false, nil
}

apiResource := metav1.APIResource{Name: gvr.Resource, Namespaced: true}

// namespace controller does not want the garbage collector to insert the orphan finalizer since it calls
// resource deletions generically. it will ensure all resources in the namespace are purged prior to releasing
// namespace itself.
background := metav1.DeletePropagationBackground
opts := &metav1.DeleteOptions{PropagationPolicy: &background}
err := dynamicClient.Resource(&apiResource, namespace).DeleteCollection(opts, metav1.ListOptions{})
err := d.dynamicClient.Resource(gvr).Namespace(namespace).DeleteCollection(opts, metav1.ListOptions{})

if err == nil {
return true, nil
@ -373,8 +369,7 @@ func (d *namespacedResourcesDeleter) deleteCollection(
// the list of items in the collection (if found)
// a boolean if the operation is supported
// an error if the operation is supported but could not be completed.
func (d *namespacedResourcesDeleter) listCollection(
dynamicClient dynamic.Interface, gvr schema.GroupVersionResource, namespace string) (*unstructured.UnstructuredList, bool, error) {
func (d *namespacedResourcesDeleter) listCollection(gvr schema.GroupVersionResource, namespace string) (*unstructured.UnstructuredList, bool, error) {
glog.V(5).Infof("namespace controller - listCollection - namespace: %s, gvr: %v", namespace, gvr)

key := operationKey{operation: operationList, gvr: gvr}
@ -383,13 +378,8 @@ func (d *namespacedResourcesDeleter) listCollection(
return nil, false, nil
}

apiResource := metav1.APIResource{Name: gvr.Resource, Namespaced: true}
obj, err := dynamicClient.Resource(&apiResource, namespace).List(metav1.ListOptions{IncludeUninitialized: true})
unstructuredList, err := d.dynamicClient.Resource(gvr).Namespace(namespace).List(metav1.ListOptions{IncludeUninitialized: true})
if err == nil {
unstructuredList, ok := obj.(*unstructured.UnstructuredList)
if !ok {
return nil, false, fmt.Errorf("resource: %s, expected *unstructured.UnstructuredList, got %#v", apiResource.Name, obj)
}
return unstructuredList, true, nil
}

@ -409,22 +399,20 @@ func (d *namespacedResourcesDeleter) listCollection(
}

// deleteEachItem is a helper function that will list the collection of resources and delete each item 1 by 1.
func (d *namespacedResourcesDeleter) deleteEachItem(
dynamicClient dynamic.Interface, gvr schema.GroupVersionResource, namespace string) error {
func (d *namespacedResourcesDeleter) deleteEachItem(gvr schema.GroupVersionResource, namespace string) error {
glog.V(5).Infof("namespace controller - deleteEachItem - namespace: %s, gvr: %v", namespace, gvr)

unstructuredList, listSupported, err := d.listCollection(dynamicClient, gvr, namespace)
unstructuredList, listSupported, err := d.listCollection(gvr, namespace)
if err != nil {
return err
}
if !listSupported {
return nil
}
apiResource := metav1.APIResource{Name: gvr.Resource, Namespaced: true}
for _, item := range unstructuredList.Items {
background := metav1.DeletePropagationBackground
opts := &metav1.DeleteOptions{PropagationPolicy: &background}
if err = dynamicClient.Resource(&apiResource, namespace).Delete(item.GetName(), opts); err != nil && !errors.IsNotFound(err) && !errors.IsMethodNotSupported(err) {
if err = d.dynamicClient.Resource(gvr).Namespace(namespace).Delete(item.GetName(), opts); err != nil && !errors.IsNotFound(err) && !errors.IsMethodNotSupported(err) {
return err
}
}
@ -447,22 +435,15 @@ func (d *namespacedResourcesDeleter) deleteAllContentForGroupVersionResource(
}
glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - estimate - namespace: %s, gvr: %v, estimate: %v", namespace, gvr, estimate)

// get a client for this group version...
dynamicClient, err := d.clientPool.ClientForGroupVersionResource(gvr)
if err != nil {
glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - unable to get client - namespace: %s, gvr: %v, err: %v", namespace, gvr, err)
return estimate, err
}

// first try to delete the entire collection
deleteCollectionSupported, err := d.deleteCollection(dynamicClient, gvr, namespace)
deleteCollectionSupported, err := d.deleteCollection(gvr, namespace)
if err != nil {
return estimate, err
}

// delete collection was not supported, so we list and delete each item...
if !deleteCollectionSupported {
err = d.deleteEachItem(dynamicClient, gvr, namespace)
err = d.deleteEachItem(gvr, namespace)
if err != nil {
return estimate, err
}
@ -471,7 +452,7 @@ func (d *namespacedResourcesDeleter) deleteAllContentForGroupVersionResource(
// verify there are no more remaining items
// it is not an error condition for there to be remaining items if local estimate is non-zero
glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - checking for no more items in namespace: %s, gvr: %v", namespace, gvr)
unstructuredList, listSupported, err := d.listCollection(dynamicClient, gvr, namespace)
unstructuredList, listSupported, err := d.listCollection(gvr, namespace)
if err != nil {
glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - error verifying no items in namespace: %s, gvr: %v, err: %v", namespace, gvr, err)
return estimate, err
@ -497,8 +478,7 @@ func (d *namespacedResourcesDeleter) deleteAllContentForGroupVersionResource(
// deleteAllContent will use the dynamic client to delete each resource identified in groupVersionResources.
// It returns an estimate of the time remaining before the remaining resources are deleted.
// If estimate > 0, not all resources are guaranteed to be gone.
func (d *namespacedResourcesDeleter) deleteAllContent(
namespace string, namespaceDeletedAt metav1.Time) (int64, error) {
func (d *namespacedResourcesDeleter) deleteAllContent(namespace string, namespaceDeletedAt metav1.Time) (int64, error) {
estimate := int64(0)
glog.V(4).Infof("namespace controller - deleteAllContent - namespace: %s", namespace)
resources, err := d.discoverResourcesFn()

@ -36,7 +36,6 @@ import (
"k8s.io/client-go/kubernetes/fake"
restclient "k8s.io/client-go/rest"
core "k8s.io/client-go/testing"
"k8s.io/kubernetes/pkg/api/legacyscheme"
api "k8s.io/kubernetes/pkg/apis/core"
)

@ -173,14 +172,16 @@ func testSyncNamespaceThatIsTerminating(t *testing.T, versions *metav1.APIVersio
defer srv.Close()

mockClient := fake.NewSimpleClientset(testInput.testNamespace)
clientPool := dynamic.NewClientPool(clientConfig, legacyscheme.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
dynamicClient, err := dynamic.NewForConfig(clientConfig)
if err != nil {
t.Fatal(err)
}

fn := func() ([]*metav1.APIResourceList, error) {
return resources, nil
}
d := NewNamespacedResourcesDeleter(mockClient.Core().Namespaces(), clientPool, mockClient.Core(), fn, v1.FinalizerKubernetes, true)
err := d.Delete(testInput.testNamespace.Name)
if err != nil {
d := NewNamespacedResourcesDeleter(mockClient.Core().Namespaces(), dynamicClient, mockClient.Core(), fn, v1.FinalizerKubernetes, true)
if err := d.Delete(testInput.testNamespace.Name); err != nil {
t.Errorf("scenario %s - Unexpected error when synching namespace %v", scenario, err)
}
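Note on the hunks above: the deleter now takes a single dynamic.Interface instead of a dynamic.ClientPool, so one client serves every GroupVersionResource and the per-GVR lookup (ClientForGroupVersionResource) disappears. A minimal, self-contained sketch of the new call pattern under the assumption of a reachable cluster; the kubeconfig path and the deployments gvr are illustrative, and only calls that appear in the diff (NewForConfig, Resource, Namespace, List, DeleteCollection) are used:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load a kubeconfig; the path is an assumption for the sketch.
	config, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig")
	if err != nil {
		panic(err)
	}
	// One dynamic client now serves every group/version/resource,
	// replacing the per-GVR clients handed out by ClientPool.
	client, err := dynamic.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	gvr := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
	// List namespaced objects as unstructured data.
	list, err := client.Resource(gvr).Namespace("demo").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d items\n", len(list.Items))
	// Delete the whole collection with background propagation, as the
	// namespace controller does in the hunks above.
	background := metav1.DeletePropagationBackground
	opts := &metav1.DeleteOptions{PropagationPolicy: &background}
	if err := client.Resource(gvr).Namespace("demo").DeleteCollection(opts, metav1.ListOptions{}); err != nil {
		panic(err)
	}
}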
6
vendor/k8s.io/kubernetes/pkg/controller/namespace/namespace_controller.go
generated
vendored
@ -63,7 +63,7 @@ type NamespaceController struct {
// NewNamespaceController creates a new NamespaceController
func NewNamespaceController(
kubeClient clientset.Interface,
clientPool dynamic.ClientPool,
dynamicClient dynamic.Interface,
discoverResourcesFn func() ([]*metav1.APIResourceList, error),
namespaceInformer coreinformers.NamespaceInformer,
resyncPeriod time.Duration,
@ -72,7 +72,7 @@ func NewNamespaceController(
// create the controller so we can inject the enqueue function
namespaceController := &NamespaceController{
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "namespace"),
namespacedResourcesDeleter: deletion.NewNamespacedResourcesDeleter(kubeClient.CoreV1().Namespaces(), clientPool, kubeClient.CoreV1(), discoverResourcesFn, finalizerToken, true),
namespacedResourcesDeleter: deletion.NewNamespacedResourcesDeleter(kubeClient.CoreV1().Namespaces(), dynamicClient, kubeClient.CoreV1(), discoverResourcesFn, finalizerToken, true),
}

if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
@ -163,7 +163,7 @@ func (nm *NamespaceController) worker() {
func (nm *NamespaceController) syncNamespaceFromKey(key string) (err error) {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Now().Sub(startTime))
glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime))
}()

namespace, err := nm.lister.Get(key)
3
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/ipam/BUILD
generated
vendored
@ -9,6 +9,7 @@ load(
go_test(
name = "go_default_test",
srcs = [
"cloud_cidr_allocator_test.go",
"controller_test.go",
"range_allocator_test.go",
"timeout_test.go",
@ -48,7 +49,9 @@ go_library(
"//pkg/controller/nodeipam/ipam/cidrset:go_default_library",
"//pkg/controller/nodeipam/ipam/sync:go_default_library",
"//pkg/controller/util/node:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/util/node:go_default_library",
"//pkg/util/taints:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
2
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/ipam/adapter.go
generated
vendored
@ -52,7 +52,7 @@ func newAdapter(k8s clientset.Interface, cloud *gce.GCECloud) *adapter {
ret.recorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloudCIDRAllocator"})
glog.V(0).Infof("Sending events to api server.")
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{
Interface: v1core.New(k8s.CoreV1().RESTClient()).Events(""),
Interface: k8s.CoreV1().Events(""),
})

return ret
6
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidr_allocator.go
generated
vendored
@ -69,6 +69,12 @@ const (

// cidrUpdateRetries is the no. of times a NodeSpec update will be retried before dropping it.
cidrUpdateRetries = 3

// updateRetryTimeout is the time to wait before requeuing a failed node for retry
updateRetryTimeout = 100 * time.Millisecond

// updateMaxRetries is the max retries for a failed node
updateMaxRetries = 10
)

// CIDRAllocator is an interface implemented by things that know how
51
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go
generated
vendored
@ -20,13 +20,14 @@ import (
"fmt"
"net"
"sync"
"time"

"github.com/golang/glog"

"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
informers "k8s.io/client-go/informers/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
@ -41,9 +42,16 @@ import (
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/controller"
nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
utilnode "k8s.io/kubernetes/pkg/util/node"
utiltaints "k8s.io/kubernetes/pkg/util/taints"
)

// nodeProcessingInfo tracks information related to current nodes in processing
type nodeProcessingInfo struct {
retries int
}

// cloudCIDRAllocator allocates node CIDRs according to IP address aliases
// assigned by the cloud provider. In this case, the allocation and
// deallocation is delegated to the external provider, and the controller
@ -67,7 +75,7 @@ type cloudCIDRAllocator struct {

// Keep a set of nodes that are currently being processed to avoid races in CIDR allocation
lock sync.Mutex
nodesInProcessing sets.String
nodesInProcessing map[string]*nodeProcessingInfo
}

var _ CIDRAllocator = (*cloudCIDRAllocator)(nil)
@ -82,7 +90,7 @@ func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Inter
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"})
eventBroadcaster.StartLogging(glog.Infof)
glog.V(0).Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client.CoreV1().RESTClient()).Events("")})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})

gceCloud, ok := cloud.(*gce.GCECloud)
if !ok {
@ -97,7 +105,7 @@ func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Inter
nodesSynced: nodeInformer.Informer().HasSynced,
nodeUpdateChannel: make(chan string, cidrUpdateQueueSize),
recorder: recorder,
nodesInProcessing: sets.NewString(),
nodesInProcessing: map[string]*nodeProcessingInfo{},
}

nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@ -108,8 +116,9 @@ func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Inter
}
// Even if PodCIDR is assigned, but NetworkUnavailable condition is
// set to true, we need to process the node to set the condition.
networkUnavailableTaint := &v1.Taint{Key: algorithm.TaintNodeNetworkUnavailable, Effect: v1.TaintEffectNoSchedule}
_, cond := v1node.GetNodeCondition(&newNode.Status, v1.NodeNetworkUnavailable)
if cond == nil || cond.Status != v1.ConditionFalse {
if cond == nil || cond.Status != v1.ConditionFalse || utiltaints.TaintExists(newNode.Spec.Taints, networkUnavailableTaint) {
return ca.AllocateOrOccupyCIDR(newNode)
}
return nil
@ -147,9 +156,15 @@ func (ca *cloudCIDRAllocator) worker(stopChan <-chan struct{}) {
return
}
if err := ca.updateCIDRAllocation(workItem); err != nil {
// Requeue the failed node for update again.
ca.nodeUpdateChannel <- workItem
if ca.canRetry(workItem) {
time.AfterFunc(updateRetryTimeout, func() {
// Requeue the failed node for update again.
ca.nodeUpdateChannel <- workItem
})
continue
}
}
ca.removeNodeFromProcessing(workItem)
case <-stopChan:
return
}
@ -159,17 +174,28 @@ func (ca *cloudCIDRAllocator) worker(stopChan <-chan struct{}) {
func (ca *cloudCIDRAllocator) insertNodeToProcessing(nodeName string) bool {
ca.lock.Lock()
defer ca.lock.Unlock()
if ca.nodesInProcessing.Has(nodeName) {
if _, found := ca.nodesInProcessing[nodeName]; found {
return false
}
ca.nodesInProcessing.Insert(nodeName)
ca.nodesInProcessing[nodeName] = &nodeProcessingInfo{}
return true
}

func (ca *cloudCIDRAllocator) canRetry(nodeName string) bool {
ca.lock.Lock()
defer ca.lock.Unlock()
count := ca.nodesInProcessing[nodeName].retries + 1
if count > updateMaxRetries {
return false
}
ca.nodesInProcessing[nodeName].retries = count
return true
}

func (ca *cloudCIDRAllocator) removeNodeFromProcessing(nodeName string) {
ca.lock.Lock()
defer ca.lock.Unlock()
ca.nodesInProcessing.Delete(nodeName)
delete(ca.nodesInProcessing, nodeName)
}

// WARNING: If you're adding any return calls or defer any more work from this
@ -191,10 +217,11 @@ func (ca *cloudCIDRAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {

// updateCIDRAllocation assigns CIDR to Node and sends an update to the API server.
func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
defer ca.removeNodeFromProcessing(nodeName)

node, err := ca.nodeLister.Get(nodeName)
if err != nil {
if errors.IsNotFound(err) {
return nil // node no longer available, skip processing
}
glog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", nodeName, err)
return err
}
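Note on the worker change above: a failed updateCIDRAllocation is no longer requeued immediately and forever; the retry is delayed by updateRetryTimeout and capped at updateMaxRetries through the per-node retries counter. A minimal standalone sketch of that bounded-retry pattern; the retrier type and the process function are invented for illustration and only mirror the shape of canRetry and the worker loop:

package main

import (
	"fmt"
	"sync"
	"time"
)

const (
	retryTimeout = 100 * time.Millisecond
	maxRetries   = 10
)

type retrier struct {
	mu      sync.Mutex
	retries map[string]int
}

// canRetry mirrors the allocator's bookkeeping: bump the per-item
// counter and report whether the cap has been reached.
func (r *retrier) canRetry(item string) bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.retries[item]++
	return r.retries[item] <= maxRetries
}

func main() {
	queue := make(chan string, 1)
	r := &retrier{retries: map[string]int{}}
	// Stand-in for updateCIDRAllocation; it always fails in this demo.
	process := func(item string) error { return fmt.Errorf("always fails") }
	done := make(chan struct{})

	go func() {
		for item := range queue {
			if err := process(item); err != nil && r.canRetry(item) {
				// Requeue later instead of immediately, so a broken
				// item cannot hot-loop the worker.
				time.AfterFunc(retryTimeout, func() { queue <- item })
				continue
			}
			close(done) // succeeded or gave up: stop the demo
			return
		}
	}()

	queue <- "node0"
	<-done
}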
59
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cloud_cidr_allocator_test.go
generated
vendored
Normal file
@ -0,0 +1,59 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ipam

import (
"testing"
"time"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
)

func hasNodeInProcessing(ca *cloudCIDRAllocator, name string) bool {
ca.lock.Lock()
defer ca.lock.Unlock()

_, found := ca.nodesInProcessing[name]
return found
}

func TestBoundedRetries(t *testing.T) {
clientSet := fake.NewSimpleClientset()
updateChan := make(chan string, 1) // need to buffer as we are using only one go routine
stopChan := make(chan struct{})
sharedInformer := informers.NewSharedInformerFactory(clientSet, 1*time.Hour)
ca := &cloudCIDRAllocator{
client: clientSet,
nodeUpdateChannel: updateChan,
nodeLister: sharedInformer.Core().V1().Nodes().Lister(),
nodesSynced: sharedInformer.Core().V1().Nodes().Informer().HasSynced,
nodesInProcessing: map[string]*nodeProcessingInfo{},
}
go ca.worker(stopChan)
nodeName := "testNode"
ca.AllocateOrOccupyCIDR(&v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: nodeName,
},
})
for hasNodeInProcessing(ca, nodeName) {
// wait for node to finish processing (should terminate and not time out)
}
}
5
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/ipam/controller.go
generated
vendored
@ -174,14 +174,15 @@ func (c *Controller) onAdd(node *v1.Node) error {
c.lock.Lock()
defer c.lock.Unlock()

if syncer, ok := c.syncers[node.Name]; !ok {
syncer, ok := c.syncers[node.Name]
if !ok {
syncer = c.newSyncer(node.Name)
c.syncers[node.Name] = syncer
go syncer.Loop(nil)
} else {
glog.Warningf("Add for node %q that already exists", node.Name)
syncer.Update(node)
}
syncer.Update(node)

return nil
}
2
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/ipam/range_allocator.go
generated
vendored
@ -76,7 +76,7 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"})
eventBroadcaster.StartLogging(glog.Infof)
glog.V(0).Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client.CoreV1().RESTClient()).Events("")})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})

set, err := cidrset.NewCIDRSet(clusterCIDR, subNetMaskSize)
if err != nil {
92
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/node_ipam_controller.go
generated
vendored
@ -58,8 +58,7 @@ const (

// Controller is the controller that manages node ipam state.
type Controller struct {
allocateNodeCIDRs bool
allocatorType ipam.CIDRAllocatorType
allocatorType ipam.CIDRAllocatorType

cloud cloudprovider.Interface
clusterCIDR *net.IPNet
@ -88,7 +87,6 @@ func NewNodeIpamController(
clusterCIDR *net.IPNet,
serviceCIDR *net.IPNet,
nodeCIDRMaskSize int,
allocateNodeCIDRs bool,
allocatorType ipam.CIDRAllocatorType) (*Controller, error) {

if kubeClient == nil {
@ -101,61 +99,56 @@ func NewNodeIpamController(
glog.V(0).Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(
&v1core.EventSinkImpl{
Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events(""),
Interface: kubeClient.CoreV1().Events(""),
})

if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("node_ipam_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
}

if allocateNodeCIDRs {
if clusterCIDR == nil {
glog.Fatal("Controller: Must specify clusterCIDR if allocateNodeCIDRs == true.")
}
mask := clusterCIDR.Mask
if maskSize, _ := mask.Size(); maskSize > nodeCIDRMaskSize {
glog.Fatal("Controller: Invalid clusterCIDR, mask size of clusterCIDR must be less than nodeCIDRMaskSize.")
}
if clusterCIDR == nil {
glog.Fatal("Controller: Must specify --cluster-cidr if --allocate-node-cidrs is set")
}
mask := clusterCIDR.Mask
if maskSize, _ := mask.Size(); maskSize > nodeCIDRMaskSize {
glog.Fatal("Controller: Invalid --cluster-cidr, mask size of cluster CIDR must be less than --node-cidr-mask-size")
}

ic := &Controller{
cloud: cloud,
kubeClient: kubeClient,
lookupIP: net.LookupIP,
clusterCIDR: clusterCIDR,
serviceCIDR: serviceCIDR,
allocateNodeCIDRs: allocateNodeCIDRs,
allocatorType: allocatorType,
cloud: cloud,
kubeClient: kubeClient,
lookupIP: net.LookupIP,
clusterCIDR: clusterCIDR,
serviceCIDR: serviceCIDR,
allocatorType: allocatorType,
}

// TODO: Abstract this check into a generic controller manager should run method.
if ic.allocateNodeCIDRs {
if ic.allocatorType == ipam.IPAMFromClusterAllocatorType || ic.allocatorType == ipam.IPAMFromCloudAllocatorType {
cfg := &ipam.Config{
Resync: ipamResyncInterval,
MaxBackoff: ipamMaxBackoff,
InitialRetry: ipamInitialBackoff,
}
switch ic.allocatorType {
case ipam.IPAMFromClusterAllocatorType:
cfg.Mode = nodesync.SyncFromCluster
case ipam.IPAMFromCloudAllocatorType:
cfg.Mode = nodesync.SyncFromCloud
}
ipamc, err := ipam.NewController(cfg, kubeClient, cloud, clusterCIDR, serviceCIDR, nodeCIDRMaskSize)
if err != nil {
glog.Fatalf("Error creating ipam controller: %v", err)
}
if err := ipamc.Start(nodeInformer); err != nil {
glog.Fatalf("Error trying to Init(): %v", err)
}
} else {
var err error
ic.cidrAllocator, err = ipam.New(
kubeClient, cloud, nodeInformer, ic.allocatorType, ic.clusterCIDR, ic.serviceCIDR, nodeCIDRMaskSize)
if err != nil {
return nil, err
}
if ic.allocatorType == ipam.IPAMFromClusterAllocatorType || ic.allocatorType == ipam.IPAMFromCloudAllocatorType {
cfg := &ipam.Config{
Resync: ipamResyncInterval,
MaxBackoff: ipamMaxBackoff,
InitialRetry: ipamInitialBackoff,
}
switch ic.allocatorType {
case ipam.IPAMFromClusterAllocatorType:
cfg.Mode = nodesync.SyncFromCluster
case ipam.IPAMFromCloudAllocatorType:
cfg.Mode = nodesync.SyncFromCloud
}
ipamc, err := ipam.NewController(cfg, kubeClient, cloud, clusterCIDR, serviceCIDR, nodeCIDRMaskSize)
if err != nil {
glog.Fatalf("Error creating ipam controller: %v", err)
}
if err := ipamc.Start(nodeInformer); err != nil {
glog.Fatalf("Error trying to Init(): %v", err)
}
} else {
var err error
ic.cidrAllocator, err = ipam.New(
kubeClient, cloud, nodeInformer, ic.allocatorType, ic.clusterCIDR, ic.serviceCIDR, nodeCIDRMaskSize)
if err != nil {
return nil, err
}
}

@ -176,11 +169,8 @@ func (nc *Controller) Run(stopCh <-chan struct{}) {
return
}

// TODO: Abstract this check into a generic controller manager should run method.
if nc.allocateNodeCIDRs {
if nc.allocatorType != ipam.IPAMFromClusterAllocatorType && nc.allocatorType != ipam.IPAMFromCloudAllocatorType {
go nc.cidrAllocator.Run(stopCh)
}
if nc.allocatorType != ipam.IPAMFromClusterAllocatorType && nc.allocatorType != ipam.IPAMFromCloudAllocatorType {
go nc.cidrAllocator.Run(stopCh)
}

<-stopCh
70
vendor/k8s.io/kubernetes/pkg/controller/nodelifecycle/node_lifecycle_controller.go
generated
vendored
@ -22,6 +22,13 @@ limitations under the License.
package nodelifecycle

import (
"context"
"fmt"
"sync"
"time"

"github.com/golang/glog"

"k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@ -50,12 +57,6 @@ import (
"k8s.io/kubernetes/pkg/util/system"
taintutils "k8s.io/kubernetes/pkg/util/taints"
utilversion "k8s.io/kubernetes/pkg/util/version"

"fmt"
"sync"
"time"

"github.com/golang/glog"
)

func init() {
@ -107,8 +108,6 @@ const (
)

const (
// The amount of time the nodecontroller polls on the list nodes endpoint.
apiserverStartupGracePeriod = 10 * time.Minute
// The amount of time the nodecontroller should sleep between retrying NodeStatus updates
retrySleepTime = 20 * time.Millisecond
)
@ -153,9 +152,10 @@ type Controller struct {
daemonSetStore extensionslisters.DaemonSetLister
daemonSetInformerSynced cache.InformerSynced

nodeLister corelisters.NodeLister
nodeInformerSynced cache.InformerSynced
nodeExistsInCloudProvider func(types.NodeName) (bool, error)
nodeLister corelisters.NodeLister
nodeInformerSynced cache.InformerSynced
nodeExistsInCloudProvider func(types.NodeName) (bool, error)
nodeShutdownInCloudProvider func(context.Context, *v1.Node) (bool, error)

recorder record.EventRecorder

@ -241,6 +241,9 @@ func NewNodeLifecycleController(podInformer coreinformers.PodInformer,
nodeExistsInCloudProvider: func(nodeName types.NodeName) (bool, error) {
return nodeutil.ExistsInCloudProvider(cloud, nodeName)
},
nodeShutdownInCloudProvider: func(ctx context.Context, node *v1.Node) (bool, error) {
return nodeutil.ShutdownInCloudProvider(ctx, cloud, node)
},
recorder: recorder,
nodeMonitorPeriod: nodeMonitorPeriod,
nodeStartupGracePeriod: nodeStartupGracePeriod,
@ -438,7 +441,21 @@ func (nc *Controller) doNoScheduleTaintingPass(node *v1.Node) error {
}
}
}
if node.Spec.Unschedulable {
// If unschedulable, append related taint.
taints = append(taints, v1.Taint{
Key: algorithm.TaintNodeUnschedulable,
Effect: v1.TaintEffectNoSchedule,
})
}

// Get existing taints of the node.
nodeTaints := taintutils.TaintSetFilter(node.Spec.Taints, func(t *v1.Taint) bool {
// Find unschedulable taint of node.
if t.Key == algorithm.TaintNodeUnschedulable {
return true
}
// Find node condition taints of node.
_, found := taintKeyToNodeConditionMap[t.Key]
return found
})
@ -655,6 +672,11 @@ func (nc *Controller) monitorNodeStatus() error {
glog.V(2).Infof("Node %s is ready again, cancelled pod eviction", node.Name)
}
}
// remove the shutdown taint; this is always needed, whether or not taint-based evictions are in use
err := nc.markNodeAsNotShutdown(node)
if err != nil {
glog.Errorf("Failed to remove taints from node %v. Will retry in next iteration.", node.Name)
}
}

// Report node event.
@ -668,6 +690,19 @@ func (nc *Controller) monitorNodeStatus() error {
// Check with the cloud provider to see if the node still exists. If it
// doesn't, delete the node immediately.
if currentReadyCondition.Status != v1.ConditionTrue && nc.cloud != nil {
// check whether the node has been shut down; if so, do not delete it and add a taint instead
shutdown, err := nc.nodeShutdownInCloudProvider(context.TODO(), node)
if err != nil {
glog.Errorf("Error determining if node %v shutdown in cloud: %v", node.Name, err)
}
// node shutdown
if shutdown && err == nil {
err = controller.AddOrUpdateTaintOnNode(nc.kubeClient, node.Name, controller.ShutdownTaint)
if err != nil {
glog.Errorf("Error patching node taints: %v", err)
}
continue
}
exists, err := nc.nodeExistsInCloudProvider(types.NodeName(node.Name))
if err != nil {
glog.Errorf("Error determining if node %v exists in cloud: %v", node.Name, err)
@ -1042,6 +1077,8 @@ func (nc *Controller) ReducedQPSFunc(nodeNum int) float32 {

// addPodEvictorForNewZone checks if new zone appeared, and if so add new evictor.
func (nc *Controller) addPodEvictorForNewZone(node *v1.Node) {
nc.evictorLock.Lock()
defer nc.evictorLock.Unlock()
zone := utilnode.GetZoneKey(node)
if _, found := nc.zoneStates[zone]; !found {
nc.zoneStates[zone] = stateInitial
@ -1104,6 +1141,17 @@ func (nc *Controller) markNodeAsReachable(node *v1.Node) (bool, error) {
return nc.zoneNoExecuteTainter[utilnode.GetZoneKey(node)].Remove(node.Name), nil
}

func (nc *Controller) markNodeAsNotShutdown(node *v1.Node) error {
nc.evictorLock.Lock()
defer nc.evictorLock.Unlock()
err := controller.RemoveTaintOffNode(nc.kubeClient, node.Name, node, controller.ShutdownTaint)
if err != nil {
glog.Errorf("Failed to remove taint from node %v: %v", node.Name, err)
return err
}
return nil
}

// ComputeZoneState returns a slice of NodeReadyConditions for all Nodes in a given zone.
// The zone is considered:
// - fullyDisrupted if there're no Ready Nodes,
137
vendor/k8s.io/kubernetes/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
package nodelifecycle

import (
"context"
"strings"
"testing"
"time"
@ -1360,6 +1361,118 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
}
}

func TestCloudProviderNodeShutdown(t *testing.T) {

testCases := []struct {
testName string
node *v1.Node
shutdown bool
}{
{
testName: "node shutdowned add taint",
shutdown: true,
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
Spec: v1.NodeSpec{
ProviderID: "node0",
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
},
{
testName: "node started after shutdown remove taint",
shutdown: false,
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
Spec: v1.NodeSpec{
ProviderID: "node0",
Taints: []v1.Taint{
{
Key: algorithm.TaintNodeShutdown,
Effect: v1.TaintEffectNoSchedule,
},
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
},
}
for _, tc := range testCases {
t.Run(tc.testName, func(t *testing.T) {
fnh := &testutil.FakeNodeHandler{
Existing: []*v1.Node{tc.node},
Clientset: fake.NewSimpleClientset(),
}
nodeController, _ := newNodeLifecycleControllerFromClient(
nil,
fnh,
10*time.Minute,
testRateLimiterQPS,
testRateLimiterQPS,
testLargeClusterThreshold,
testUnhealthyThreshold,
testNodeMonitorGracePeriod,
testNodeStartupGracePeriod,
testNodeMonitorPeriod,
false)
nodeController.cloud = &fakecloud.FakeCloud{}
nodeController.now = func() metav1.Time { return metav1.Date(2016, 1, 1, 12, 0, 0, 0, time.UTC) }
nodeController.recorder = testutil.NewFakeRecorder()
nodeController.nodeShutdownInCloudProvider = func(ctx context.Context, node *v1.Node) (bool, error) {
return tc.shutdown, nil
}

if err := nodeController.syncNodeStore(fnh); err != nil {
t.Errorf("unexpected error: %v", err)
}
if err := nodeController.monitorNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
}

if len(fnh.UpdatedNodes) != 1 {
t.Errorf("Node was not updated")
}
if tc.shutdown {
if len(fnh.UpdatedNodes[0].Spec.Taints) != 1 {
t.Errorf("Node Taint was not added")
}
if fnh.UpdatedNodes[0].Spec.Taints[0].Key != "node.cloudprovider.kubernetes.io/shutdown" {
t.Errorf("Node Taint key is not correct")
}
} else {
if len(fnh.UpdatedNodes[0].Spec.Taints) != 0 {
t.Errorf("Node Taint was not removed after node is back in ready state")
}
}
})
}

}

// TestCloudProviderNoRateLimit tests that monitorNodes() immediately deletes
// pods and the node when kubelet has not reported, and the cloudprovider says
// the node is gone.
@ -1404,6 +1517,9 @@ func TestCloudProviderNoRateLimit(t *testing.T) {
nodeController.nodeExistsInCloudProvider = func(nodeName types.NodeName) (bool, error) {
return false, nil
}
nodeController.nodeShutdownInCloudProvider = func(ctx context.Context, node *v1.Node) (bool, error) {
return false, nil
}
// monitorNodeStatus should allow this node to be immediately deleted
if err := nodeController.syncNodeStore(fnh); err != nil {
t.Errorf("unexpected error: %v", err)
@ -1543,9 +1659,6 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
},
},
Spec: v1.NodeSpec{
ExternalID: "node0",
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
@ -1620,9 +1733,6 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
},
},
Spec: v1.NodeSpec{
ExternalID: "node0",
},
},
},
},
@ -1651,9 +1761,6 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
},
},
Spec: v1.NodeSpec{
ExternalID: "node0",
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
@ -1755,9 +1862,6 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
},
},
Spec: v1.NodeSpec{
ExternalID: "node0",
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
@ -1789,9 +1893,6 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
},
},
Spec: v1.NodeSpec{
ExternalID: "node0",
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
@ -2208,9 +2309,6 @@ func TestNodeEventGeneration(t *testing.T) {
UID: "1234567890",
CreationTimestamp: metav1.Date(2015, 8, 10, 0, 0, 0, 0, time.UTC),
},
Spec: v1.NodeSpec{
ExternalID: "node0",
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
@ -2242,6 +2340,9 @@ func TestNodeEventGeneration(t *testing.T) {
nodeController.nodeExistsInCloudProvider = func(nodeName types.NodeName) (bool, error) {
return false, nil
}
nodeController.nodeShutdownInCloudProvider = func(ctx context.Context, node *v1.Node) (bool, error) {
return false, nil
}
nodeController.now = func() metav1.Time { return fakeNow }
fakeRecorder := testutil.NewFakeRecorder()
nodeController.recorder = fakeRecorder
93
vendor/k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler/taint_manager.go
generated
vendored
@ -18,22 +18,23 @@ package scheduler

import (
"fmt"
"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/apis/core/helper"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"hash/fnv"
"io"
"sync"
"time"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"sync"
"time"

clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/apis/core/helper"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"

"github.com/golang/glog"
)
@ -59,6 +60,32 @@ type podUpdateItem struct {
newTolerations []v1.Toleration
}

func (n *nodeUpdateItem) name() string {
if n.newNode != nil {
return n.newNode.ObjectMeta.Name
}
if n.oldNode != nil {
return n.oldNode.ObjectMeta.Name
}
return ""
}

func (p *podUpdateItem) nodeName() string {
if p.newPod != nil {
return p.newPod.Spec.NodeName
}
if p.oldPod != nil {
return p.oldPod.Spec.NodeName
}
return ""
}

func hash(val string, max int) int {
hasher := fnv.New32a()
io.WriteString(hasher, val)
return int(hasher.Sum32() % uint32(max))
}

// NoExecuteTaintManager listens to Taint/Toleration changes and is responsible for removing Pods
// from Nodes tainted with NoExecute Taints.
type NoExecuteTaintManager struct {
@ -70,8 +97,8 @@ type NoExecuteTaintManager struct {
taintedNodesLock sync.Mutex
taintedNodes map[string][]v1.Taint

nodeUpdateChannel chan *nodeUpdateItem
podUpdateChannel chan *podUpdateItem
nodeUpdateChannels []chan *nodeUpdateItem
podUpdateChannels []chan *podUpdateItem

nodeUpdateQueue workqueue.Interface
podUpdateQueue workqueue.Interface
@ -155,17 +182,15 @@ func NewNoExecuteTaintManager(c clientset.Interface) *NoExecuteTaintManager {
eventBroadcaster.StartLogging(glog.Infof)
if c != nil {
glog.V(0).Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(c.CoreV1().RESTClient()).Events("")})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.CoreV1().Events("")})
} else {
glog.Fatalf("kubeClient is nil when starting NodeController")
}

tm := &NoExecuteTaintManager{
client: c,
recorder: recorder,
taintedNodes: make(map[string][]v1.Taint),
nodeUpdateChannel: make(chan *nodeUpdateItem, nodeUpdateChannelSize),
podUpdateChannel: make(chan *podUpdateItem, podUpdateChannelSize),
client: c,
recorder: recorder,
taintedNodes: make(map[string][]v1.Taint),

nodeUpdateQueue: workqueue.New(),
podUpdateQueue: workqueue.New(),
@ -178,6 +203,15 @@ func NewNoExecuteTaintManager(c clientset.Interface) *NoExecuteTaintManager {
// Run starts NoExecuteTaintManager which will run in loop until `stopCh` is closed.
func (tc *NoExecuteTaintManager) Run(stopCh <-chan struct{}) {
glog.V(0).Infof("Starting NoExecuteTaintManager")

// TODO: Figure out a reasonable number of workers and propagate the
// number of workers up making it a parameter of Run() function.
workers := 8
for i := 0; i < workers; i++ {
tc.nodeUpdateChannels = append(tc.nodeUpdateChannels, make(chan *nodeUpdateItem, nodeUpdateChannelSize))
tc.podUpdateChannels = append(tc.podUpdateChannels, make(chan *podUpdateItem, podUpdateChannelSize))
}

// Functions that are responsible for taking work items out of the workqueues and putting them
// into channels.
go func(stopCh <-chan struct{}) {
@ -187,11 +221,14 @@ func (tc *NoExecuteTaintManager) Run(stopCh <-chan struct{}) {
break
}
nodeUpdate := item.(*nodeUpdateItem)
hash := hash(nodeUpdate.name(), workers)
select {
case <-stopCh:
tc.nodeUpdateQueue.Done(item)
break
case tc.nodeUpdateChannel <- nodeUpdate:
case tc.nodeUpdateChannels[hash] <- nodeUpdate:
}
tc.nodeUpdateQueue.Done(item)
}
}(stopCh)

@ -202,14 +239,28 @@ func (tc *NoExecuteTaintManager) Run(stopCh <-chan struct{}) {
break
}
podUpdate := item.(*podUpdateItem)
hash := hash(podUpdate.nodeName(), workers)
select {
case <-stopCh:
tc.podUpdateQueue.Done(item)
break
case tc.podUpdateChannel <- podUpdate:
case tc.podUpdateChannels[hash] <- podUpdate:
}
tc.podUpdateQueue.Done(item)
}
}(stopCh)

wg := sync.WaitGroup{}
wg.Add(workers)
for i := 0; i < workers; i++ {
go tc.worker(i, wg.Done, stopCh)
}
wg.Wait()
}

func (tc *NoExecuteTaintManager) worker(worker int, done func(), stopCh <-chan struct{}) {
defer done()

// When processing events we want to prioritize Node updates over Pod updates,
// as NodeUpdates that interest NoExecuteTaintManager should be handled as soon as possible -
// we don't want user (or system) to wait until PodUpdate queue is drained before it can
@ -217,15 +268,15 @@ func (tc *NoExecuteTaintManager) Run(stopCh <-chan struct{}) {
for {
select {
case <-stopCh:
break
case nodeUpdate := <-tc.nodeUpdateChannel:
return
case nodeUpdate := <-tc.nodeUpdateChannels[worker]:
tc.handleNodeUpdate(nodeUpdate)
case podUpdate := <-tc.podUpdateChannel:
case podUpdate := <-tc.podUpdateChannels[worker]:
// If we found a Pod update we need to empty Node queue first.
priority:
for {
select {
case nodeUpdate := <-tc.nodeUpdateChannel:
case nodeUpdate := <-tc.nodeUpdateChannels[worker]:
tc.handleNodeUpdate(nodeUpdate)
default:
break priority
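Note on the fan-out above: instead of one shared nodeUpdateChannel and podUpdateChannel, updates are now sharded across a fixed pool of 8 workers by an FNV-32a hash of the node name, so all updates touching one node land on the same worker and stay ordered relative to each other. A minimal sketch of the sharding scheme; the demo channel setup and node names are invented for illustration:

package main

import (
	"fmt"
	"hash/fnv"
	"io"
)

// hash maps a key onto one of max shards, exactly as the taint
// manager does for node and pod updates.
func hash(val string, max int) int {
	hasher := fnv.New32a()
	io.WriteString(hasher, val)
	return int(hasher.Sum32() % uint32(max))
}

func main() {
	const workers = 8
	channels := make([]chan string, workers)
	for i := range channels {
		channels[i] = make(chan string, 1)
	}
	// Updates for the same node always pick the same channel, so a
	// single worker sees them in order.
	for _, node := range []string{"node-a", "node-b", "node-a"} {
		idx := hash(node, workers)
		select {
		case channels[idx] <- node:
		default: // drop in this demo if the shard's buffer is full
		}
		fmt.Printf("%s -> worker %d\n", node, idx)
	}
}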

@ -61,7 +61,7 @@ func TestExecuteDelayed(t *testing.T) {
return nil
})
now := time.Now()
then := now.Add(3 * time.Second)
then := now.Add(10 * time.Second)
queue.AddWork(NewWorkArgs("1", "1"), now, then)
queue.AddWork(NewWorkArgs("2", "2"), now, then)
queue.AddWork(NewWorkArgs("3", "3"), now, then)
@ -89,7 +89,7 @@ func TestCancel(t *testing.T) {
return nil
})
now := time.Now()
then := now.Add(3 * time.Second)
then := now.Add(10 * time.Second)
queue.AddWork(NewWorkArgs("1", "1"), now, then)
queue.AddWork(NewWorkArgs("2", "2"), now, then)
queue.AddWork(NewWorkArgs("3", "3"), now, then)
@ -119,7 +119,7 @@ func TestCancelAndReadd(t *testing.T) {
return nil
})
now := time.Now()
then := now.Add(3 * time.Second)
then := now.Add(10 * time.Second)
queue.AddWork(NewWorkArgs("1", "1"), now, then)
queue.AddWork(NewWorkArgs("2", "2"), now, then)
queue.AddWork(NewWorkArgs("3", "3"), now, then)
1
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/BUILD
generated
vendored
@ -69,6 +69,7 @@ go_test(
        "//vendor/k8s.io/api/autoscaling/v1:go_default_library",
        "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
7
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go
generated
vendored
@ -91,7 +91,6 @@ func NewHorizontalController(
) *HorizontalController {
    broadcaster := record.NewBroadcaster()
    broadcaster.StartLogging(glog.Infof)
    // TODO: remove the wrapper when every clients have moved to use the clientset.
    broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: evtNamespacer.Events("")})
    recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "horizontal-pod-autoscaler"})

@ -223,7 +222,7 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori

    switch metricSpec.Type {
    case autoscalingv2.ObjectMetricSourceType:
        replicaCountProposal, utilizationProposal, timestampProposal, err = a.replicaCalc.GetObjectMetricReplicas(currentReplicas, metricSpec.Object.TargetValue.MilliValue(), metricSpec.Object.MetricName, hpa.Namespace, &metricSpec.Object.Target)
        replicaCountProposal, utilizationProposal, timestampProposal, err = a.replicaCalc.GetObjectMetricReplicas(currentReplicas, metricSpec.Object.TargetValue.MilliValue(), metricSpec.Object.MetricName, hpa.Namespace, &metricSpec.Object.Target, selector)
        if err != nil {
            a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetObjectMetric", err.Error())
            setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetObjectMetric", "the HPA was unable to compute the replica count: %v", err)
@ -317,7 +316,7 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori
            },
        }
    } else if metricSpec.External.TargetValue != nil {
        replicaCountProposal, utilizationProposal, timestampProposal, err = a.replicaCalc.GetExternalMetricReplicas(currentReplicas, metricSpec.External.TargetValue.MilliValue(), metricSpec.External.MetricName, hpa.Namespace, metricSpec.External.MetricSelector)
        replicaCountProposal, utilizationProposal, timestampProposal, err = a.replicaCalc.GetExternalMetricReplicas(currentReplicas, metricSpec.External.TargetValue.MilliValue(), metricSpec.External.MetricName, hpa.Namespace, metricSpec.External.MetricSelector, selector)
        if err != nil {
            a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetExternalMetric", err.Error())
            setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetExternalMetric", "the HPA was unable to compute the replica count: %v", err)
@ -621,7 +620,7 @@ func (a *HorizontalController) shouldScale(hpa *autoscalingv2.HorizontalPodAutos
func (a *HorizontalController) scaleForResourceMappings(namespace, name string, mappings []*apimeta.RESTMapping) (*autoscalingv1.Scale, schema.GroupResource, error) {
    var firstErr error
    for i, mapping := range mappings {
        targetGR := mapping.GroupVersionKind.GroupVersion().WithResource(mapping.Resource).GroupResource()
        targetGR := mapping.Resource.GroupResource()
        scale, err := a.scaleNamespacer.Scales(namespace).Get(targetGR, name)
        if err == nil {
            return scale, targetGR, nil
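The scaleForResourceMappings change reflects the apimachinery RESTMapping refactor: mapping.Resource is now a full schema.GroupVersionResource rather than a bare resource string, so the GroupResource is derived directly instead of being rebuilt from the GroupVersionKind. A small standalone illustration (not the controller code itself):

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
    // After the refactor, RESTMapping.Resource is a GroupVersionResource;
    // GroupResource() drops the version, which is all the scale client needs.
    gvr := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
    fmt.Println(gvr.GroupResource()) // prints "deployments.apps"
}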
106
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal_test.go
generated
vendored
@ -27,6 +27,7 @@ import (
    autoscalingv1 "k8s.io/api/autoscaling/v1"
    autoscalingv2 "k8s.io/api/autoscaling/v2beta1"
    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/meta/testrestmapper"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
@ -103,6 +104,7 @@ type testCase struct {
    reportedLevels       []uint64
    reportedCPURequests  []resource.Quantity
    reportedPodReadiness []v1.ConditionStatus
    reportedPodPhase     []v1.PodPhase
    scaleUpdated  bool
    statusUpdated bool
    eventCreated  bool
@ -123,6 +125,7 @@ type testCase struct {
    testClient        *fake.Clientset
    testMetricsClient *metricsfake.Clientset
    testCMClient      *cmfake.FakeCustomMetricsClient
    testEMClient      *emfake.FakeExternalMetricsClient
    testScaleClient   *scalefake.FakeScaleClient
}

@ -245,15 +248,35 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
    defer tc.Unlock()

    obj := &v1.PodList{}
    for i := 0; i < len(tc.reportedCPURequests); i++ {

    specifiedCPURequests := tc.reportedCPURequests != nil

    numPodsToCreate := int(tc.initialReplicas)
    if specifiedCPURequests {
        numPodsToCreate = len(tc.reportedCPURequests)
    }

    for i := 0; i < numPodsToCreate; i++ {
        podReadiness := v1.ConditionTrue
        if tc.reportedPodReadiness != nil {
            podReadiness = tc.reportedPodReadiness[i]
        }

        podPhase := v1.PodRunning
        if tc.reportedPodPhase != nil {
            podPhase = tc.reportedPodPhase[i]
        }

        podName := fmt.Sprintf("%s-%d", podNamePrefix, i)

        reportedCPURequest := resource.MustParse("1.0")
        if specifiedCPURequests {
            reportedCPURequest = tc.reportedCPURequests[i]
        }

        pod := v1.Pod{
            Status: v1.PodStatus{
                Phase: v1.PodRunning,
                Phase: podPhase,
                Conditions: []v1.PodCondition{
                    {
                        Type: v1.PodReady,
@ -268,12 +291,13 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
                    "name": podNamePrefix,
                },
            },

            Spec: v1.PodSpec{
                Containers: []v1.Container{
                    {
                        Resources: v1.ResourceRequirements{
                            Requests: v1.ResourceList{
                                v1.ResourceCPU: tc.reportedCPURequests[i],
                                v1.ResourceCPU: reportedCPURequest,
                            },
                        },
                    },
@ -488,7 +512,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
    }

    name := getForAction.GetName()
    mapper := legacyscheme.Registry.RESTMapper()
    mapper := testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)
    metrics := &cmapi.MetricValueList{}
    var matchedTarget *autoscalingv2.MetricSpec
    for i, target := range tc.metricsTarget {
@ -499,7 +523,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
        t.Logf("unable to get mapping for %s: %v", gk.String(), err)
        continue
    }
    groupResource := schema.GroupResource{Group: mapping.GroupVersionKind.Group, Resource: mapping.Resource}
    groupResource := mapping.Resource.GroupResource()

    if getForAction.GetResource().Resource == groupResource.String() {
        matchedTarget = &tc.metricsTarget[i]
@ -577,6 +601,9 @@ func (tc *testCase) setupController(t *testing.T) (*HorizontalController, inform
    if tc.testCMClient != nil {
        testCMClient = tc.testCMClient
    }
    if tc.testEMClient != nil {
        testEMClient = tc.testEMClient
    }
    if tc.testScaleClient != nil {
        testScaleClient = tc.testScaleClient
    }
@ -623,7 +650,7 @@ func (tc *testCase) setupController(t *testing.T) (*HorizontalController, inform
        eventClient.Core(),
        testScaleClient,
        testClient.Autoscaling(),
        legacyscheme.Registry.RESTMapper(),
        testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme),
        replicaCalc,
        informerFactory.Autoscaling().V1().HorizontalPodAutoscalers(),
        controller.NoResyncPeriodFunc(),
@ -713,6 +740,24 @@ func TestScaleUpUnreadyNoScale(t *testing.T) {
    tc.runTest(t)
}

func TestScaleUpIgnoresFailedPods(t *testing.T) {
    tc := testCase{
        minReplicas:          2,
        maxReplicas:          6,
        initialReplicas:      2,
        desiredReplicas:      4,
        CPUTarget:            30,
        CPUCurrent:           60,
        verifyCPUCurrent:     true,
        reportedLevels:       []uint64{500, 700},
        reportedCPURequests:  []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
        reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
        reportedPodPhase:     []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodFailed, v1.PodFailed},
        useMetricsAPI:        true,
    }
    tc.runTest(t)
}

func TestScaleUpDeployment(t *testing.T) {
    tc := testCase{
        minReplicas: 2,
@ -1017,6 +1062,24 @@ func TestScaleDownIgnoresUnreadyPods(t *testing.T) {
    tc.runTest(t)
}

func TestScaleDownIgnoresFailedPods(t *testing.T) {
    tc := testCase{
        minReplicas:          2,
        maxReplicas:          6,
        initialReplicas:      5,
        desiredReplicas:      3,
        CPUTarget:            50,
        CPUCurrent:           28,
        verifyCPUCurrent:     true,
        reportedLevels:       []uint64{100, 300, 500, 250, 250},
        reportedCPURequests:  []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
        useMetricsAPI:        true,
        reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
        reportedPodPhase:     []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodFailed, v1.PodFailed},
    }
    tc.runTest(t)
}

func TestTolerance(t *testing.T) {
    tc := testCase{
        minReplicas: 1,
@ -1307,13 +1370,14 @@ func TestEmptyMetrics(t *testing.T) {

func TestEmptyCPURequest(t *testing.T) {
    tc := testCase{
        minReplicas:     1,
        maxReplicas:     5,
        initialReplicas: 1,
        desiredReplicas: 1,
        CPUTarget:       100,
        reportedLevels:  []uint64{200},
        useMetricsAPI:   true,
        minReplicas:         1,
        maxReplicas:         5,
        initialReplicas:     1,
        desiredReplicas:     1,
        CPUTarget:           100,
        reportedLevels:      []uint64{200},
        reportedCPURequests: []resource.Quantity{},
        useMetricsAPI:       true,
        expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
            {Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededGetScale"},
            {Type: autoscalingv1.ScalingActive, Status: v1.ConditionFalse, Reason: "FailedGetResourceMetric"},
@ -1527,6 +1591,16 @@ func TestConditionFailedGetMetrics(t *testing.T) {
            },
        },
    },
    "FailedGetExternalMetric": {
        {
            Type: autoscalingv2.ExternalMetricSourceType,
            External: &autoscalingv2.ExternalMetricSource{
                MetricSelector: &metav1.LabelSelector{},
                MetricName:     "qps",
                TargetValue:    resource.NewMilliQuantity(300, resource.DecimalSI),
            },
        },
    },
}

for reason, specs := range metricsTargets {
@ -1540,9 +1614,10 @@ func TestConditionFailedGetMetrics(t *testing.T) {
        reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
        useMetricsAPI:       true,
    }
    _, testMetricsClient, testCMClient, _, _ := tc.prepareTestClient(t)
    _, testMetricsClient, testCMClient, testEMClient, _ := tc.prepareTestClient(t)
    tc.testMetricsClient = testMetricsClient
    tc.testCMClient = testCMClient
    tc.testEMClient = testEMClient

    testMetricsClient.PrependReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
        return true, &metricsapi.PodMetricsList{}, fmt.Errorf("something went wrong")
@ -1550,6 +1625,9 @@ func TestConditionFailedGetMetrics(t *testing.T) {
    testCMClient.PrependReactor("get", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
        return true, &cmapi.MetricValueList{}, fmt.Errorf("something went wrong")
    })
    testEMClient.PrependReactor("list", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
        return true, &emapi.ExternalMetricValueList{}, fmt.Errorf("something went wrong")
    })

    tc.expectedConditions = []autoscalingv1.HorizontalPodAutoscalerCondition{
        {Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededGetScale"},
31
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/legacy_horizontal_test.go
generated
vendored
@ -30,6 +30,7 @@ import (
    autoscalingv1 "k8s.io/api/autoscaling/v1"
    autoscalingv2 "k8s.io/api/autoscaling/v2beta1"
    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/meta/testrestmapper"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
@ -497,7 +498,7 @@ func (tc *legacyTestCase) runTest(t *testing.T) {
        eventClient.Core(),
        testScaleClient,
        testClient.Autoscaling(),
        legacyscheme.Registry.RESTMapper(),
        testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme),
        replicaCalc,
        informerFactory.Autoscaling().V1().HorizontalPodAutoscalers(),
        controller.NoResyncPeriodFunc(),
@ -524,7 +525,7 @@ func (tc *legacyTestCase) runTest(t *testing.T) {
    tc.verifyResults(t)
}

func LegacyTestScaleUp(t *testing.T) {
func TestLegacyScaleUp(t *testing.T) {
    tc := legacyTestCase{
        minReplicas: 2,
        maxReplicas: 6,
@ -539,7 +540,7 @@ func LegacyTestScaleUp(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestScaleUpUnreadyLessScale(t *testing.T) {
func TestLegacyScaleUpUnreadyLessScale(t *testing.T) {
    tc := legacyTestCase{
        minReplicas: 2,
        maxReplicas: 6,
@ -556,7 +557,7 @@ func LegacyTestScaleUpUnreadyLessScale(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestScaleUpUnreadyNoScale(t *testing.T) {
func TestLegacyScaleUpUnreadyNoScale(t *testing.T) {
    tc := legacyTestCase{
        minReplicas: 2,
        maxReplicas: 6,
@ -573,7 +574,7 @@ func LegacyTestScaleUpUnreadyNoScale(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestScaleUpDeployment(t *testing.T) {
func TestLegacyScaleUpDeployment(t *testing.T) {
    tc := legacyTestCase{
        minReplicas: 2,
        maxReplicas: 6,
@ -593,7 +594,7 @@ func LegacyTestScaleUpDeployment(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestScaleUpReplicaSet(t *testing.T) {
func TestLegacyScaleUpReplicaSet(t *testing.T) {
    tc := legacyTestCase{
        minReplicas: 2,
        maxReplicas: 6,
@ -613,7 +614,7 @@ func LegacyTestScaleUpReplicaSet(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestScaleUpCM(t *testing.T) {
func TestLegacyScaleUpCM(t *testing.T) {
    tc := legacyTestCase{
        minReplicas: 2,
        maxReplicas: 6,
@ -635,7 +636,7 @@ func LegacyTestScaleUpCM(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestScaleUpCMUnreadyLessScale(t *testing.T) {
func TestLegacyScaleUpCMUnreadyLessScale(t *testing.T) {
    tc := legacyTestCase{
        minReplicas: 2,
        maxReplicas: 6,
@ -658,7 +659,7 @@ func LegacyTestScaleUpCMUnreadyLessScale(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
func TestLegacyScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
    tc := legacyTestCase{
        minReplicas: 2,
        maxReplicas: 6,
@ -681,7 +682,7 @@ func LegacyTestScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestScaleDown(t *testing.T) {
func TestLegacyScaleDown(t *testing.T) {
    tc := legacyTestCase{
        minReplicas: 2,
        maxReplicas: 6,
@ -696,7 +697,7 @@ func LegacyTestScaleDown(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestScaleDownCM(t *testing.T) {
func TestLegacyScaleDownCM(t *testing.T) {
    tc := legacyTestCase{
        minReplicas: 2,
        maxReplicas: 6,
@ -718,7 +719,7 @@ func LegacyTestScaleDownCM(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestScaleDownIgnoresUnreadyPods(t *testing.T) {
func TestLegacyScaleDownIgnoresUnreadyPods(t *testing.T) {
    tc := legacyTestCase{
        minReplicas: 2,
        maxReplicas: 6,
@ -840,7 +841,7 @@ func LegacyTestMaxReplicas(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestSuperfluousMetrics(t *testing.T) {
func TestLegacySuperfluousMetrics(t *testing.T) {
    tc := legacyTestCase{
        minReplicas: 2,
        maxReplicas: 6,
@ -1022,7 +1023,7 @@ func LegacyTestComputedToleranceAlgImplementation(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestScaleUpRCImmediately(t *testing.T) {
func TestLegacyScaleUpRCImmediately(t *testing.T) {
    time := metav1.Time{Time: time.Now()}
    tc := legacyTestCase{
        minReplicas: 2,
@ -1038,7 +1039,7 @@ func LegacyTestScaleUpRCImmediately(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestScaleDownRCImmediately(t *testing.T) {
func TestLegacyScaleDownRCImmediately(t *testing.T) {
    time := metav1.Time{Time: time.Now()}
    tc := legacyTestCase{
        minReplicas: 2,
46
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/legacy_replica_calculator_test.go
generated
vendored
@ -227,7 +227,7 @@ func (tc *legacyReplicaCalcTestCase) runTest(t *testing.T) {
    }
}

func LegacyTestReplicaCalcDisjointResourcesMetrics(t *testing.T) {
func TestLegacyReplicaCalcDisjointResourcesMetrics(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas: 1,
        expectedError:   fmt.Errorf("no metrics returned matched known pods"),
@ -243,7 +243,7 @@ func LegacyTestReplicaCalcDisjointResourcesMetrics(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestReplicaCalcScaleUp(t *testing.T) {
func TestLegacyReplicaCalcScaleUp(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  3,
        expectedReplicas: 5,
@ -260,7 +260,7 @@ func LegacyTestReplicaCalcScaleUp(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestReplicaCalcScaleUpUnreadyLessScale(t *testing.T) {
func TestLegacyReplicaCalcScaleUpUnreadyLessScale(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  3,
        expectedReplicas: 4,
@ -278,7 +278,7 @@ func LegacyTestReplicaCalcScaleUpUnreadyLessScale(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestReplicaCalcScaleUpUnreadyNoScale(t *testing.T) {
func TestLegacyReplicaCalcScaleUpUnreadyNoScale(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  3,
        expectedReplicas: 3,
@ -296,7 +296,7 @@ func LegacyTestReplicaCalcScaleUpUnreadyNoScale(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestReplicaCalcScaleUpCM(t *testing.T) {
func TestLegacyReplicaCalcScaleUpCM(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  3,
        expectedReplicas: 4,
@ -310,7 +310,7 @@ func LegacyTestReplicaCalcScaleUpCM(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestReplicaCalcScaleUpCMUnreadyLessScale(t *testing.T) {
func TestLegacyReplicaCalcScaleUpCMUnreadyLessScale(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  3,
        expectedReplicas: 4,
@ -325,7 +325,7 @@ func LegacyTestReplicaCalcScaleUpCMUnreadyLessScale(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestReplicaCalcScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
func TestLegacyReplicaCalcScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  3,
        expectedReplicas: 3,
@ -340,7 +340,7 @@ func LegacyTestReplicaCalcScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestReplicaCalcScaleDown(t *testing.T) {
func TestLegacyReplicaCalcScaleDown(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  5,
        expectedReplicas: 3,
@ -357,7 +357,7 @@ func LegacyTestReplicaCalcScaleDown(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestReplicaCalcScaleDownCM(t *testing.T) {
func TestLegacyReplicaCalcScaleDownCM(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  5,
        expectedReplicas: 3,
@ -371,7 +371,7 @@ func LegacyTestReplicaCalcScaleDownCM(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestReplicaCalcScaleDownIgnoresUnreadyPods(t *testing.T) {
func TestLegacyReplicaCalcScaleDownIgnoresUnreadyPods(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  5,
        expectedReplicas: 2,
@ -389,7 +389,7 @@ func LegacyTestReplicaCalcScaleDownIgnoresUnreadyPods(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestReplicaCalcTolerance(t *testing.T) {
func TestLegacyReplicaCalcTolerance(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  3,
        expectedReplicas: 3,
@ -406,7 +406,7 @@ func LegacyTestReplicaCalcTolerance(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestReplicaCalcToleranceCM(t *testing.T) {
func TestLegacyReplicaCalcToleranceCM(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  3,
        expectedReplicas: 3,
@ -420,7 +420,7 @@ func LegacyTestReplicaCalcToleranceCM(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestReplicaCalcSuperfluousMetrics(t *testing.T) {
func TestLegacyReplicaCalcSuperfluousMetrics(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  4,
        expectedReplicas: 24,
@ -436,7 +436,7 @@ func LegacyTestReplicaCalcSuperfluousMetrics(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestReplicaCalcMissingMetrics(t *testing.T) {
func TestLegacyReplicaCalcMissingMetrics(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  4,
        expectedReplicas: 3,
@ -453,7 +453,7 @@ func LegacyTestReplicaCalcMissingMetrics(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestReplicaCalcEmptyMetrics(t *testing.T) {
func TestLegacyReplicaCalcEmptyMetrics(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas: 4,
        expectedError:   fmt.Errorf("unable to get metrics for resource cpu: no metrics returned from heapster"),
@ -468,7 +468,7 @@ func LegacyTestReplicaCalcEmptyMetrics(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestReplicaCalcEmptyCPURequest(t *testing.T) {
func TestLegacyReplicaCalcEmptyCPURequest(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas: 1,
        expectedError:   fmt.Errorf("missing request for"),
@ -483,7 +483,7 @@ func LegacyTestReplicaCalcEmptyCPURequest(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestReplicaCalcMissingMetricsNoChangeEq(t *testing.T) {
func TestLegacyReplicaCalcMissingMetricsNoChangeEq(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  2,
        expectedReplicas: 2,
@ -500,7 +500,7 @@ func LegacyTestReplicaCalcMissingMetricsNoChangeEq(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestReplicaCalcMissingMetricsNoChangeGt(t *testing.T) {
func TestLegacyReplicaCalcMissingMetricsNoChangeGt(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  2,
        expectedReplicas: 2,
@ -517,7 +517,7 @@ func LegacyTestReplicaCalcMissingMetricsNoChangeGt(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestReplicaCalcMissingMetricsNoChangeLt(t *testing.T) {
func TestLegacyReplicaCalcMissingMetricsNoChangeLt(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  2,
        expectedReplicas: 2,
@ -534,7 +534,7 @@ func LegacyTestReplicaCalcMissingMetricsNoChangeLt(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestReplicaCalcMissingMetricsUnreadyNoChange(t *testing.T) {
func TestLegacyReplicaCalcMissingMetricsUnreadyNoChange(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  3,
        expectedReplicas: 3,
@ -552,7 +552,7 @@ func LegacyTestReplicaCalcMissingMetricsUnreadyNoChange(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestReplicaCalcMissingMetricsUnreadyScaleUp(t *testing.T) {
func TestLegacyReplicaCalcMissingMetricsUnreadyScaleUp(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  3,
        expectedReplicas: 4,
@ -570,7 +570,7 @@ func LegacyTestReplicaCalcMissingMetricsUnreadyScaleUp(t *testing.T) {
    tc.runTest(t)
}

func LegacyTestReplicaCalcMissingMetricsUnreadyScaleDown(t *testing.T) {
func TestLegacyReplicaCalcMissingMetricsUnreadyScaleDown(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  4,
        expectedReplicas: 3,
@ -591,7 +591,7 @@ func LegacyTestReplicaCalcMissingMetricsUnreadyScaleDown(t *testing.T) {
// TestComputedToleranceAlgImplementation is a regression test which
// back-calculates a minimal percentage for downscaling based on a small percentage
// increase in pod utilization which is calibrated against the tolerance value.
func LegacyTestReplicaCalcComputedToleranceAlgImplementation(t *testing.T) {
func TestLegacyReplicaCalcComputedToleranceAlgImplementation(t *testing.T) {

    startPods := int32(10)
    // 150 mCPU per pod.
1
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/BUILD
generated
vendored
@ -47,6 +47,7 @@ go_test(
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
@ -23,6 +23,7 @@ import (

    autoscalingapi "k8s.io/api/autoscaling/v2beta1"
    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/meta/testrestmapper"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
@ -153,14 +154,14 @@ func (tc *restClientTestCase) prepareTestClient(t *testing.T) (*metricsfake.Clie
        return true, &metrics, nil
    } else {
        name := getForAction.GetName()
        mapper := legacyscheme.Registry.RESTMapper()
        mapper := testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)
        assert.NotNil(t, tc.singleObject, "should have only requested a single-object metric when we asked for metrics for a single object")
        gk := schema.FromAPIVersionAndKind(tc.singleObject.APIVersion, tc.singleObject.Kind).GroupKind()
        mapping, err := mapper.RESTMapping(gk)
        if err != nil {
            return true, nil, fmt.Errorf("unable to get mapping for %s: %v", gk.String(), err)
        }
        groupResource := schema.GroupResource{Group: mapping.GroupVersionKind.Group, Resource: mapping.Resource}
        groupResource := mapping.Resource.GroupResource()

        assert.Equal(t, groupResource.String(), getForAction.GetResource().Resource, "should have requested metrics for the resource matching the GroupKind passed in")
        assert.Equal(t, tc.singleObject.Name, name, "should have requested metrics for the object matching the name passed in")
65
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/replica_calculator.go
generated
vendored
@ -88,7 +88,11 @@ func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUti

        if pod.Status.Phase != v1.PodRunning || !podutil.IsPodReady(&pod) {
            // save this pod name for later, but pretend it doesn't exist for now
            unreadyPods.Insert(pod.Name)
            if pod.Status.Phase != v1.PodFailed {
                // Failed pods should not be counted as unready pods as they will
                // not become running anymore.
                unreadyPods.Insert(pod.Name)
            }
            delete(metrics, pod.Name)
            continue
        }
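The hunk above changes the pod filtering so that failed pods are dropped outright instead of being counted as unready: a failed pod will never become running again, so it should not suppress scaling the way a still-starting pod does. A standalone sketch of that classification rule, with illustrative names rather than the upstream types:

package main

import "fmt"

// podInfo stands in for v1.Pod; only the fields the rule needs.
type podInfo struct {
    name  string
    phase string // "Running", "Pending", "Failed", ...
    ready bool
}

// classify mirrors the filtering above: failed pods are dropped entirely,
// other not-running or not-ready pods count as unready, and the rest are
// the pods whose metrics feed the replica calculation.
func classify(pods []podInfo) (unready, counted []string) {
    for _, p := range pods {
        if p.phase != "Running" || !p.ready {
            if p.phase != "Failed" {
                // Failed pods will not become running anymore, so they
                // are not treated as unready capacity.
                unready = append(unready, p.name)
            }
            continue
        }
        counted = append(counted, p.name)
    }
    return unready, counted
}

func main() {
    unready, counted := classify([]podInfo{
        {"pod-0", "Running", true},
        {"pod-1", "Pending", false},
        {"pod-2", "Failed", false},
    })
    fmt.Println(unready, counted) // [pod-1] [pod-0]
}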
@ -272,7 +276,7 @@ func (c *ReplicaCalculator) calcPlainMetricReplicas(metrics metricsclient.PodMet

// GetObjectMetricReplicas calculates the desired replica count based on a target metric utilization (as a milli-value)
// for the given object in the given namespace, and the current replica count.
func (c *ReplicaCalculator) GetObjectMetricReplicas(currentReplicas int32, targetUtilization int64, metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference) (replicaCount int32, utilization int64, timestamp time.Time, err error) {
func (c *ReplicaCalculator) GetObjectMetricReplicas(currentReplicas int32, targetUtilization int64, metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference, selector labels.Selector) (replicaCount int32, utilization int64, timestamp time.Time, err error) {
    utilization, timestamp, err = c.metricsClient.GetObjectMetric(metricName, namespace, objectRef)
    if err != nil {
        return 0, 0, time.Time{}, fmt.Errorf("unable to get metric %s: %v on %s %s/%s", metricName, objectRef.Kind, namespace, objectRef.Name, err)
@ -283,48 +287,85 @@ func (c *ReplicaCalculator) GetObjectMetricReplicas(currentReplicas int32, targe
        // return the current replicas if the change would be too small
        return currentReplicas, utilization, timestamp, nil
    }
    replicaCount = int32(math.Ceil(usageRatio * float64(currentReplicas)))

    readyPodCount, err := c.getReadyPodsCount(namespace, selector)

    if err != nil {
        return 0, 0, time.Time{}, fmt.Errorf("unable to calculate ready pods: %s", err)
    }

    replicaCount = int32(math.Ceil(usageRatio * float64(readyPodCount)))

    return replicaCount, utilization, timestamp, nil
}

// @TODO(mattjmcnaughton) Many different functions in this module use variations
// of this function. Make this function generic, so we don't repeat the same
// logic in multiple places.
func (c *ReplicaCalculator) getReadyPodsCount(namespace string, selector labels.Selector) (int64, error) {
    podList, err := c.podsGetter.Pods(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
    if err != nil {
        return 0, fmt.Errorf("unable to get pods while calculating replica count: %v", err)
    }

    if len(podList.Items) == 0 {
        return 0, fmt.Errorf("no pods returned by selector while calculating replica count")
    }

    readyPodCount := 0

    for _, pod := range podList.Items {
        if pod.Status.Phase == v1.PodRunning && podutil.IsPodReady(&pod) {
            readyPodCount++
        }
    }

    return int64(readyPodCount), nil
}

// GetExternalMetricReplicas calculates the desired replica count based on a
// target metric value (as a milli-value) for the external metric in the given
// namespace, and the current replica count.
func (c *ReplicaCalculator) GetExternalMetricReplicas(currentReplicas int32, targetUtilization int64, metricName, namespace string, selector *metav1.LabelSelector) (replicaCount int32, utilization int64, timestamp time.Time, err error) {
    labelSelector, err := metav1.LabelSelectorAsSelector(selector)
func (c *ReplicaCalculator) GetExternalMetricReplicas(currentReplicas int32, targetUtilization int64, metricName, namespace string, metricSelector *metav1.LabelSelector, podSelector labels.Selector) (replicaCount int32, utilization int64, timestamp time.Time, err error) {
    metricLabelSelector, err := metav1.LabelSelectorAsSelector(metricSelector)
    if err != nil {
        return 0, 0, time.Time{}, err
    }
    metrics, timestamp, err := c.metricsClient.GetExternalMetric(metricName, namespace, labelSelector)
    metrics, timestamp, err := c.metricsClient.GetExternalMetric(metricName, namespace, metricLabelSelector)
    if err != nil {
        return 0, 0, time.Time{}, fmt.Errorf("unable to get external metric %s/%s/%+v: %s", namespace, metricName, selector, err)
        return 0, 0, time.Time{}, fmt.Errorf("unable to get external metric %s/%s/%+v: %s", namespace, metricName, metricSelector, err)
    }
    utilization = 0
    for _, val := range metrics {
        utilization = utilization + val
    }

    readyPodCount, err := c.getReadyPodsCount(namespace, podSelector)

    if err != nil {
        return 0, 0, time.Time{}, fmt.Errorf("unable to calculate ready pods: %s", err)
    }

    usageRatio := float64(utilization) / float64(targetUtilization)
    if math.Abs(1.0-usageRatio) <= c.tolerance {
        // return the current replicas if the change would be too small
        return currentReplicas, utilization, timestamp, nil
    }

    return int32(math.Ceil(usageRatio * float64(currentReplicas))), utilization, timestamp, nil
    return int32(math.Ceil(usageRatio * float64(readyPodCount))), utilization, timestamp, nil
}

// GetExternalPerPodMetricReplicas calculates the desired replica count based on a
// target metric value per pod (as a milli-value) for the external metric in the
// given namespace, and the current replica count.
func (c *ReplicaCalculator) GetExternalPerPodMetricReplicas(currentReplicas int32, targetUtilizationPerPod int64, metricName, namespace string, selector *metav1.LabelSelector) (replicaCount int32, utilization int64, timestamp time.Time, err error) {
    labelSelector, err := metav1.LabelSelectorAsSelector(selector)
func (c *ReplicaCalculator) GetExternalPerPodMetricReplicas(currentReplicas int32, targetUtilizationPerPod int64, metricName, namespace string, metricSelector *metav1.LabelSelector) (replicaCount int32, utilization int64, timestamp time.Time, err error) {
    metricLabelSelector, err := metav1.LabelSelectorAsSelector(metricSelector)
    if err != nil {
        return 0, 0, time.Time{}, err
    }
    metrics, timestamp, err := c.metricsClient.GetExternalMetric(metricName, namespace, labelSelector)
    metrics, timestamp, err := c.metricsClient.GetExternalMetric(metricName, namespace, metricLabelSelector)
    if err != nil {
        return 0, 0, time.Time{}, fmt.Errorf("unable to get external metric %s/%s/%+v: %s", namespace, metricName, selector, err)
        return 0, 0, time.Time{}, fmt.Errorf("unable to get external metric %s/%s/%+v: %s", namespace, metricName, metricSelector, err)
    }
    utilization = 0
    for _, val := range metrics {
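Both the object-metric and external-metric paths above now multiply the usage ratio by the ready-pod count instead of the spec replica count. A worked example using the numbers from TestReplicaCalcScaleUpCMObjectIgnoresUnreadyPods in the test file below (three pods, only one ready, metric level 50000 against a target of 10000):

package main

import (
    "fmt"
    "math"
)

func main() {
    // Numbers from TestReplicaCalcScaleUpCMObjectIgnoresUnreadyPods below.
    utilization, target := 50000.0, 10000.0
    readyPodCount := 1.0 // one of the three pods is ready

    usageRatio := utilization / target // 5.0
    proposal := int32(math.Ceil(usageRatio * readyPodCount))
    fmt.Println(proposal) // 5
    // Multiplying by the old currentReplicas (3) would have proposed
    // ceil(5.0 * 3) = 15 instead, as the test's comment notes.
}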
99
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/replica_calculator_test.go
generated
vendored
@ -24,6 +24,7 @@ import (

    autoscalingv2 "k8s.io/api/autoscaling/v2beta1"
    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/meta/testrestmapper"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
@ -77,6 +78,7 @@ type replicaCalcTestCase struct {
    metric *metricInfo

    podReadiness []v1.ConditionStatus
    podPhase     []v1.PodPhase
}

const (
@ -90,15 +92,24 @@ func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) (*fake.Clientset,
    fakeClient := &fake.Clientset{}
    fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
        obj := &v1.PodList{}
        for i := 0; i < int(tc.currentReplicas); i++ {
        podsCount := int(tc.currentReplicas)
        // Failed pods are not included in tc.currentReplicas
        if tc.podPhase != nil && len(tc.podPhase) > podsCount {
            podsCount = len(tc.podPhase)
        }
        for i := 0; i < podsCount; i++ {
            podReadiness := v1.ConditionTrue
            if tc.podReadiness != nil {
            if tc.podReadiness != nil && i < len(tc.podReadiness) {
                podReadiness = tc.podReadiness[i]
            }
            podPhase := v1.PodRunning
            if tc.podPhase != nil {
                podPhase = tc.podPhase[i]
            }
            podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
            pod := v1.Pod{
                Status: v1.PodStatus{
                    Phase: v1.PodRunning,
                    Phase: podPhase,
                    Conditions: []v1.PodCondition{
                        {
                            Type: v1.PodReady,
@ -211,7 +222,7 @@ func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) (*fake.Clientset,
        return true, &metrics, nil
    }
    name := getForAction.GetName()
    mapper := legacyscheme.Registry.RESTMapper()
    mapper := testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)
    metrics := &cmapi.MetricValueList{}
    assert.NotNil(t, tc.metric.singleObject, "should have only requested a single-object metric when calling GetObjectMetricReplicas")
    gk := schema.FromAPIVersionAndKind(tc.metric.singleObject.APIVersion, tc.metric.singleObject.Kind).GroupKind()
@ -219,7 +230,7 @@ func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) (*fake.Clientset,
    if err != nil {
        return true, nil, fmt.Errorf("unable to get mapping for %s: %v", gk.String(), err)
    }
    groupResource := schema.GroupResource{Group: mapping.GroupVersionKind.Group, Resource: mapping.Resource}
    groupResource := mapping.Resource.GroupResource()

    assert.Equal(t, groupResource.String(), getForAction.GetResource().Resource, "should have requested metrics for the resource matching the GroupKind passed in")
    assert.Equal(t, tc.metric.singleObject.Name, name, "should have requested metrics for the object matching the name passed in")
@ -313,10 +324,10 @@ func (tc *replicaCalcTestCase) runTest(t *testing.T) {
    var outTimestamp time.Time
    var err error
    if tc.metric.singleObject != nil {
        outReplicas, outUtilization, outTimestamp, err = replicaCalc.GetObjectMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, tc.metric.singleObject)
        outReplicas, outUtilization, outTimestamp, err = replicaCalc.GetObjectMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, tc.metric.singleObject, selector)
    } else if tc.metric.selector != nil {
        if tc.metric.targetUtilization > 0 {
            outReplicas, outUtilization, outTimestamp, err = replicaCalc.GetExternalMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, tc.metric.selector)
            outReplicas, outUtilization, outTimestamp, err = replicaCalc.GetExternalMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, tc.metric.selector, selector)
        } else if tc.metric.perPodTargetUtilization > 0 {
            outReplicas, outUtilization, outTimestamp, err = replicaCalc.GetExternalPerPodMetricReplicas(tc.currentReplicas, tc.metric.perPodTargetUtilization, tc.metric.name, testNamespace, tc.metric.selector)
        }
@ -405,6 +416,25 @@ func TestReplicaCalcScaleUpUnreadyNoScale(t *testing.T) {
    tc.runTest(t)
}

func TestReplicaCalcScaleUpIgnoresFailedPods(t *testing.T) {
    tc := replicaCalcTestCase{
        currentReplicas:  2,
        expectedReplicas: 4,
        podReadiness:     []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
        podPhase:         []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodFailed, v1.PodFailed},
        resource: &resourceInfo{
            name:     v1.ResourceCPU,
            requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
            levels:   []int64{500, 700},

            targetUtilization:   30,
            expectedUtilization: 60,
            expectedValue:       numContainersPerPod * 600,
        },
    }
    tc.runTest(t)
}

func TestReplicaCalcScaleUpCM(t *testing.T) {
    tc := replicaCalcTestCase{
        currentReplicas: 3,
@ -468,6 +498,26 @@ func TestReplicaCalcScaleUpCMObject(t *testing.T) {
    tc.runTest(t)
}

func TestReplicaCalcScaleUpCMObjectIgnoresUnreadyPods(t *testing.T) {
    tc := replicaCalcTestCase{
        currentReplicas:  3,
        expectedReplicas: 5, // If we did not ignore unready pods, we'd expect 15 replicas.
        podReadiness:     []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionFalse},
        metric: &metricInfo{
            name:                "qps",
            levels:              []int64{50000},
            targetUtilization:   10000,
            expectedUtilization: 50000,
            singleObject: &autoscalingv2.CrossVersionObjectReference{
                Kind:       "Deployment",
                APIVersion: "extensions/v1beta1",
                Name:       "some-deployment",
            },
        },
    }
    tc.runTest(t)
}

func TestReplicaCalcScaleUpCMExternal(t *testing.T) {
    tc := replicaCalcTestCase{
        currentReplicas: 1,
@ -483,6 +533,22 @@ func TestReplicaCalcScaleUpCMExternal(t *testing.T) {
    tc.runTest(t)
}

func TestReplicaCalcScaleUpCMExternalIgnoresUnreadyPods(t *testing.T) {
    tc := replicaCalcTestCase{
        currentReplicas:  3,
        expectedReplicas: 2, // Would expect 6 if we didn't ignore unready pods
        podReadiness:     []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionFalse},
        metric: &metricInfo{
            name:                "qps",
            levels:              []int64{8600},
            targetUtilization:   4400,
            expectedUtilization: 8600,
            selector:            &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
        },
    }
    tc.runTest(t)
}

func TestReplicaCalcScaleUpCMExternalNoLabels(t *testing.T) {
    tc := replicaCalcTestCase{
        currentReplicas: 1,
@ -610,6 +676,25 @@ func TestReplicaCalcScaleDownIgnoresUnreadyPods(t *testing.T) {
    tc.runTest(t)
}

func TestReplicaCalcScaleDownIgnoresFailedPods(t *testing.T) {
    tc := replicaCalcTestCase{
        currentReplicas:  5,
        expectedReplicas: 3,
        podReadiness:     []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
        podPhase:         []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodFailed, v1.PodFailed},
        resource: &resourceInfo{
            name:     v1.ResourceCPU,
            requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
            levels:   []int64{100, 300, 500, 250, 250},

            targetUtilization:   50,
            expectedUtilization: 28,
            expectedValue:       numContainersPerPod * 280,
        },
    }
    tc.runTest(t)
}

func TestReplicaCalcTolerance(t *testing.T) {
    tc := replicaCalcTestCase{
        currentReplicas: 3,
12
vendor/k8s.io/kubernetes/pkg/controller/replicaset/BUILD
generated
vendored
@ -19,22 +19,22 @@ go_library(
        "//pkg/controller:go_default_library",
        "//pkg/util/metrics:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/apps/v1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/informers/apps/v1:go_default_library",
        "//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/informers/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/listers/apps/v1:go_default_library",
        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
        "//vendor/k8s.io/client-go/util/integer:go_default_library",
@ -50,14 +50,14 @@ go_test(
    ],
    embed = [":go_default_library"],
    deps = [
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/controller:go_default_library",
        "//pkg/securitycontext:go_default_library",
        "//vendor/k8s.io/api/apps/v1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
36
vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set.go
generated
vendored
36
vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set.go
generated
vendored
@ -36,21 +36,21 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
appsinformers "k8s.io/client-go/informers/apps/v1"
|
||||
coreinformers "k8s.io/client-go/informers/core/v1"
|
||||
extensionsinformers "k8s.io/client-go/informers/extensions/v1beta1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
appslisters "k8s.io/client-go/listers/apps/v1"
|
||||
corelisters "k8s.io/client-go/listers/core/v1"
|
||||
extensionslisters "k8s.io/client-go/listers/extensions/v1beta1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/integer"
|
||||
@ -90,7 +90,7 @@ type ReplicaSetController struct {
|
||||
expectations *controller.UIDTrackingControllerExpectations
|
||||
|
||||
// A store of ReplicaSets, populated by the shared informer passed to NewReplicaSetController
|
||||
rsLister extensionslisters.ReplicaSetLister
|
||||
rsLister appslisters.ReplicaSetLister
|
||||
// rsListerSynced returns true if the pod store has been synced at least once.
|
||||
// Added as a member to the struct to allow injection for testing.
|
||||
rsListerSynced cache.InformerSynced
|
||||
@ -106,12 +106,12 @@ type ReplicaSetController struct {
|
||||
}
|
||||
|
||||
// NewReplicaSetController configures a replica set controller with the specified event recorder
|
||||
func NewReplicaSetController(rsInformer extensionsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicaSetController {
|
||||
func NewReplicaSetController(rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicaSetController {
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartLogging(glog.Infof)
|
||||
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
|
||||
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
|
||||
return NewBaseController(rsInformer, podInformer, kubeClient, burstReplicas,
|
||||
extensions.SchemeGroupVersion.WithKind("ReplicaSet"),
|
||||
apps.SchemeGroupVersion.WithKind("ReplicaSet"),
|
||||
"replicaset_controller",
|
||||
"replicaset",
|
||||
controller.RealPodControl{
|
||||
@ -123,7 +123,7 @@ func NewReplicaSetController(rsInformer extensionsinformers.ReplicaSetInformer,
|
||||
|
||||
// NewBaseController is the implementation of NewReplicaSetController with additional injected
|
||||
// parameters so that it can also serve as the implementation of NewReplicationController.
|
||||
func NewBaseController(rsInformer extensionsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int,
|
||||
func NewBaseController(rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int,
|
||||
gvk schema.GroupVersionKind, metricOwnerName, queueName string, podControl controller.PodControlInterface) *ReplicaSetController {
|
||||
if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
|
||||
metrics.RegisterMetricAndTrackRateLimiterUsage(metricOwnerName, kubeClient.CoreV1().RESTClient().GetRateLimiter())
|
||||
@ -194,7 +194,7 @@ func (rsc *ReplicaSetController) Run(workers int, stopCh <-chan struct{}) {
}

// getPodReplicaSets returns a list of ReplicaSets matching the given pod.
func (rsc *ReplicaSetController) getPodReplicaSets(pod *v1.Pod) []*extensions.ReplicaSet {
func (rsc *ReplicaSetController) getPodReplicaSets(pod *v1.Pod) []*apps.ReplicaSet {
rss, err := rsc.rsLister.GetPodReplicaSets(pod)
if err != nil {
return nil
@ -210,7 +210,7 @@ func (rsc *ReplicaSetController) getPodReplicaSets(pod *v1.Pod) []*extensions.Re
// resolveControllerRef returns the controller referenced by a ControllerRef,
// or nil if the ControllerRef could not be resolved to a matching controller
// of the correct Kind.
func (rsc *ReplicaSetController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *extensions.ReplicaSet {
func (rsc *ReplicaSetController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *apps.ReplicaSet {
// We can't look up by UID, so look up by Name and then verify UID.
// Don't even try to look up by Name if it's the wrong Kind.
if controllerRef.Kind != rsc.Kind {
@ -230,8 +230,8 @@ func (rsc *ReplicaSetController) resolveControllerRef(namespace string, controll

// callback when RS is updated
func (rsc *ReplicaSetController) updateRS(old, cur interface{}) {
oldRS := old.(*extensions.ReplicaSet)
curRS := cur.(*extensions.ReplicaSet)
oldRS := old.(*apps.ReplicaSet)
curRS := cur.(*apps.ReplicaSet)

// You might imagine that we only really need to enqueue the
// replica set when Spec changes, but it is safer to sync any
@ -407,7 +407,7 @@ func (rsc *ReplicaSetController) deletePod(obj interface{}) {
rsc.enqueueReplicaSet(rs)
}

// obj could be an *extensions.ReplicaSet, or a DeletionFinalStateUnknown marker item.
// obj could be an *apps.ReplicaSet, or a DeletionFinalStateUnknown marker item.
func (rsc *ReplicaSetController) enqueueReplicaSet(obj interface{}) {
key, err := controller.KeyFunc(obj)
if err != nil {
@ -417,7 +417,7 @@ func (rsc *ReplicaSetController) enqueueReplicaSet(obj interface{}) {
rsc.queue.Add(key)
}

// obj could be an *extensions.ReplicaSet, or a DeletionFinalStateUnknown marker item.
// obj could be an *apps.ReplicaSet, or a DeletionFinalStateUnknown marker item.
func (rsc *ReplicaSetController) enqueueReplicaSetAfter(obj interface{}, after time.Duration) {
key, err := controller.KeyFunc(obj)
if err != nil {
@ -456,7 +456,7 @@ func (rsc *ReplicaSetController) processNextWorkItem() bool {
// manageReplicas checks and updates replicas for the given ReplicaSet.
// Does NOT modify <filteredPods>.
// It will requeue the replica set in case of an error while creating/deleting pods.
func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *extensions.ReplicaSet) error {
func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *apps.ReplicaSet) error {
diff := len(filteredPods) - int(*(rs.Spec.Replicas))
rsKey, err := controller.KeyFunc(rs)
if err != nil {
@ -626,7 +626,7 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
newStatus := calculateStatus(rs, filteredPods, manageReplicasErr)

// Always updates status as pods come up or die.
updatedRS, err := updateReplicaSetStatus(rsc.kubeClient.ExtensionsV1beta1().ReplicaSets(rs.Namespace), rs, newStatus)
updatedRS, err := updateReplicaSetStatus(rsc.kubeClient.AppsV1().ReplicaSets(rs.Namespace), rs, newStatus)
if err != nil {
// Multiple things could lead to this update failing. Requeuing the replica set ensures
// Returning an error causes a requeue without forcing a hotloop
@ -641,11 +641,11 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
return manageReplicasErr
}

func (rsc *ReplicaSetController) claimPods(rs *extensions.ReplicaSet, selector labels.Selector, filteredPods []*v1.Pod) ([]*v1.Pod, error) {
func (rsc *ReplicaSetController) claimPods(rs *apps.ReplicaSet, selector labels.Selector, filteredPods []*v1.Pod) ([]*v1.Pod, error) {
// If any adoptions are attempted, we should first recheck for deletion with
// an uncached quorum read sometime after listing Pods (see #42639).
canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
fresh, err := rsc.kubeClient.ExtensionsV1beta1().ReplicaSets(rs.Namespace).Get(rs.Name, metav1.GetOptions{})
fresh, err := rsc.kubeClient.AppsV1().ReplicaSets(rs.Namespace).Get(rs.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
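The hunks above migrate ReplicaSetController from extensions/v1beta1 to apps/v1: the informer wiring, the status-update client, and the uncached quorum re-read in claimPods all switch to AppsV1(). A minimal sketch of the resulting client pattern, assuming an in-cluster config and a hypothetical default/foobar ReplicaSet; the Get signature without a context argument matches the client-go vintage vendored here:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
)

func main() {
	// In-cluster config is just one convenient way to obtain a rest.Config.
	config, err := restclient.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := clientset.NewForConfigOrDie(config)

	// apps/v1 replaces extensions/v1beta1 as the ReplicaSet client surface;
	// this mirrors the uncached re-read done in claimPods above.
	rs, err := client.AppsV1().ReplicaSets("default").Get("foobar", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("ReplicaSet %s wants %d replicas\n", rs.Name, *rs.Spec.Replicas)
}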
176
vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set_test.go
generated
vendored
@ -28,11 +28,12 @@ import (
"testing"
"time"

apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
@ -45,7 +46,6 @@ import (
"k8s.io/client-go/tools/cache"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/securitycontext"
)
@ -54,7 +54,7 @@ func testNewReplicaSetControllerFromClient(client clientset.Interface, stopCh ch
informers := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())

ret := NewReplicaSetController(
informers.Extensions().V1beta1().ReplicaSets(),
informers.Apps().V1().ReplicaSets(),
informers.Core().V1().Pods(),
client,
burstReplicas,
@ -78,7 +78,7 @@ func skipListerFunc(verb string, url url.URL) bool {

var alwaysReady = func() bool { return true }

func getKey(rs *extensions.ReplicaSet, t *testing.T) string {
func getKey(rs *apps.ReplicaSet, t *testing.T) string {
if key, err := controller.KeyFunc(rs); err != nil {
t.Errorf("Unexpected error getting key for ReplicaSet %v: %v", rs.Name, err)
return ""
@ -87,16 +87,16 @@ func getKey(rs *extensions.ReplicaSet, t *testing.T) string {
}
}

func newReplicaSet(replicas int, selectorMap map[string]string) *extensions.ReplicaSet {
rs := &extensions.ReplicaSet{
TypeMeta: metav1.TypeMeta{APIVersion: legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
func newReplicaSet(replicas int, selectorMap map[string]string) *apps.ReplicaSet {
rs := &apps.ReplicaSet{
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
Namespace: metav1.NamespaceDefault,
ResourceVersion: "18",
},
Spec: extensions.ReplicaSetSpec{
Spec: apps.ReplicaSetSpec{
Replicas: func() *int32 { i := int32(replicas); return &i }(),
Selector: &metav1.LabelSelector{MatchLabels: selectorMap},
Template: v1.PodTemplateSpec{
@ -128,7 +128,7 @@ func newReplicaSet(replicas int, selectorMap map[string]string) *extensions.Repl
}

// create a pod with the given phase for the given rs (same selectors and namespace)
func newPod(name string, rs *extensions.ReplicaSet, status v1.PodPhase, lastTransitionTime *metav1.Time, properlyOwned bool) *v1.Pod {
func newPod(name string, rs *apps.ReplicaSet, status v1.PodPhase, lastTransitionTime *metav1.Time, properlyOwned bool) *v1.Pod {
var conditions []v1.PodCondition
if status == v1.PodRunning {
condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
@ -154,7 +154,7 @@ func newPod(name string, rs *extensions.ReplicaSet, status v1.PodPhase, lastTran
}

// create count pods with the given phase for the given ReplicaSet (same selectors and namespace), and add them to the store.
func newPodList(store cache.Store, count int, status v1.PodPhase, labelMap map[string]string, rs *extensions.ReplicaSet, name string) *v1.PodList {
func newPodList(store cache.Store, count int, status v1.PodPhase, labelMap map[string]string, rs *apps.ReplicaSet, name string) *v1.PodList {
pods := []v1.Pod{}
var trueVar = true
controllerReference := metav1.OwnerReference{UID: rs.UID, APIVersion: "v1beta1", Kind: "ReplicaSet", Name: rs.Name, Controller: &trueVar}
@ -203,7 +203,7 @@ func validateSyncReplicaSet(t *testing.T, fakePodControl *controller.FakePodCont
}

func TestSyncReplicaSetDoesNothing(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
fakePodControl := controller.FakePodControl{}
stopCh := make(chan struct{})
defer close(stopCh)
@ -212,7 +212,7 @@ func TestSyncReplicaSetDoesNothing(t *testing.T) {
// 2 running pods, a controller with 2 replicas, sync is a no-op
labelMap := map[string]string{"foo": "bar"}
rsSpec := newReplicaSet(2, labelMap)
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 2, v1.PodRunning, labelMap, rsSpec, "pod")

manager.podControl = &fakePodControl
@ -221,7 +221,7 @@ func TestSyncReplicaSetDoesNothing(t *testing.T) {
}

func TestDeleteFinalStateUnknown(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
fakePodControl := controller.FakePodControl{}
stopCh := make(chan struct{})
defer close(stopCh)
@ -238,7 +238,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
// the controller matching the selectors of the deleted pod into the work queue.
labelMap := map[string]string{"foo": "bar"}
rsSpec := newReplicaSet(1, labelMap)
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
pods := newPodList(nil, 1, v1.PodRunning, labelMap, rsSpec, "pod")
manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]})

@ -268,7 +268,7 @@ func TestSyncReplicaSetCreateFailures(t *testing.T) {
defer close(stopCh)
manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas)

informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)

manager.podControl = &fakePodControl
manager.syncReplicaSet(getKey(rs, t))
@ -292,7 +292,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
}
testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})

fakePodControl := controller.FakePodControl{}
stopCh := make(chan struct{})
@ -303,7 +303,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {

labelMap := map[string]string{"foo": "bar"}
rsSpec := newReplicaSet(2, labelMap)
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 1, v1.PodRunning, labelMap, rsSpec, "pod")

// Creates a replica and sets expectations
@ -353,25 +353,25 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
func TestPodControllerLookup(t *testing.T) {
stopCh := make(chan struct{})
defer close(stopCh)
manager, informers := testNewReplicaSetControllerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}}), stopCh, BurstReplicas)
manager, informers := testNewReplicaSetControllerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}), stopCh, BurstReplicas)
testCases := []struct {
inRSs []*extensions.ReplicaSet
inRSs []*apps.ReplicaSet
pod *v1.Pod
outRSName string
}{
// pods without labels don't match any ReplicaSets
{
inRSs: []*extensions.ReplicaSet{
inRSs: []*apps.ReplicaSet{
{ObjectMeta: metav1.ObjectMeta{Name: "basic"}}},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo1", Namespace: metav1.NamespaceAll}},
outRSName: "",
},
// Matching labels, not namespace
{
inRSs: []*extensions.ReplicaSet{
inRSs: []*apps.ReplicaSet{
{
ObjectMeta: metav1.ObjectMeta{Name: "foo"},
Spec: extensions.ReplicaSetSpec{
Spec: apps.ReplicaSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
},
},
@ -383,10 +383,10 @@ func TestPodControllerLookup(t *testing.T) {
},
// Matching ns and labels returns the key to the ReplicaSet, not the ReplicaSet name
{
inRSs: []*extensions.ReplicaSet{
inRSs: []*apps.ReplicaSet{
{
ObjectMeta: metav1.ObjectMeta{Name: "bar", Namespace: "ns"},
Spec: extensions.ReplicaSetSpec{
Spec: apps.ReplicaSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
},
},
@ -399,7 +399,7 @@ func TestPodControllerLookup(t *testing.T) {
}
for _, c := range testCases {
for _, r := range c.inRSs {
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(r)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(r)
}
if rss := manager.getPodReplicaSets(c.pod); rss != nil {
if len(rss) != 1 {
@ -424,25 +424,25 @@ func TestWatchControllers(t *testing.T) {
defer close(stopCh)
informers := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
manager := NewReplicaSetController(
informers.Extensions().V1beta1().ReplicaSets(),
informers.Apps().V1().ReplicaSets(),
informers.Core().V1().Pods(),
client,
BurstReplicas,
)
informers.Start(stopCh)

var testRSSpec extensions.ReplicaSet
var testRSSpec apps.ReplicaSet
received := make(chan string)

// The update sent through the fakeWatcher should make its way into the workqueue,
// and eventually into the syncHandler. The handler validates the received controller
// and closes the received channel to indicate that the test can finish.
manager.syncHandler = func(key string) error {
obj, exists, err := informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().GetByKey(key)
obj, exists, err := informers.Apps().V1().ReplicaSets().Informer().GetIndexer().GetByKey(key)
if !exists || err != nil {
t.Errorf("Expected to find replica set under key %v", key)
}
rsSpec := *obj.(*extensions.ReplicaSet)
rsSpec := *obj.(*apps.ReplicaSet)
if !apiequality.Semantic.DeepDerivative(rsSpec, testRSSpec) {
t.Errorf("Expected %#v, but got %#v", testRSSpec, rsSpec)
}
@ -477,7 +477,7 @@ func TestWatchPods(t *testing.T) {
// Put one ReplicaSet into the shared informer
labelMap := map[string]string{"foo": "bar"}
testRSSpec := newReplicaSet(1, labelMap)
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(testRSSpec)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(testRSSpec)

received := make(chan string)
// The pod update sent through the fakeWatcher should figure out the managing ReplicaSet and
@ -540,12 +540,12 @@ func TestUpdatePods(t *testing.T) {
// Put 2 ReplicaSets and one pod into the informers
labelMap1 := map[string]string{"foo": "bar"}
testRSSpec1 := newReplicaSet(1, labelMap1)
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(testRSSpec1)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(testRSSpec1)
testRSSpec2 := *testRSSpec1
labelMap2 := map[string]string{"bar": "foo"}
testRSSpec2.Spec.Selector = &metav1.LabelSelector{MatchLabels: labelMap2}
testRSSpec2.Name = "barfoo"
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(&testRSSpec2)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(&testRSSpec2)

isController := true
controllerRef1 := metav1.OwnerReference{UID: testRSSpec1.UID, APIVersion: "v1", Kind: "ReplicaSet", Name: testRSSpec1.Name, Controller: &isController}
@ -656,8 +656,8 @@ func TestControllerUpdateRequeue(t *testing.T) {
defer close(stopCh)
manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas)

informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
rs.Status = extensions.ReplicaSetStatus{Replicas: 2}
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
rs.Status = apps.ReplicaSetStatus{Replicas: 2}
newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 1, v1.PodRunning, labelMap, rs, "pod")

fakePodControl := controller.FakePodControl{}
@ -678,11 +678,11 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
fakeClient := &fake.Clientset{}
fakeClient.AddReactor("get", "replicasets", func(action core.Action) (bool, runtime.Object, error) { return true, rs, nil })
fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
return true, &extensions.ReplicaSet{}, fmt.Errorf("Fake error")
return true, &apps.ReplicaSet{}, fmt.Errorf("Fake error")
})
fakeRSClient := fakeClient.Extensions().ReplicaSets("default")
fakeRSClient := fakeClient.Apps().ReplicaSets("default")
numReplicas := int32(10)
newStatus := extensions.ReplicaSetStatus{Replicas: numReplicas}
newStatus := apps.ReplicaSetStatus{Replicas: numReplicas}
updateReplicaSetStatus(fakeRSClient, rs, newStatus)
updates, gets := 0, 0
for _, a := range fakeClient.Actions() {
@ -702,7 +702,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
updates++
// Confirm that the update has the right status.Replicas even though the Get
// returned a ReplicaSet with replicas=1.
if c, ok := action.GetObject().(*extensions.ReplicaSet); !ok {
if c, ok := action.GetObject().(*apps.ReplicaSet); !ok {
t.Errorf("Expected a ReplicaSet as the argument to update, got %T", c)
} else if c.Status.Replicas != numReplicas {
t.Errorf("Expected update for ReplicaSet to contain replicas %v, got %v instead",
@ -729,7 +729,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, burstReplicas)
manager.podControl = &fakePodControl

informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)

expectedPods := int32(0)
pods := newPodList(nil, numReplicas, v1.PodPending, labelMap, rsSpec, "pod")
@ -743,7 +743,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
for _, replicas := range []int32{int32(numReplicas), 0} {

*(rsSpec.Spec.Replicas) = replicas
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)

for i := 0; i < numReplicas; i += burstReplicas {
manager.syncReplicaSet(getKey(rsSpec, t))
@ -881,7 +881,7 @@ func (fe FakeRSExpectations) SatisfiedExpectations(controllerKey string) bool {
// TestRSSyncExpectations tests that a pod cannot sneak in between counting active pods
// and checking expectations.
func TestRSSyncExpectations(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
fakePodControl := controller.FakePodControl{}
stopCh := make(chan struct{})
defer close(stopCh)
@ -890,7 +890,7 @@ func TestRSSyncExpectations(t *testing.T) {

labelMap := map[string]string{"foo": "bar"}
rsSpec := newReplicaSet(2, labelMap)
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
pods := newPodList(nil, 2, v1.PodPending, labelMap, rsSpec, "pod")
informers.Core().V1().Pods().Informer().GetIndexer().Add(&pods.Items[0])
postExpectationsPod := pods.Items[1]
@ -914,7 +914,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
defer close(stopCh)
manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, 10)

informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)

fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
@ -936,7 +936,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
if !exists || err != nil {
t.Errorf("No expectations found for ReplicaSet")
}
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Delete(rs)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Delete(rs)
manager.syncReplicaSet(getKey(rs, t))

if _, exists, err = manager.expectations.GetExpectations(rsKey); exists {
@ -951,10 +951,10 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
}

// shuffle returns a new shuffled list of container controllers.
func shuffle(controllers []*extensions.ReplicaSet) []*extensions.ReplicaSet {
func shuffle(controllers []*apps.ReplicaSet) []*apps.ReplicaSet {
numControllers := len(controllers)
randIndexes := rand.Perm(numControllers)
shuffled := make([]*extensions.ReplicaSet, numControllers)
shuffled := make([]*apps.ReplicaSet, numControllers)
for i := 0; i < numControllers; i++ {
shuffled[i] = controllers[randIndexes[i]]
}
@ -962,7 +962,7 @@ func shuffle(controllers []*extensions.ReplicaSet) []*extensions.ReplicaSet {
}

func TestOverlappingRSs(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
labelMap := map[string]string{"foo": "bar"}

stopCh := make(chan struct{})
@ -974,7 +974,7 @@ func TestOverlappingRSs(t *testing.T) {
// All use the same CreationTimestamp since ControllerRef should be able
// to handle that.
timestamp := metav1.Date(2014, time.December, 0, 0, 0, 0, 0, time.Local)
var controllers []*extensions.ReplicaSet
var controllers []*apps.ReplicaSet
for j := 1; j < 10; j++ {
rsSpec := newReplicaSet(1, labelMap)
rsSpec.CreationTimestamp = timestamp
@ -983,7 +983,7 @@ func TestOverlappingRSs(t *testing.T) {
}
shuffledControllers := shuffle(controllers)
for j := range shuffledControllers {
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(shuffledControllers[j])
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(shuffledControllers[j])
}
// Add a pod with a ControllerRef and make sure only the corresponding
// ReplicaSet is synced. Pick a RS in the middle since the old code used to
@ -1005,14 +1005,14 @@ func TestOverlappingRSs(t *testing.T) {
}

func TestDeletionTimestamp(t *testing.T) {
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
labelMap := map[string]string{"foo": "bar"}
stopCh := make(chan struct{})
defer close(stopCh)
manager, informers := testNewReplicaSetControllerFromClient(c, stopCh, 10)

rs := newReplicaSet(1, labelMap)
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
rsKey, err := controller.KeyFunc(rs)
if err != nil {
t.Errorf("Couldn't get key for object %#v: %v", rs, err)
@ -1116,7 +1116,7 @@ func TestDoNotPatchPodWithOtherControlRef(t *testing.T) {
stopCh := make(chan struct{})
defer close(stopCh)
manager, fakePodControl, informers := setupManagerWithGCEnabled(stopCh, rs)
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
var trueVar = true
otherControllerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "v1beta1", Kind: "ReplicaSet", Name: "AnotherRS", Controller: &trueVar}
// add to podLister a matching Pod controlled by another controller. Expect no patch.
@ -1137,7 +1137,7 @@ func TestPatchPodFails(t *testing.T) {
stopCh := make(chan struct{})
defer close(stopCh)
manager, fakePodControl, informers := setupManagerWithGCEnabled(stopCh, rs)
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
// add to podLister two matching pods. Expect two patches to take control
// them.
informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod1", rs, v1.PodRunning, nil, false))
@ -1169,7 +1169,7 @@ func TestDoNotAdoptOrCreateIfBeingDeleted(t *testing.T) {
stopCh := make(chan struct{})
defer close(stopCh)
manager, fakePodControl, informers := setupManagerWithGCEnabled(stopCh, rs)
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
pod1 := newPod("pod1", rs, v1.PodRunning, nil, false)
informers.Core().V1().Pods().Informer().GetIndexer().Add(pod1)

@ -1193,7 +1193,7 @@ func TestDoNotAdoptOrCreateIfBeingDeletedRace(t *testing.T) {
// Lister (cache) says it's NOT deleted.
rs2 := *rs
rs2.DeletionTimestamp = nil
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(&rs2)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(&rs2)

// Recheck occurs if a matching orphan is present.
pod1 := newPod("pod1", rs, v1.PodRunning, nil, false)
@ -1209,35 +1209,35 @@ func TestDoNotAdoptOrCreateIfBeingDeletedRace(t *testing.T) {
}

var (
imagePullBackOff extensions.ReplicaSetConditionType = "ImagePullBackOff"
imagePullBackOff apps.ReplicaSetConditionType = "ImagePullBackOff"

condImagePullBackOff = func() extensions.ReplicaSetCondition {
return extensions.ReplicaSetCondition{
condImagePullBackOff = func() apps.ReplicaSetCondition {
return apps.ReplicaSetCondition{
Type: imagePullBackOff,
Status: v1.ConditionTrue,
Reason: "NonExistentImage",
}
}

condReplicaFailure = func() extensions.ReplicaSetCondition {
return extensions.ReplicaSetCondition{
Type: extensions.ReplicaSetReplicaFailure,
condReplicaFailure = func() apps.ReplicaSetCondition {
return apps.ReplicaSetCondition{
Type: apps.ReplicaSetReplicaFailure,
Status: v1.ConditionTrue,
Reason: "OtherFailure",
}
}

condReplicaFailure2 = func() extensions.ReplicaSetCondition {
return extensions.ReplicaSetCondition{
Type: extensions.ReplicaSetReplicaFailure,
condReplicaFailure2 = func() apps.ReplicaSetCondition {
return apps.ReplicaSetCondition{
Type: apps.ReplicaSetReplicaFailure,
Status: v1.ConditionTrue,
Reason: "AnotherFailure",
}
}

status = func() *extensions.ReplicaSetStatus {
return &extensions.ReplicaSetStatus{
Conditions: []extensions.ReplicaSetCondition{condReplicaFailure()},
status = func() *apps.ReplicaSetStatus {
return &apps.ReplicaSetStatus{
Conditions: []apps.ReplicaSetCondition{condReplicaFailure()},
}
}
)
@ -1248,8 +1248,8 @@ func TestGetCondition(t *testing.T) {
tests := []struct {
name string

status extensions.ReplicaSetStatus
condType extensions.ReplicaSetConditionType
status apps.ReplicaSetStatus
condType apps.ReplicaSetConditionType
condStatus v1.ConditionStatus
condReason string

@ -1259,7 +1259,7 @@ func TestGetCondition(t *testing.T) {
name: "condition exists",

status: *exampleStatus,
condType: extensions.ReplicaSetReplicaFailure,
condType: apps.ReplicaSetReplicaFailure,

expected: true,
},
@ -1286,34 +1286,34 @@ func TestSetCondition(t *testing.T) {
tests := []struct {
name string

status *extensions.ReplicaSetStatus
cond extensions.ReplicaSetCondition
status *apps.ReplicaSetStatus
cond apps.ReplicaSetCondition

expectedStatus *extensions.ReplicaSetStatus
expectedStatus *apps.ReplicaSetStatus
}{
{
name: "set for the first time",

status: &extensions.ReplicaSetStatus{},
status: &apps.ReplicaSetStatus{},
cond: condReplicaFailure(),

expectedStatus: &extensions.ReplicaSetStatus{Conditions: []extensions.ReplicaSetCondition{condReplicaFailure()}},
expectedStatus: &apps.ReplicaSetStatus{Conditions: []apps.ReplicaSetCondition{condReplicaFailure()}},
},
{
name: "simple set",

status: &extensions.ReplicaSetStatus{Conditions: []extensions.ReplicaSetCondition{condImagePullBackOff()}},
status: &apps.ReplicaSetStatus{Conditions: []apps.ReplicaSetCondition{condImagePullBackOff()}},
cond: condReplicaFailure(),

expectedStatus: &extensions.ReplicaSetStatus{Conditions: []extensions.ReplicaSetCondition{condImagePullBackOff(), condReplicaFailure()}},
expectedStatus: &apps.ReplicaSetStatus{Conditions: []apps.ReplicaSetCondition{condImagePullBackOff(), condReplicaFailure()}},
},
{
name: "overwrite",

status: &extensions.ReplicaSetStatus{Conditions: []extensions.ReplicaSetCondition{condReplicaFailure()}},
status: &apps.ReplicaSetStatus{Conditions: []apps.ReplicaSetCondition{condReplicaFailure()}},
cond: condReplicaFailure2(),

expectedStatus: &extensions.ReplicaSetStatus{Conditions: []extensions.ReplicaSetCondition{condReplicaFailure2()}},
expectedStatus: &apps.ReplicaSetStatus{Conditions: []apps.ReplicaSetCondition{condReplicaFailure2()}},
},
}

@ -1329,26 +1329,26 @@ func TestRemoveCondition(t *testing.T) {
tests := []struct {
name string

status *extensions.ReplicaSetStatus
condType extensions.ReplicaSetConditionType
status *apps.ReplicaSetStatus
condType apps.ReplicaSetConditionType

expectedStatus *extensions.ReplicaSetStatus
expectedStatus *apps.ReplicaSetStatus
}{
{
name: "remove from empty status",

status: &extensions.ReplicaSetStatus{},
condType: extensions.ReplicaSetReplicaFailure,
status: &apps.ReplicaSetStatus{},
condType: apps.ReplicaSetReplicaFailure,

expectedStatus: &extensions.ReplicaSetStatus{},
expectedStatus: &apps.ReplicaSetStatus{},
},
{
name: "simple remove",

status: &extensions.ReplicaSetStatus{Conditions: []extensions.ReplicaSetCondition{condReplicaFailure()}},
condType: extensions.ReplicaSetReplicaFailure,
status: &apps.ReplicaSetStatus{Conditions: []apps.ReplicaSetCondition{condReplicaFailure()}},
condType: apps.ReplicaSetReplicaFailure,

expectedStatus: &extensions.ReplicaSetStatus{},
expectedStatus: &apps.ReplicaSetStatus{},
},
{
name: "doesn't remove anything",
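Several of the tests above drive updateReplicaSetStatus through a fake clientset with registered reactors. A stripped-down sketch of that pattern, assuming the package aliases used in the test file (fake, core) and a trivial fixture; the Apps() accessor and context-free Get match the client-go vintage vendored here:

package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
)

func main() {
	rs := &apps.ReplicaSet{}
	fakeClient := &fake.Clientset{}
	// Serve the fixture for every GET of replicasets instead of contacting
	// an API server; unmatched verbs simply fall through the reaction chain.
	fakeClient.AddReactor("get", "replicasets", func(action core.Action) (bool, runtime.Object, error) {
		return true, rs, nil
	})

	got, err := fakeClient.Apps().ReplicaSets("default").Get("foobar", metav1.GetOptions{})
	fmt.Println(got, err)
}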
30
vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set_utils.go
generated
vendored
@ -24,16 +24,16 @@ import (

"github.com/golang/glog"

apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
unversionedextensions "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)

// updateReplicaSetStatus attempts to update the Status.Replicas of the given ReplicaSet, with a single GET/PUT retry.
func updateReplicaSetStatus(c unversionedextensions.ReplicaSetInterface, rs *extensions.ReplicaSet, newStatus extensions.ReplicaSetStatus) (*extensions.ReplicaSet, error) {
func updateReplicaSetStatus(c appsclient.ReplicaSetInterface, rs *apps.ReplicaSet, newStatus apps.ReplicaSetStatus) (*apps.ReplicaSet, error) {
// This is the steady state. It happens when the ReplicaSet doesn't have any expectations, since
// we do a periodic relist every 30s. If the generations differ but the replicas are
// the same, a caller might've resized to the same replica count.
@ -53,7 +53,7 @@ func updateReplicaSetStatus(c unversionedextensions.ReplicaSetInterface, rs *ext
newStatus.ObservedGeneration = rs.Generation

var getErr, updateErr error
var updatedRS *extensions.ReplicaSet
var updatedRS *apps.ReplicaSet
for i, rs := 0, rs; ; i++ {
glog.V(4).Infof(fmt.Sprintf("Updating status for %v: %s/%s, ", rs.Kind, rs.Namespace, rs.Name) +
fmt.Sprintf("replicas %d->%d (need %d), ", rs.Status.Replicas, newStatus.Replicas, *(rs.Spec.Replicas)) +
@ -82,7 +82,7 @@ func updateReplicaSetStatus(c unversionedextensions.ReplicaSetInterface, rs *ext
return nil, updateErr
}

func calculateStatus(rs *extensions.ReplicaSet, filteredPods []*v1.Pod, manageReplicasErr error) extensions.ReplicaSetStatus {
func calculateStatus(rs *apps.ReplicaSet, filteredPods []*v1.Pod, manageReplicasErr error) apps.ReplicaSetStatus {
newStatus := rs.Status
// Count the number of pods that have labels matching the labels of the pod
// template of the replica set, the matching pods may have more
@ -105,7 +105,7 @@ func calculateStatus(rs *extensions.ReplicaSet, filteredPods []*v1.Pod, manageRe
}
}

failureCond := GetCondition(rs.Status, extensions.ReplicaSetReplicaFailure)
failureCond := GetCondition(rs.Status, apps.ReplicaSetReplicaFailure)
if manageReplicasErr != nil && failureCond == nil {
var reason string
if diff := len(filteredPods) - int(*(rs.Spec.Replicas)); diff < 0 {
@ -113,10 +113,10 @@ func calculateStatus(rs *extensions.ReplicaSet, filteredPods []*v1.Pod, manageRe
} else if diff > 0 {
reason = "FailedDelete"
}
cond := NewReplicaSetCondition(extensions.ReplicaSetReplicaFailure, v1.ConditionTrue, reason, manageReplicasErr.Error())
cond := NewReplicaSetCondition(apps.ReplicaSetReplicaFailure, v1.ConditionTrue, reason, manageReplicasErr.Error())
SetCondition(&newStatus, cond)
} else if manageReplicasErr == nil && failureCond != nil {
RemoveCondition(&newStatus, extensions.ReplicaSetReplicaFailure)
RemoveCondition(&newStatus, apps.ReplicaSetReplicaFailure)
}

newStatus.Replicas = int32(len(filteredPods))
@ -127,8 +127,8 @@ func calculateStatus(rs *extensions.ReplicaSet, filteredPods []*v1.Pod, manageRe
}

// NewReplicaSetCondition creates a new replicaset condition.
func NewReplicaSetCondition(condType extensions.ReplicaSetConditionType, status v1.ConditionStatus, reason, msg string) extensions.ReplicaSetCondition {
return extensions.ReplicaSetCondition{
func NewReplicaSetCondition(condType apps.ReplicaSetConditionType, status v1.ConditionStatus, reason, msg string) apps.ReplicaSetCondition {
return apps.ReplicaSetCondition{
Type: condType,
Status: status,
LastTransitionTime: metav1.Now(),
@ -138,7 +138,7 @@ func NewReplicaSetCondition(condType extensions.ReplicaSetConditionType, status
}

// GetCondition returns a replicaset condition with the provided type if it exists.
func GetCondition(status extensions.ReplicaSetStatus, condType extensions.ReplicaSetConditionType) *extensions.ReplicaSetCondition {
func GetCondition(status apps.ReplicaSetStatus, condType apps.ReplicaSetConditionType) *apps.ReplicaSetCondition {
for _, c := range status.Conditions {
if c.Type == condType {
return &c
@ -149,7 +149,7 @@ func GetCondition(status extensions.ReplicaSetStatus, condType extensions.Replic

// SetCondition adds/replaces the given condition in the replicaset status. If the condition that we
// are about to add already exists and has the same status and reason then we are not going to update.
func SetCondition(status *extensions.ReplicaSetStatus, condition extensions.ReplicaSetCondition) {
func SetCondition(status *apps.ReplicaSetStatus, condition apps.ReplicaSetCondition) {
currentCond := GetCondition(*status, condition.Type)
if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason {
return
@ -159,13 +159,13 @@ func SetCondition(status *extensions.ReplicaSetStatus, condition extensions.Repl
}

// RemoveCondition removes the condition with the provided type from the replicaset status.
func RemoveCondition(status *extensions.ReplicaSetStatus, condType extensions.ReplicaSetConditionType) {
func RemoveCondition(status *apps.ReplicaSetStatus, condType apps.ReplicaSetConditionType) {
status.Conditions = filterOutCondition(status.Conditions, condType)
}

// filterOutCondition returns a new slice of replicaset conditions without conditions with the provided type.
func filterOutCondition(conditions []extensions.ReplicaSetCondition, condType extensions.ReplicaSetConditionType) []extensions.ReplicaSetCondition {
var newConditions []extensions.ReplicaSetCondition
func filterOutCondition(conditions []apps.ReplicaSetCondition, condType apps.ReplicaSetConditionType) []apps.ReplicaSetCondition {
var newConditions []apps.ReplicaSetCondition
for _, c := range conditions {
if c.Type == condType {
continue
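The exported helpers above (NewReplicaSetCondition, SetCondition, GetCondition, RemoveCondition) now operate on apps/v1 types. A small usage sketch; the import of the vendored replicaset package and the "quota exceeded" message are assumptions for illustration:

package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/controller/replicaset"
)

func main() {
	status := apps.ReplicaSetStatus{}

	// Record a creation failure; SetCondition replaces an existing condition
	// of the same type only when its status or reason actually changed.
	cond := replicaset.NewReplicaSetCondition(apps.ReplicaSetReplicaFailure, v1.ConditionTrue, "FailedCreate", "quota exceeded")
	replicaset.SetCondition(&status, cond)

	if c := replicaset.GetCondition(status, apps.ReplicaSetReplicaFailure); c != nil {
		fmt.Println("failure recorded:", c.Reason)
	}

	// Once pods reconcile cleanly, the controller clears the condition again.
	replicaset.RemoveCondition(&status, apps.ReplicaSetReplicaFailure)
}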
40
vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set_utils_test.go
generated
vendored
@ -23,8 +23,8 @@ import (
"reflect"
"testing"

apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
)

func TestCalculateStatus(t *testing.T) {
@ -38,9 +38,9 @@ func TestCalculateStatus(t *testing.T) {

rsStatusTests := []struct {
name string
replicaset *extensions.ReplicaSet
replicaset *apps.ReplicaSet
filteredPods []*v1.Pod
expectedReplicaSetStatus extensions.ReplicaSetStatus
expectedReplicaSetStatus apps.ReplicaSetStatus
}{
{
"1 fully labelled pod",
@ -48,7 +48,7 @@ func TestCalculateStatus(t *testing.T) {
[]*v1.Pod{
newPod("pod1", fullyLabelledRS, v1.PodRunning, nil, true),
},
extensions.ReplicaSetStatus{
apps.ReplicaSetStatus{
Replicas: 1,
FullyLabeledReplicas: 1,
ReadyReplicas: 1,
@ -61,7 +61,7 @@ func TestCalculateStatus(t *testing.T) {
[]*v1.Pod{
newPod("pod1", notFullyLabelledRS, v1.PodRunning, nil, true),
},
extensions.ReplicaSetStatus{
apps.ReplicaSetStatus{
Replicas: 1,
FullyLabeledReplicas: 0,
ReadyReplicas: 1,
@ -75,7 +75,7 @@ func TestCalculateStatus(t *testing.T) {
newPod("pod1", fullyLabelledRS, v1.PodRunning, nil, true),
newPod("pod2", fullyLabelledRS, v1.PodRunning, nil, true),
},
extensions.ReplicaSetStatus{
apps.ReplicaSetStatus{
Replicas: 2,
FullyLabeledReplicas: 2,
ReadyReplicas: 2,
@ -89,7 +89,7 @@ func TestCalculateStatus(t *testing.T) {
newPod("pod1", notFullyLabelledRS, v1.PodRunning, nil, true),
newPod("pod2", notFullyLabelledRS, v1.PodRunning, nil, true),
},
extensions.ReplicaSetStatus{
apps.ReplicaSetStatus{
Replicas: 2,
FullyLabeledReplicas: 0,
ReadyReplicas: 2,
@ -103,7 +103,7 @@ func TestCalculateStatus(t *testing.T) {
newPod("pod1", notFullyLabelledRS, v1.PodRunning, nil, true),
newPod("pod2", fullyLabelledRS, v1.PodRunning, nil, true),
},
extensions.ReplicaSetStatus{
apps.ReplicaSetStatus{
Replicas: 2,
FullyLabeledReplicas: 1,
ReadyReplicas: 2,
@ -116,7 +116,7 @@ func TestCalculateStatus(t *testing.T) {
[]*v1.Pod{
newPod("pod1", fullyLabelledRS, v1.PodPending, nil, true),
},
extensions.ReplicaSetStatus{
apps.ReplicaSetStatus{
Replicas: 1,
FullyLabeledReplicas: 1,
ReadyReplicas: 0,
@ -129,7 +129,7 @@ func TestCalculateStatus(t *testing.T) {
[]*v1.Pod{
newPod("pod1", longMinReadySecondsRS, v1.PodRunning, nil, true),
},
extensions.ReplicaSetStatus{
apps.ReplicaSetStatus{
Replicas: 1,
FullyLabeledReplicas: 1,
ReadyReplicas: 1,
@ -150,19 +150,19 @@ func TestCalculateStatusConditions(t *testing.T) {
labelMap := map[string]string{"name": "foo"}
rs := newReplicaSet(2, labelMap)
replicaFailureRS := newReplicaSet(10, labelMap)
replicaFailureRS.Status.Conditions = []extensions.ReplicaSetCondition{
replicaFailureRS.Status.Conditions = []apps.ReplicaSetCondition{
{
Type: extensions.ReplicaSetReplicaFailure,
Type: apps.ReplicaSetReplicaFailure,
Status: v1.ConditionTrue,
},
}

rsStatusConditionTests := []struct {
name string
replicaset *extensions.ReplicaSet
replicaset *apps.ReplicaSet
filteredPods []*v1.Pod
manageReplicasErr error
expectedReplicaSetConditions []extensions.ReplicaSetCondition
expectedReplicaSetConditions []apps.ReplicaSetCondition
}{

{
@ -172,9 +172,9 @@ func TestCalculateStatusConditions(t *testing.T) {
newPod("pod1", rs, v1.PodRunning, nil, true),
},
fmt.Errorf("fake manageReplicasErr"),
[]extensions.ReplicaSetCondition{
[]apps.ReplicaSetCondition{
{
Type: extensions.ReplicaSetReplicaFailure,
Type: apps.ReplicaSetReplicaFailure,
Status: v1.ConditionTrue,
Reason: "FailedCreate",
Message: "fake manageReplicasErr",
@ -190,9 +190,9 @@ func TestCalculateStatusConditions(t *testing.T) {
newPod("pod3", rs, v1.PodRunning, nil, true),
},
fmt.Errorf("fake manageReplicasErr"),
[]extensions.ReplicaSetCondition{
[]apps.ReplicaSetCondition{
{
Type: extensions.ReplicaSetReplicaFailure,
Type: apps.ReplicaSetReplicaFailure,
Status: v1.ConditionTrue,
Reason: "FailedDelete",
Message: "fake manageReplicasErr",
@ -215,9 +215,9 @@ func TestCalculateStatusConditions(t *testing.T) {
newPod("pod1", replicaFailureRS, v1.PodRunning, nil, true),
},
fmt.Errorf("fake manageReplicasErr"),
[]extensions.ReplicaSetCondition{
[]apps.ReplicaSetCondition{
{
Type: extensions.ReplicaSetReplicaFailure,
Type: apps.ReplicaSetReplicaFailure,
Status: v1.ConditionTrue,
},
},
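The tables above pin down calculateStatus counting: Replicas covers every filtered pod, ReadyReplicas only pods whose PodReady condition is true (further gated by minReadySeconds, elided here). A simplified, self-contained restatement of that counting, not the vendored implementation:

package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
)

// isReady reports whether the pod carries a true PodReady condition.
func isReady(pod *v1.Pod) bool {
	for _, c := range pod.Status.Conditions {
		if c.Type == v1.PodReady && c.Status == v1.ConditionTrue {
			return true
		}
	}
	return false
}

func main() {
	running := &v1.Pod{Status: v1.PodStatus{
		Phase:      v1.PodRunning,
		Conditions: []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}},
	}}
	pending := &v1.Pod{Status: v1.PodStatus{Phase: v1.PodPending}}

	// Every filtered pod counts toward Replicas; only ready pods toward
	// ReadyReplicas, matching the "1 fully labelled pod" style cases above.
	filteredPods := []*v1.Pod{running, pending}
	status := apps.ReplicaSetStatus{Replicas: int32(len(filteredPods))}
	for _, p := range filteredPods {
		if isReady(p) {
			status.ReadyReplicas++
		}
	}
	fmt.Printf("replicas=%d ready=%d\n", status.Replicas, status.ReadyReplicas)
}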
8
vendor/k8s.io/kubernetes/pkg/controller/replication/BUILD
generated
vendored
@ -16,14 +16,14 @@ go_library(
],
importpath = "k8s.io/kubernetes/pkg/controller/replication",
deps = [
"//pkg/apis/apps/v1:go_default_library",
"//pkg/apis/core/v1:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/replicaset:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
@ -34,11 +34,9 @@ go_library(
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/listers/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
104
vendor/k8s.io/kubernetes/pkg/controller/replication/conversion.go
generated
vendored
@ -26,8 +26,8 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@ -36,16 +36,14 @@ import (
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
coreinformers "k8s.io/client-go/informers/core/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
|
||||
appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2"
|
||||
appsv1client "k8s.io/client-go/kubernetes/typed/apps/v1"
|
||||
v1client "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
extensionsv1beta1client "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
|
||||
appslisters "k8s.io/client-go/listers/apps/v1"
|
||||
v1listers "k8s.io/client-go/listers/core/v1"
|
||||
extensionslisters "k8s.io/client-go/listers/extensions/v1beta1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
appsconversion "k8s.io/kubernetes/pkg/apis/apps/v1"
|
||||
apiv1 "k8s.io/kubernetes/pkg/apis/core/v1"
|
||||
"k8s.io/kubernetes/pkg/apis/extensions"
|
||||
extensionsinternalv1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
)
|
||||
|
||||
@ -59,7 +57,7 @@ func (i informerAdapter) Informer() cache.SharedIndexInformer {
|
||||
return conversionInformer{i.rcInformer.Informer()}
|
||||
}
|
||||
|
||||
func (i informerAdapter) Lister() extensionslisters.ReplicaSetLister {
|
||||
func (i informerAdapter) Lister() appslisters.ReplicaSetLister {
|
||||
return conversionLister{i.rcInformer.Lister()}
|
||||
}
|
||||
|
||||
@ -79,7 +77,7 @@ type conversionLister struct {
|
||||
rcLister v1listers.ReplicationControllerLister
|
||||
}
|
||||
|
||||
func (l conversionLister) List(selector labels.Selector) ([]*extensionsv1beta1.ReplicaSet, error) {
|
||||
func (l conversionLister) List(selector labels.Selector) ([]*apps.ReplicaSet, error) {
|
||||
rcList, err := l.rcLister.List(selector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -87,11 +85,11 @@ func (l conversionLister) List(selector labels.Selector) ([]*extensionsv1beta1.R
|
||||
return convertSlice(rcList)
|
||||
}
|
||||
|
||||
func (l conversionLister) ReplicaSets(namespace string) extensionslisters.ReplicaSetNamespaceLister {
|
||||
func (l conversionLister) ReplicaSets(namespace string) appslisters.ReplicaSetNamespaceLister {
|
||||
return conversionNamespaceLister{l.rcLister.ReplicationControllers(namespace)}
|
||||
}
|
||||
|
||||
func (l conversionLister) GetPodReplicaSets(pod *v1.Pod) ([]*extensionsv1beta1.ReplicaSet, error) {
|
||||
func (l conversionLister) GetPodReplicaSets(pod *v1.Pod) ([]*apps.ReplicaSet, error) {
|
||||
rcList, err := l.rcLister.GetPodControllers(pod)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -103,7 +101,7 @@ type conversionNamespaceLister struct {
|
||||
rcLister v1listers.ReplicationControllerNamespaceLister
|
||||
}
|
||||
|
||||
func (l conversionNamespaceLister) List(selector labels.Selector) ([]*extensionsv1beta1.ReplicaSet, error) {
|
||||
func (l conversionNamespaceLister) List(selector labels.Selector) ([]*apps.ReplicaSet, error) {
|
||||
rcList, err := l.rcLister.List(selector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -111,7 +109,7 @@ func (l conversionNamespaceLister) List(selector labels.Selector) ([]*extensions
|
||||
return convertSlice(rcList)
|
||||
}
|
||||
|
||||
func (l conversionNamespaceLister) Get(name string) (*extensionsv1beta1.ReplicaSet, error) {
|
||||
func (l conversionNamespaceLister) Get(name string) (*apps.ReplicaSet, error) {
|
||||
rc, err := l.rcLister.Get(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -182,58 +180,20 @@ type clientsetAdapter struct {
|
||||
clientset.Interface
|
||||
}
|
||||
|
||||
func (c clientsetAdapter) ExtensionsV1beta1() extensionsv1beta1client.ExtensionsV1beta1Interface {
|
||||
return conversionExtensionsClient{c.Interface, c.Interface.ExtensionsV1beta1()}
|
||||
}
|
||||
|
||||
func (c clientsetAdapter) Extensions() extensionsv1beta1client.ExtensionsV1beta1Interface {
|
||||
return conversionExtensionsClient{c.Interface, c.Interface.ExtensionsV1beta1()}
|
||||
}
|
||||
|
||||
func (c clientsetAdapter) AppsV1beta2() appsv1beta2.AppsV1beta2Interface {
|
||||
return conversionAppsV1beta2Client{c.Interface, c.Interface.AppsV1beta2()}
|
||||
}
|
||||
|
||||
func (c clientsetAdapter) AppsV1() appsv1.AppsV1Interface {
|
||||
func (c clientsetAdapter) AppsV1() appsv1client.AppsV1Interface {
|
||||
return conversionAppsV1Client{c.Interface, c.Interface.AppsV1()}
|
||||
}
|
||||
|
||||
func (c clientsetAdapter) Apps() appsv1.AppsV1Interface {
|
||||
func (c clientsetAdapter) Apps() appsv1client.AppsV1Interface {
|
||||
return conversionAppsV1Client{c.Interface, c.Interface.AppsV1()}
|
||||
}
|
||||
|
||||
type conversionAppsV1beta2Client struct {
|
||||
clientset clientset.Interface
|
||||
appsv1beta2.AppsV1beta2Interface
|
||||
}
|
||||
|
||||
func (c conversionAppsV1beta2Client) ReplicaSets(namespace string) appsv1beta2.ReplicaSetInterface {
|
||||
// TODO(enisoc): This will force RC integration tests to fail if anyone tries to update
|
||||
// ReplicaSetController to use apps/v1beta2 without updating this conversion adapter.
|
||||
// Please change conversionClient to use the new RS version instead of extensions/v1beta1,
|
||||
// and then return a conversionClient here.
|
||||
panic("need to update RC/RS conversionClient for apps/v1beta2")
|
||||
}
|
||||
|
||||
type conversionAppsV1Client struct {
|
||||
clientset clientset.Interface
|
||||
appsv1.AppsV1Interface
|
||||
appsv1client.AppsV1Interface
|
||||
}
|
||||
|
||||
func (c conversionAppsV1Client) ReplicaSets(namespace string) appsv1.ReplicaSetInterface {
|
||||
// TODO(enisoc): This will force RC integration tests to fail if anyone tries to update
|
||||
// ReplicaSetController to use apps/v1 without updating this conversion adapter.
|
||||
// Please change conversionClient to use the new RS version instead of extensions/v1beta1,
|
||||
// and then return a conversionClient here.
|
||||
panic("need to update RC/RS conversionClient for apps/v1")
|
||||
}
|
||||
|
||||
type conversionExtensionsClient struct {
|
||||
clientset clientset.Interface
|
||||
extensionsv1beta1client.ExtensionsV1beta1Interface
|
||||
}
|
||||
|
||||
func (c conversionExtensionsClient) ReplicaSets(namespace string) extensionsv1beta1client.ReplicaSetInterface {
|
||||
func (c conversionAppsV1Client) ReplicaSets(namespace string) appsv1client.ReplicaSetInterface {
|
||||
return conversionClient{c.clientset.CoreV1().ReplicationControllers(namespace)}
|
||||
}
|
||||
|
||||
@ -241,19 +201,19 @@ type conversionClient struct {
|
||||
v1client.ReplicationControllerInterface
|
||||
}
|
||||
|
||||
func (c conversionClient) Create(rs *extensionsv1beta1.ReplicaSet) (*extensionsv1beta1.ReplicaSet, error) {
func (c conversionClient) Create(rs *apps.ReplicaSet) (*apps.ReplicaSet, error) {
	return convertCall(c.ReplicationControllerInterface.Create, rs)
}

func (c conversionClient) Update(rs *extensionsv1beta1.ReplicaSet) (*extensionsv1beta1.ReplicaSet, error) {
func (c conversionClient) Update(rs *apps.ReplicaSet) (*apps.ReplicaSet, error) {
	return convertCall(c.ReplicationControllerInterface.Update, rs)
}

func (c conversionClient) UpdateStatus(rs *extensionsv1beta1.ReplicaSet) (*extensionsv1beta1.ReplicaSet, error) {
func (c conversionClient) UpdateStatus(rs *apps.ReplicaSet) (*apps.ReplicaSet, error) {
	return convertCall(c.ReplicationControllerInterface.UpdateStatus, rs)
}

func (c conversionClient) Get(name string, options metav1.GetOptions) (*extensionsv1beta1.ReplicaSet, error) {
func (c conversionClient) Get(name string, options metav1.GetOptions) (*apps.ReplicaSet, error) {
	rc, err := c.ReplicationControllerInterface.Get(name, options)
	if err != nil {
		return nil, err
@@ -261,7 +221,7 @@ func (c conversionClient) Get(name string, options metav1.GetOptions) (*extensio
	return convertRCtoRS(rc, nil)
}

func (c conversionClient) List(opts metav1.ListOptions) (*extensionsv1beta1.ReplicaSetList, error) {
func (c conversionClient) List(opts metav1.ListOptions) (*apps.ReplicaSetList, error) {
	rcList, err := c.ReplicationControllerInterface.List(opts)
	if err != nil {
		return nil, err
@@ -274,13 +234,13 @@ func (c conversionClient) Watch(opts metav1.ListOptions) (watch.Interface, error
	return nil, errors.New("Watch() is not implemented for conversionClient")
}

func (c conversionClient) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *extensionsv1beta1.ReplicaSet, err error) {
func (c conversionClient) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *apps.ReplicaSet, err error) {
	// This is not used by RSC.
	return nil, errors.New("Patch() is not implemented for conversionClient")
}

func convertSlice(rcList []*v1.ReplicationController) ([]*extensionsv1beta1.ReplicaSet, error) {
	rsList := make([]*extensionsv1beta1.ReplicaSet, 0, len(rcList))
func convertSlice(rcList []*v1.ReplicationController) ([]*apps.ReplicaSet, error) {
	rsList := make([]*apps.ReplicaSet, 0, len(rcList))
	for _, rc := range rcList {
		rs, err := convertRCtoRS(rc, nil)
		if err != nil {
@@ -291,8 +251,8 @@ func convertSlice(rcList []*v1.ReplicationController) ([]*extensionsv1beta1.Repl
	return rsList, nil
}

func convertList(rcList *v1.ReplicationControllerList) (*extensionsv1beta1.ReplicaSetList, error) {
	rsList := &extensionsv1beta1.ReplicaSetList{Items: make([]extensionsv1beta1.ReplicaSet, len(rcList.Items))}
func convertList(rcList *v1.ReplicationControllerList) (*apps.ReplicaSetList, error) {
	rsList := &apps.ReplicaSetList{Items: make([]apps.ReplicaSet, len(rcList.Items))}
	for i := range rcList.Items {
		rc := &rcList.Items[i]
		_, err := convertRCtoRS(rc, &rsList.Items[i])
@@ -303,7 +263,7 @@ func convertList(rcList *v1.ReplicationControllerList) (*extensionsv1beta1.Repli
	return rsList, nil
}

func convertCall(fn func(*v1.ReplicationController) (*v1.ReplicationController, error), rs *extensionsv1beta1.ReplicaSet) (*extensionsv1beta1.ReplicaSet, error) {
func convertCall(fn func(*v1.ReplicationController) (*v1.ReplicationController, error), rs *apps.ReplicaSet) (*apps.ReplicaSet, error) {
	rc, err := convertRStoRC(rs)
	if err != nil {
		return nil, err
@@ -315,23 +275,23 @@ func convertCall(fn func(*v1.ReplicationController) (*v1.ReplicationController,
	return convertRCtoRS(result, nil)
}

func convertRCtoRS(rc *v1.ReplicationController, out *extensionsv1beta1.ReplicaSet) (*extensionsv1beta1.ReplicaSet, error) {
func convertRCtoRS(rc *v1.ReplicationController, out *apps.ReplicaSet) (*apps.ReplicaSet, error) {
	var rsInternal extensions.ReplicaSet
	if err := apiv1.Convert_v1_ReplicationController_to_extensions_ReplicaSet(rc, &rsInternal, nil); err != nil {
		return nil, fmt.Errorf("can't convert ReplicationController %v/%v to ReplicaSet: %v", rc.Namespace, rc.Name, err)
	}
	if out == nil {
		out = new(extensionsv1beta1.ReplicaSet)
		out = new(apps.ReplicaSet)
	}
	if err := extensionsinternalv1beta1.Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(&rsInternal, out, nil); err != nil {
	if err := appsconversion.Convert_extensions_ReplicaSet_To_v1_ReplicaSet(&rsInternal, out, nil); err != nil {
		return nil, fmt.Errorf("can't convert ReplicaSet (converted from ReplicationController %v/%v) from internal to extensions/v1beta1: %v", rc.Namespace, rc.Name, err)
	}
	return out, nil
}

func convertRStoRC(rs *extensionsv1beta1.ReplicaSet) (*v1.ReplicationController, error) {
func convertRStoRC(rs *apps.ReplicaSet) (*v1.ReplicationController, error) {
	var rsInternal extensions.ReplicaSet
	if err := extensionsinternalv1beta1.Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(rs, &rsInternal, nil); err != nil {
	if err := appsconversion.Convert_v1_ReplicaSet_To_extensions_ReplicaSet(rs, &rsInternal, nil); err != nil {
		return nil, fmt.Errorf("can't convert ReplicaSet (converting to ReplicationController %v/%v) from extensions/v1beta1 to internal: %v", rs.Namespace, rs.Name, err)
	}
	var rc v1.ReplicationController
@@ -356,7 +316,7 @@ func (pc podControlAdapter) CreatePodsOnNode(nodeName, namespace string, templat
}

func (pc podControlAdapter) CreatePodsWithControllerRef(namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error {
	rc, err := convertRStoRC(object.(*extensionsv1beta1.ReplicaSet))
	rc, err := convertRStoRC(object.(*apps.ReplicaSet))
	if err != nil {
		return err
	}
@@ -364,7 +324,7 @@ func (pc podControlAdapter) CreatePodsWithControllerRef(namespace string, templa
}

func (pc podControlAdapter) DeletePod(namespace string, podID string, object runtime.Object) error {
	rc, err := convertRStoRC(object.(*extensionsv1beta1.ReplicaSet))
	rc, err := convertRStoRC(object.(*apps.ReplicaSet))
	if err != nil {
		return err
	}
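The conversion client above wraps a typed ReplicationController client so that ReplicaSet-oriented controller code can drive it, converting arguments on the way in and results on the way out. A minimal sketch of the same round-trip adapter idea, using hypothetical Widget/Gadget types rather than the real Kubernetes API:

package main

import "fmt"

// Widget and Gadget are hypothetical stand-ins for the two API types
// (ReplicationController and ReplicaSet in the diff above).
type Widget struct{ Name string }
type Gadget struct{ Name string }

func widgetToGadget(w *Widget) *Gadget { return &Gadget{Name: w.Name} }
func gadgetToWidget(g *Gadget) *Widget { return &Widget{Name: g.Name} }

// convertCall mirrors the helper in the diff: convert the argument,
// invoke the underlying typed call, then convert the result back.
func convertCall(fn func(*Widget) (*Widget, error), g *Gadget) (*Gadget, error) {
	w := gadgetToWidget(g)
	result, err := fn(w)
	if err != nil {
		return nil, err
	}
	return widgetToGadget(result), nil
}

func main() {
	create := func(w *Widget) (*Widget, error) { return w, nil } // pretend API call
	g, err := convertCall(create, &Gadget{Name: "demo"})
	fmt.Println(g.Name, err) // demo <nil>
}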
2  vendor/k8s.io/kubernetes/pkg/controller/replication/replication_controller.go  generated vendored
@@ -52,7 +52,7 @@ type ReplicationManager struct {
func NewReplicationManager(podInformer coreinformers.PodInformer, rcInformer coreinformers.ReplicationControllerInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicationManager {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
	return &ReplicationManager{
		*replicaset.NewBaseController(informerAdapter{rcInformer}, podInformer, clientsetAdapter{kubeClient}, burstReplicas,
			v1.SchemeGroupVersion.WithKind("ReplicationController"),
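The sink change above (repeated for the route and service controllers later in this diff) drops the extra v1core.New(...RESTClient()) wrapping: kubeClient.CoreV1().Events("") already returns the EventInterface the sink needs. A minimal sketch of wiring an event recorder this way, against client-go of this vintage; the component name is illustrative:

package example

import (
	"github.com/golang/glog"

	"k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

func newRecorder(kubeClient kubernetes.Interface) record.EventRecorder {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartLogging(glog.Infof)
	// CoreV1().Events("") is itself an EventInterface; no need to rebuild
	// a typed client from the REST client as the old code did.
	broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
	return broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "example-controller"})
}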
4  vendor/k8s.io/kubernetes/pkg/controller/resourcequota/resource_quota_controller.go  generated vendored
@@ -297,7 +297,7 @@ func (rq *ResourceQuotaController) Run(workers int, stopCh <-chan struct{}) {
func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err error) {
	startTime := time.Now()
	defer func() {
		glog.V(4).Infof("Finished syncing resource quota %q (%v)", key, time.Now().Sub(startTime))
		glog.V(4).Infof("Finished syncing resource quota %q (%v)", key, time.Since(startTime))
	}()

	namespace, name, err := cache.SplitMetaNamespaceKey(key)
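time.Since(t) is shorthand for time.Now().Sub(t), so the many substitutions of this form in the diff are purely cosmetic. For example:

package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now()
	time.Sleep(10 * time.Millisecond)
	// The two expressions below are equivalent; time.Since reads better.
	fmt.Println(time.Now().Sub(start))
	fmt.Println(time.Since(start))
}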
@@ -337,7 +337,7 @@ func (rq *ResourceQuotaController) syncResourceQuota(v1ResourceQuota *v1.Resourc
	}
	hardLimits := quota.Add(api.ResourceList{}, resourceQuota.Spec.Hard)

	newUsage, err := quota.CalculateUsage(resourceQuota.Namespace, resourceQuota.Spec.Scopes, hardLimits, rq.registry)
	newUsage, err := quota.CalculateUsage(resourceQuota.Namespace, resourceQuota.Spec.Scopes, hardLimits, rq.registry, resourceQuota.Spec.ScopeSelector)
	if err != nil {
		return err
	}
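The new argument threads the quota's spec.scopeSelector through usage calculation, so a quota can be scoped to a subset of pods (for example by PriorityClass). A sketch of what such a selector looks like on a ResourceQuota object; the object and class names are illustrative:

package example

import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

var exampleQuota = v1.ResourceQuota{
	ObjectMeta: metav1.ObjectMeta{Name: "high-priority-pods", Namespace: "demo"},
	Spec: v1.ResourceQuotaSpec{
		Hard: v1.ResourceList{v1.ResourcePods: resource.MustParse("10")},
		// Only pods whose priorityClassName is "high" count against this quota.
		ScopeSelector: &v1.ScopeSelector{
			MatchExpressions: []v1.ScopedResourceSelectorRequirement{{
				ScopeName: v1.ResourceQuotaScopePriorityClass,
				Operator:  v1.ScopeSelectorOpIn,
				Values:    []string{"high"},
			}},
		},
	},
}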
2  vendor/k8s.io/kubernetes/pkg/controller/route/BUILD  generated vendored
@@ -21,7 +21,6 @@ go_library(
        "//pkg/util/node:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
@@ -34,6 +33,7 @@ go_library(
        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
        "//vendor/k8s.io/client-go/util/retry:go_default_library",
    ],
)
60  vendor/k8s.io/kubernetes/pkg/controller/route/route_controller.go  generated vendored
@@ -24,8 +24,8 @@ import (
	"time"

	"github.com/golang/glog"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
@@ -38,6 +38,7 @@ import (
	corelisters "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	clientretry "k8s.io/client-go/util/retry"
	v1node "k8s.io/kubernetes/pkg/api/v1/node"
	"k8s.io/kubernetes/pkg/cloudprovider"
	"k8s.io/kubernetes/pkg/controller"
@@ -49,12 +50,14 @@ const (
	// Maximal number of concurrent CreateRoute API calls.
	// TODO: This should be per-provider.
	maxConcurrentRouteCreations int = 200
	// Maximum number of retries of route creations.
	maxRetries int = 5
	// Maximum number of retries of node status update.
	updateNodeStatusMaxRetries int = 3
)

var updateNetworkConditionBackoff = wait.Backoff{
	Steps:    5, // Maximum number of retries.
	Duration: 100 * time.Millisecond,
	Jitter:   1.0,
}

type RouteController struct {
	routes     cloudprovider.Routes
	kubeClient clientset.Interface
@@ -104,7 +107,7 @@ func (rc *RouteController) Run(stopCh <-chan struct{}, syncPeriod time.Duration)
	}

	if rc.broadcaster != nil {
		rc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(rc.kubeClient.CoreV1().RESTClient()).Events("")})
		rc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: rc.kubeClient.CoreV1().Events("")})
	}

	// TODO: If we do just the full Resync every 5 minutes (default value)
@@ -165,18 +168,18 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R
			wg.Add(1)
			go func(nodeName types.NodeName, nameHint string, route *cloudprovider.Route) {
				defer wg.Done()
				for i := 0; i < maxRetries; i++ {
				err := clientretry.RetryOnConflict(updateNetworkConditionBackoff, func() error {
					startTime := time.Now()
					// Ensure that we don't have more than maxConcurrentRouteCreations
					// CreateRoute calls in flight.
					rateLimiter <- struct{}{}
					glog.Infof("Creating route for node %s %s with hint %s, throttled %v", nodeName, route.DestinationCIDR, nameHint, time.Now().Sub(startTime))
					glog.Infof("Creating route for node %s %s with hint %s, throttled %v", nodeName, route.DestinationCIDR, nameHint, time.Since(startTime))
					err := rc.routes.CreateRoute(context.TODO(), rc.clusterName, nameHint, route)
					<-rateLimiter

					rc.updateNetworkingCondition(nodeName, err == nil)
					if err != nil {
						msg := fmt.Sprintf("Could not create route %s %s for node %s after %v: %v", nameHint, route.DestinationCIDR, nodeName, time.Now().Sub(startTime), err)
						msg := fmt.Sprintf("Could not create route %s %s for node %s after %v: %v", nameHint, route.DestinationCIDR, nodeName, time.Since(startTime), err)
						if rc.recorder != nil {
							rc.recorder.Eventf(
								&v1.ObjectReference{
@@ -186,12 +189,14 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R
								Namespace: "",
							}, v1.EventTypeWarning, "FailedToCreateRoute", msg)
						}
						glog.Error(msg)
					} else {
						glog.Infof("Created route for node %s %s with hint %s after %v", nodeName, route.DestinationCIDR, nameHint, time.Now().Sub(startTime))
						return
						glog.V(4).Infof(msg)
						return err
					}
					glog.Infof("Created route for node %s %s with hint %s after %v", nodeName, route.DestinationCIDR, nameHint, time.Now().Sub(startTime))
					return nil
				})
				if err != nil {
					glog.Errorf("Could not create route %s %s for node %s: %v", nameHint, route.DestinationCIDR, nodeName, err)
				}
			}(nodeName, nameHint, route)
		} else {
@@ -210,14 +215,13 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R
			wg.Add(1)
			// Delete the route.
			go func(route *cloudprovider.Route, startTime time.Time) {
				defer wg.Done()
				glog.Infof("Deleting route %s %s", route.Name, route.DestinationCIDR)
				if err := rc.routes.DeleteRoute(context.TODO(), rc.clusterName, route); err != nil {
					glog.Errorf("Could not delete route %s %s after %v: %v", route.Name, route.DestinationCIDR, time.Now().Sub(startTime), err)
					glog.Errorf("Could not delete route %s %s after %v: %v", route.Name, route.DestinationCIDR, time.Since(startTime), err)
				} else {
					glog.Infof("Deleted route %s %s after %v", route.Name, route.DestinationCIDR, time.Now().Sub(startTime))
					glog.Infof("Deleted route %s %s after %v", route.Name, route.DestinationCIDR, time.Since(startTime))
				}
				wg.Done()

			}(route, time.Now())
		}
	}
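The rateLimiter channel in the hunk above is a counting semaphore: once maxConcurrentRouteCreations tokens are outstanding, further sends block, capping the number of in-flight CreateRoute calls. A minimal sketch of the same pattern:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const maxInFlight = 2 // stands in for maxConcurrentRouteCreations
	sem := make(chan struct{}, maxInFlight)
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			sem <- struct{}{}        // acquire: blocks while maxInFlight calls are running
			defer func() { <-sem }() // release
			fmt.Println("creating route", i)
		}(i)
	}
	wg.Wait()
}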
@@ -227,8 +231,8 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R
}

func (rc *RouteController) updateNetworkingCondition(nodeName types.NodeName, routeCreated bool) error {
	var err error
	for i := 0; i < updateNodeStatusMaxRetries; i++ {
	err := clientretry.RetryOnConflict(updateNetworkConditionBackoff, func() error {
		var err error
		// Patch could also fail, even though the chance is very slim. So we still do
		// patch in the retry loop.
		currentTime := metav1.Now()
@@ -249,16 +253,16 @@ func (rc *RouteController) updateNetworkingCondition(nodeName types.NodeName, ro
				LastTransitionTime: currentTime,
			})
		}
		if err == nil {
			return nil
		if err != nil {
			glog.V(4).Infof("Error updating node %s, retrying: %v", nodeName, err)
		}
		if !errors.IsConflict(err) {
			glog.Errorf("Error updating node %s: %v", nodeName, err)
			return err
		}
		glog.V(4).Infof("Error updating node %s, retrying: %v", nodeName, err)
		return err
	})

	if err != nil {
		glog.Errorf("Error updating node %s: %v", nodeName, err)
	}
	glog.Errorf("Error updating node %s: %v", nodeName, err)

	return err
}
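clientretry.RetryOnConflict replaces the hand-rolled retry loop above: it re-runs the supplied function under the given backoff for as long as the function returns a Conflict error, and returns any other result (nil or a non-conflict error) immediately. A sketch under the same backoff values; the function body is illustrative:

package example

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	clientretry "k8s.io/client-go/util/retry"
)

var backoff = wait.Backoff{
	Steps:    5, // maximum number of attempts
	Duration: 100 * time.Millisecond,
	Jitter:   1.0,
}

func updateCondition() error {
	return clientretry.RetryOnConflict(backoff, func() error {
		// Re-read the object and attempt the update here; returning a
		// Conflict error triggers another attempt under the backoff,
		// any other return value ends the retry loop.
		return nil // illustrative body
	})
}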
2  vendor/k8s.io/kubernetes/pkg/controller/service/service_controller.go  generated vendored
@@ -111,7 +111,7 @@ func New(
) (*ServiceController, error) {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartLogging(glog.Infof)
	broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
	broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "service-controller"})

	if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
5  vendor/k8s.io/kubernetes/pkg/controller/service/service_controller_test.go  generated vendored
@@ -247,9 +247,8 @@ func TestUpdateNodesInExternalLoadBalancer(t *testing.T) {
		controller, cloud, _ := newController()

		var services []*v1.Service
		for _, service := range item.services {
			services = append(services, service)
		}
		services = append(services, item.services...)

		if err := controller.updateLoadBalancerHosts(services, nodes); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
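The test simplification above relies on append with a ... spread, which copies the whole source slice in one call instead of looping element by element. For example:

package main

import "fmt"

func main() {
	src := []string{"a", "b", "c"}
	var dst []string
	// Equivalent to a loop appending one element at a time.
	dst = append(dst, src...)
	fmt.Println(dst) // [a b c]
}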
2  vendor/k8s.io/kubernetes/pkg/controller/serviceaccount/serviceaccounts_controller.go  generated vendored
@@ -183,7 +183,7 @@ func (c *ServiceAccountsController) processNextWorkItem() bool {
func (c *ServiceAccountsController) syncNamespace(key string) error {
	startTime := time.Now()
	defer func() {
		glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Now().Sub(startTime))
		glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime))
	}()

	ns, err := c.nsLister.Get(key)
Some files were not shown because too many files have changed in this diff.