vendor update for E2E framework

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Author: Madhu Rajanna
Date: 2019-05-31 15:15:11 +05:30
Parent: 9bb23e4e32
Commit: d300da19b7

2149 changed files with 598692 additions and 14107 deletions

vendor/k8s.io/kubernetes/pkg/kubectl/apply.go (generated, vendored; new file, 146 lines)

@@ -0,0 +1,146 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubectl
import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
)
var metadataAccessor = meta.NewAccessor()
// GetOriginalConfiguration retrieves the original configuration of the object
// from the annotation, or nil if no annotation was found.
func GetOriginalConfiguration(obj runtime.Object) ([]byte, error) {
annots, err := metadataAccessor.Annotations(obj)
if err != nil {
return nil, err
}
if annots == nil {
return nil, nil
}
original, ok := annots[v1.LastAppliedConfigAnnotation]
if !ok {
return nil, nil
}
return []byte(original), nil
}
// setOriginalConfiguration sets the original configuration of the object
// as the annotation on the object for later use in computing a three-way patch.
func setOriginalConfiguration(obj runtime.Object, original []byte) error {
if len(original) < 1 {
return nil
}
annots, err := metadataAccessor.Annotations(obj)
if err != nil {
return err
}
if annots == nil {
annots = map[string]string{}
}
annots[v1.LastAppliedConfigAnnotation] = string(original)
return metadataAccessor.SetAnnotations(obj, annots)
}
// GetModifiedConfiguration retrieves the modified configuration of the object.
// If annotate is true, it embeds the result as an annotation in the modified
// configuration. If an object was read from the command input, it will use that
// version of the object. Otherwise, it will use the version from the server.
func GetModifiedConfiguration(obj runtime.Object, annotate bool, codec runtime.Encoder) ([]byte, error) {
// First serialize the object without the annotation to prevent recursion,
// then add that serialization to it as the annotation and serialize it again.
var modified []byte
// Get the current annotations from the object.
annots, err := metadataAccessor.Annotations(obj)
if err != nil {
return nil, err
}
if annots == nil {
annots = map[string]string{}
}
original := annots[v1.LastAppliedConfigAnnotation]
delete(annots, v1.LastAppliedConfigAnnotation)
if err := metadataAccessor.SetAnnotations(obj, annots); err != nil {
return nil, err
}
modified, err = runtime.Encode(codec, obj)
if err != nil {
return nil, err
}
if annotate {
annots[v1.LastAppliedConfigAnnotation] = string(modified)
if err := metadataAccessor.SetAnnotations(obj, annots); err != nil {
return nil, err
}
modified, err = runtime.Encode(codec, obj)
if err != nil {
return nil, err
}
}
// Restore the object to its original condition.
annots[v1.LastAppliedConfigAnnotation] = original
if err := metadataAccessor.SetAnnotations(obj, annots); err != nil {
return nil, err
}
return modified, nil
}
// UpdateApplyAnnotation calls CreateApplyAnnotation if the last applied
// configuration annotation is already present. Otherwise, it does nothing.
func UpdateApplyAnnotation(obj runtime.Object, codec runtime.Encoder) error {
if original, err := GetOriginalConfiguration(obj); err != nil || len(original) <= 0 {
return err
}
return CreateApplyAnnotation(obj, codec)
}
// CreateApplyAnnotation gets the modified configuration of the object,
// without embedding it again, and then sets it on the object as the annotation.
func CreateApplyAnnotation(obj runtime.Object, codec runtime.Encoder) error {
modified, err := GetModifiedConfiguration(obj, false, codec)
if err != nil {
return err
}
return setOriginalConfiguration(obj, modified)
}
// CreateOrUpdateAnnotation creates the annotation used by
// kubectl apply only when createAnnotation is true.
// Otherwise, it only updates the annotation if it already exists.
func CreateOrUpdateAnnotation(createAnnotation bool, obj runtime.Object, codec runtime.Encoder) error {
if createAnnotation {
return CreateApplyAnnotation(obj, codec)
}
return UpdateApplyAnnotation(obj, codec)
}
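A quick illustration of how these helpers compose (not part of the vendored file): a minimal sketch, assuming the legacy codec from pkg/kubectl/scheme used elsewhere in this vendor drop; the Pod literal is a placeholder.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/kubectl"
	"k8s.io/kubernetes/pkg/kubectl/scheme"
)

func main() {
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}
	codec := scheme.Codecs.LegacyCodec(corev1.SchemeGroupVersion)
	// createAnnotation=true mirrors `kubectl apply`: always (re)create the annotation.
	if err := kubectl.CreateOrUpdateAnnotation(true, pod, codec); err != nil {
		fmt.Println("failed to set last-applied annotation:", err)
		return
	}
	fmt.Println(pod.Annotations[corev1.LastAppliedConfigAnnotation])
}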


@@ -0,0 +1,89 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apps
import (
"fmt"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// KindVisitor is used with GroupKindElement to call a particular function depending on the
// Kind of a schema.GroupKind
type KindVisitor interface {
VisitDaemonSet(kind GroupKindElement)
VisitDeployment(kind GroupKindElement)
VisitJob(kind GroupKindElement)
VisitPod(kind GroupKindElement)
VisitReplicaSet(kind GroupKindElement)
VisitReplicationController(kind GroupKindElement)
VisitStatefulSet(kind GroupKindElement)
VisitCronJob(kind GroupKindElement)
}
// GroupKindElement defines a Kubernetes API group/kind element
type GroupKindElement schema.GroupKind
// Accept calls the Visit method on visitor that corresponds to elem's Kind
func (elem GroupKindElement) Accept(visitor KindVisitor) error {
switch {
case elem.GroupMatch("apps", "extensions") && elem.Kind == "DaemonSet":
visitor.VisitDaemonSet(elem)
case elem.GroupMatch("apps", "extensions") && elem.Kind == "Deployment":
visitor.VisitDeployment(elem)
case elem.GroupMatch("batch") && elem.Kind == "Job":
visitor.VisitJob(elem)
case elem.GroupMatch("", "core") && elem.Kind == "Pod":
visitor.VisitPod(elem)
case elem.GroupMatch("apps", "extensions") && elem.Kind == "ReplicaSet":
visitor.VisitReplicaSet(elem)
case elem.GroupMatch("", "core") && elem.Kind == "ReplicationController":
visitor.VisitReplicationController(elem)
case elem.GroupMatch("apps") && elem.Kind == "StatefulSet":
visitor.VisitStatefulSet(elem)
case elem.GroupMatch("batch") && elem.Kind == "CronJob":
visitor.VisitCronJob(elem)
default:
return fmt.Errorf("no visitor method exists for %v", elem)
}
return nil
}
// GroupMatch returns true if and only if elem's group matches one
// of the group arguments
func (elem GroupKindElement) GroupMatch(groups ...string) bool {
for _, g := range groups {
if elem.Group == g {
return true
}
}
return false
}
// NoOpKindVisitor implements KindVisitor with no-op functions.
type NoOpKindVisitor struct{}
var _ KindVisitor = &NoOpKindVisitor{}
func (*NoOpKindVisitor) VisitDaemonSet(kind GroupKindElement) {}
func (*NoOpKindVisitor) VisitDeployment(kind GroupKindElement) {}
func (*NoOpKindVisitor) VisitJob(kind GroupKindElement) {}
func (*NoOpKindVisitor) VisitPod(kind GroupKindElement) {}
func (*NoOpKindVisitor) VisitReplicaSet(kind GroupKindElement) {}
func (*NoOpKindVisitor) VisitReplicationController(kind GroupKindElement) {}
func (*NoOpKindVisitor) VisitStatefulSet(kind GroupKindElement) {}
func (*NoOpKindVisitor) VisitCronJob(kind GroupKindElement) {}
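To use Accept, embed NoOpKindVisitor and override only the kinds you care about. A minimal sketch (illustrative, not part of the vendored file):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/kubernetes/pkg/kubectl/apps"
)

// deploymentCounter visits only Deployments; everything else is a no-op.
type deploymentCounter struct {
	apps.NoOpKindVisitor
	count int
}

func (c *deploymentCounter) VisitDeployment(kind apps.GroupKindElement) { c.count++ }

func main() {
	elem := apps.GroupKindElement(schema.GroupKind{Group: "apps", Kind: "Deployment"})
	v := &deploymentCounter{}
	if err := elem.Accept(v); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("deployments seen:", v.count)
}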

vendor/k8s.io/kubernetes/pkg/kubectl/conditions.go (generated, vendored; new file, 100 lines)

@@ -0,0 +1,100 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubectl
import (
"fmt"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)
// ControllerHasDesiredReplicas returns a condition that will be true if and only if
// the desired replica count for a controller's ReplicaSelector equals the Replicas count.
func ControllerHasDesiredReplicas(rcClient corev1client.ReplicationControllersGetter, controller *corev1.ReplicationController) wait.ConditionFunc {
// If we're given a controller where the status lags the spec, it either means that the controller is stale,
// or that the rc manager hasn't noticed the update yet. Polling status.Replicas is not safe in the latter case.
desiredGeneration := controller.Generation
return func() (bool, error) {
ctrl, err := rcClient.ReplicationControllers(controller.Namespace).Get(controller.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
// There's a chance a concurrent update modifies the Spec.Replicas causing this check to pass,
// or, after this check has passed, a modification causes the rc manager to create more pods.
// This will not be an issue once we've implemented graceful delete for rcs, but till then
// concurrent stop operations on the same rc might have unintended side effects.
return ctrl.Status.ObservedGeneration >= desiredGeneration && ctrl.Status.Replicas == valOrZero(ctrl.Spec.Replicas), nil
}
}
// ErrPodCompleted is returned by PodRunning or PodContainerRunning to indicate that
// the pod has already reached completed state.
var ErrPodCompleted = fmt.Errorf("pod ran to completion")
// PodCompleted returns true if the pod has run to completion, false if the pod has not yet
// reached running state, or an error in any other case.
func PodCompleted(event watch.Event) (bool, error) {
switch event.Type {
case watch.Deleted:
return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "")
}
switch t := event.Object.(type) {
case *corev1.Pod:
switch t.Status.Phase {
case corev1.PodFailed, corev1.PodSucceeded:
return true, nil
}
}
return false, nil
}
// PodRunningAndReady returns true if the pod is running and ready, false if the pod has not
// yet reached those states, returns ErrPodCompleted if the pod has run to completion, or
// an error in any other case.
func PodRunningAndReady(event watch.Event) (bool, error) {
switch event.Type {
case watch.Deleted:
return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "")
}
switch t := event.Object.(type) {
case *corev1.Pod:
switch t.Status.Phase {
case corev1.PodFailed, corev1.PodSucceeded:
return false, ErrPodCompleted
case corev1.PodRunning:
conditions := t.Status.Conditions
if conditions == nil {
return false, nil
}
for i := range conditions {
if conditions[i].Type == corev1.PodReady &&
conditions[i].Status == corev1.ConditionTrue {
return true, nil
}
}
}
}
return false, nil
}
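These condition functions are meant to be driven by a watch; a minimal sketch, assuming client-go's watchtools.UntilWithoutRetry (the clientset, namespace, and pod name are placeholders):

package example

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	watchtools "k8s.io/client-go/tools/watch"
	"k8s.io/kubernetes/pkg/kubectl"
)

func waitForPodReady(c kubernetes.Interface, namespace, name string) error {
	w, err := c.CoreV1().Pods(namespace).Watch(metav1.ListOptions{
		FieldSelector: "metadata.name=" + name,
	})
	if err != nil {
		return err
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()
	// PodRunningAndReady returns ErrPodCompleted if the pod finished instead.
	_, err = watchtools.UntilWithoutRetry(ctx, w, kubectl.PodRunningAndReady)
	if err == kubectl.ErrPodCompleted {
		fmt.Println("pod ran to completion before becoming ready")
	}
	return err
}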


@@ -0,0 +1,71 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package describe
import (
"fmt"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/cli-runtime/pkg/genericclioptions"
)
const (
// LoadBalancerWidth is the width used when describing a load balancer
LoadBalancerWidth = 16
// LabelNodeRolePrefix is a label prefix for node roles
// It's copied over to here until it's merged in core: https://github.com/kubernetes/kubernetes/pull/39112
LabelNodeRolePrefix = "node-role.kubernetes.io/"
// NodeLabelRole specifies the role of a node
NodeLabelRole = "kubernetes.io/role"
)
// DescriberFunc gives a way to display the specified RESTMapping type
type DescriberFunc func(restClientGetter genericclioptions.RESTClientGetter, mapping *meta.RESTMapping) (Describer, error)
// Describer generates output for the named resource or an error
// if the output could not be generated. Implementers typically
// abstract the retrieval of the named object from a remote server.
type Describer interface {
Describe(namespace, name string, describerSettings DescriberSettings) (output string, err error)
}
// DescriberSettings holds display configuration for each object
// describer to control what is printed.
type DescriberSettings struct {
ShowEvents bool
}
// ObjectDescriber is an interface for displaying arbitrary objects with extra
// information. Use when an object is in hand (on disk, or already retrieved).
// Implementers may ignore the additional information passed on extra, or use it
// by default. ObjectDescribers may return ErrNoDescriber if no suitable describer
// is found.
type ObjectDescriber interface {
DescribeObject(object interface{}, extra ...interface{}) (output string, err error)
}
// ErrNoDescriber is a structured error indicating the provided object or objects
// cannot be described.
type ErrNoDescriber struct {
Types []string
}
// Error implements the error interface.
func (e ErrNoDescriber) Error() string {
return fmt.Sprintf("no describer has been defined for %v", e.Types)
}
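Describer is a one-method interface, so a function adapter in the style of http.HandlerFunc satisfies it; an illustrative sketch (DescriberFn is a hypothetical helper, not part of the vendored package):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubectl/describe"
)

// DescriberFn adapts a plain function to the Describer interface (hypothetical helper).
type DescriberFn func(namespace, name string, settings describe.DescriberSettings) (string, error)

func (f DescriberFn) Describe(namespace, name string, settings describe.DescriberSettings) (string, error) {
	return f(namespace, name, settings)
}

func main() {
	var d describe.Describer = DescriberFn(func(ns, name string, s describe.DescriberSettings) (string, error) {
		return fmt.Sprintf("%s/%s (events: %v)", ns, name, s.ShowEvents), nil
	})
	out, _ := d.Describe("default", "demo", describe.DescriberSettings{ShowEvents: true})
	fmt.Println(out)
}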

(file diff suppressed because it is too large)

vendor/k8s.io/kubernetes/pkg/kubectl/doc.go (generated, vendored; new file, 21 lines)

@@ -0,0 +1,21 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package kubectl provides the functions used by the kubectl command line tool
// under k8s.io/kubernetes/cmd. The functions are kept in this package to better
// support unit testing. The main() method for kubectl is only an entry point
// and should contain no functionality.
package kubectl // import "k8s.io/kubernetes/pkg/kubectl"

vendor/k8s.io/kubernetes/pkg/kubectl/history.go (generated, vendored; new file, 390 lines)

@@ -0,0 +1,390 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubectl
import (
"bytes"
"fmt"
"io"
"text/tabwriter"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/client-go/kubernetes"
clientappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
kapps "k8s.io/kubernetes/pkg/kubectl/apps"
describe "k8s.io/kubernetes/pkg/kubectl/describe/versioned"
deploymentutil "k8s.io/kubernetes/pkg/kubectl/util/deployment"
sliceutil "k8s.io/kubernetes/pkg/kubectl/util/slice"
)
const (
ChangeCauseAnnotation = "kubernetes.io/change-cause"
)
// HistoryViewer provides an interface for resources that have historical information.
type HistoryViewer interface {
ViewHistory(namespace, name string, revision int64) (string, error)
}
type HistoryVisitor struct {
clientset kubernetes.Interface
result HistoryViewer
}
func (v *HistoryVisitor) VisitDeployment(elem kapps.GroupKindElement) {
v.result = &DeploymentHistoryViewer{v.clientset}
}
func (v *HistoryVisitor) VisitStatefulSet(kind kapps.GroupKindElement) {
v.result = &StatefulSetHistoryViewer{v.clientset}
}
func (v *HistoryVisitor) VisitDaemonSet(kind kapps.GroupKindElement) {
v.result = &DaemonSetHistoryViewer{v.clientset}
}
func (v *HistoryVisitor) VisitJob(kind kapps.GroupKindElement) {}
func (v *HistoryVisitor) VisitPod(kind kapps.GroupKindElement) {}
func (v *HistoryVisitor) VisitReplicaSet(kind kapps.GroupKindElement) {}
func (v *HistoryVisitor) VisitReplicationController(kind kapps.GroupKindElement) {}
func (v *HistoryVisitor) VisitCronJob(kind kapps.GroupKindElement) {}
// HistoryViewerFor returns an implementation of HistoryViewer interface for the given schema kind
func HistoryViewerFor(kind schema.GroupKind, c kubernetes.Interface) (HistoryViewer, error) {
elem := kapps.GroupKindElement(kind)
visitor := &HistoryVisitor{
clientset: c,
}
// Determine which HistoryViewer we need here
err := elem.Accept(visitor)
if err != nil {
return nil, fmt.Errorf("error retrieving history for %q, %v", kind.String(), err)
}
if visitor.result == nil {
return nil, fmt.Errorf("no history viewer has been implemented for %q", kind.String())
}
return visitor.result, nil
}
type DeploymentHistoryViewer struct {
c kubernetes.Interface
}
// ViewHistory returns a revision-to-replicaset map as the revision history of a deployment
// TODO: this should be a describer
func (h *DeploymentHistoryViewer) ViewHistory(namespace, name string, revision int64) (string, error) {
versionedAppsClient := h.c.AppsV1()
deployment, err := versionedAppsClient.Deployments(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return "", fmt.Errorf("failed to retrieve deployment %s: %v", name, err)
}
_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, versionedAppsClient)
if err != nil {
return "", fmt.Errorf("failed to retrieve replica sets from deployment %s: %v", name, err)
}
allRSs := allOldRSs
if newRS != nil {
allRSs = append(allRSs, newRS)
}
historyInfo := make(map[int64]*corev1.PodTemplateSpec)
for _, rs := range allRSs {
v, err := deploymentutil.Revision(rs)
if err != nil {
continue
}
historyInfo[v] = &rs.Spec.Template
changeCause := getChangeCause(rs)
if historyInfo[v].Annotations == nil {
historyInfo[v].Annotations = make(map[string]string)
}
if len(changeCause) > 0 {
historyInfo[v].Annotations[ChangeCauseAnnotation] = changeCause
}
}
if len(historyInfo) == 0 {
return "No rollout history found.", nil
}
if revision > 0 {
// Print details of a specific revision
template, ok := historyInfo[revision]
if !ok {
return "", fmt.Errorf("unable to find the specified revision")
}
return printTemplate(template)
}
// Sort the revisionToChangeCause map by revision
revisions := make([]int64, 0, len(historyInfo))
for r := range historyInfo {
revisions = append(revisions, r)
}
sliceutil.SortInts64(revisions)
return tabbedString(func(out io.Writer) error {
fmt.Fprintf(out, "REVISION\tCHANGE-CAUSE\n")
for _, r := range revisions {
// Find the change-cause of revision r
changeCause := historyInfo[r].Annotations[ChangeCauseAnnotation]
if len(changeCause) == 0 {
changeCause = "<none>"
}
fmt.Fprintf(out, "%d\t%s\n", r, changeCause)
}
return nil
})
}
func printTemplate(template *corev1.PodTemplateSpec) (string, error) {
buf := bytes.NewBuffer([]byte{})
w := describe.NewPrefixWriter(buf)
describe.DescribePodTemplate(template, w)
return buf.String(), nil
}
type DaemonSetHistoryViewer struct {
c kubernetes.Interface
}
// ViewHistory returns a revision-to-history map as the revision history of a daemon set
// TODO: this should be a describer
func (h *DaemonSetHistoryViewer) ViewHistory(namespace, name string, revision int64) (string, error) {
ds, history, err := daemonSetHistory(h.c.AppsV1(), namespace, name)
if err != nil {
return "", err
}
historyInfo := make(map[int64]*appsv1.ControllerRevision)
for _, history := range history {
// TODO: for now we assume revisions don't overlap, we may need to handle it
historyInfo[history.Revision] = history
}
if len(historyInfo) == 0 {
return "No rollout history found.", nil
}
// Print details of a specific revision
if revision > 0 {
history, ok := historyInfo[revision]
if !ok {
return "", fmt.Errorf("unable to find the specified revision")
}
dsOfHistory, err := applyDaemonSetHistory(ds, history)
if err != nil {
return "", fmt.Errorf("unable to parse history %s", history.Name)
}
return printTemplate(&dsOfHistory.Spec.Template)
}
// Print an overview of all Revisions
// Sort the revisionToChangeCause map by revision
revisions := make([]int64, 0, len(historyInfo))
for r := range historyInfo {
revisions = append(revisions, r)
}
sliceutil.SortInts64(revisions)
return tabbedString(func(out io.Writer) error {
fmt.Fprintf(out, "REVISION\tCHANGE-CAUSE\n")
for _, r := range revisions {
// Find the change-cause of revision r
changeCause := historyInfo[r].Annotations[ChangeCauseAnnotation]
if len(changeCause) == 0 {
changeCause = "<none>"
}
fmt.Fprintf(out, "%d\t%s\n", r, changeCause)
}
return nil
})
}
type StatefulSetHistoryViewer struct {
c kubernetes.Interface
}
// ViewHistory returns a list of the revision history of a statefulset
// TODO: this should be a describer
// TODO: needs to implement detailed revision view
func (h *StatefulSetHistoryViewer) ViewHistory(namespace, name string, revision int64) (string, error) {
_, history, err := statefulSetHistory(h.c.AppsV1(), namespace, name)
if err != nil {
return "", err
}
if len(history) <= 0 {
return "No rollout history found.", nil
}
revisions := make([]int64, 0, len(history))
for _, revision := range history {
revisions = append(revisions, revision.Revision)
}
sliceutil.SortInts64(revisions)
return tabbedString(func(out io.Writer) error {
fmt.Fprintf(out, "REVISION\n")
for _, r := range revisions {
fmt.Fprintf(out, "%d\n", r)
}
return nil
})
}
// controlledHistoryV1 returns all ControllerRevisions in namespace that are selected by selector and owned by accessor
// TODO: Rename this to controllerHistory when other controllers have been upgraded
func controlledHistoryV1(
apps clientappsv1.AppsV1Interface,
namespace string,
selector labels.Selector,
accessor metav1.Object) ([]*appsv1.ControllerRevision, error) {
var result []*appsv1.ControllerRevision
historyList, err := apps.ControllerRevisions(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
if err != nil {
return nil, err
}
for i := range historyList.Items {
history := historyList.Items[i]
// Only add history that belongs to the API object
if metav1.IsControlledBy(&history, accessor) {
result = append(result, &history)
}
}
return result, nil
}
// controlledHistory returns all ControllerRevisions in namespace that are selected by selector and owned by accessor
func controlledHistory(
apps clientappsv1.AppsV1Interface,
namespace string,
selector labels.Selector,
accessor metav1.Object) ([]*appsv1.ControllerRevision, error) {
var result []*appsv1.ControllerRevision
historyList, err := apps.ControllerRevisions(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
if err != nil {
return nil, err
}
for i := range historyList.Items {
history := historyList.Items[i]
// Only add history that belongs to the API object
if metav1.IsControlledBy(&history, accessor) {
result = append(result, &history)
}
}
return result, nil
}
// daemonSetHistory returns the DaemonSet named name in namespace and all ControllerRevisions in its history.
func daemonSetHistory(
apps clientappsv1.AppsV1Interface,
namespace, name string) (*appsv1.DaemonSet, []*appsv1.ControllerRevision, error) {
ds, err := apps.DaemonSets(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return nil, nil, fmt.Errorf("failed to retrieve DaemonSet %s: %v", name, err)
}
selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
if err != nil {
return nil, nil, fmt.Errorf("failed to create selector for DaemonSet %s: %v", ds.Name, err)
}
accessor, err := meta.Accessor(ds)
if err != nil {
return nil, nil, fmt.Errorf("failed to create accessor for DaemonSet %s: %v", ds.Name, err)
}
history, err := controlledHistory(apps, ds.Namespace, selector, accessor)
if err != nil {
return nil, nil, fmt.Errorf("unable to find history controlled by DaemonSet %s: %v", ds.Name, err)
}
return ds, history, nil
}
// statefulSetHistory returns the StatefulSet named name in namespace and all ControllerRevisions in its history.
func statefulSetHistory(
apps clientappsv1.AppsV1Interface,
namespace, name string) (*appsv1.StatefulSet, []*appsv1.ControllerRevision, error) {
sts, err := apps.StatefulSets(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return nil, nil, fmt.Errorf("failed to retrieve Statefulset %s: %s", name, err.Error())
}
selector, err := metav1.LabelSelectorAsSelector(sts.Spec.Selector)
if err != nil {
return nil, nil, fmt.Errorf("failed to create selector for StatefulSet %s: %s", name, err.Error())
}
accessor, err := meta.Accessor(sts)
if err != nil {
return nil, nil, fmt.Errorf("failed to obtain accessor for StatefulSet %s: %s", name, err.Error())
}
history, err := controlledHistoryV1(apps, namespace, selector, accessor)
if err != nil {
return nil, nil, fmt.Errorf("unable to find history controlled by StatefulSet %s: %v", name, err)
}
return sts, history, nil
}
// applyDaemonSetHistory returns a specific revision of DaemonSet by applying the given history to a copy of the given DaemonSet
func applyDaemonSetHistory(ds *appsv1.DaemonSet, history *appsv1.ControllerRevision) (*appsv1.DaemonSet, error) {
clone := ds.DeepCopy()
cloneBytes, err := json.Marshal(clone)
if err != nil {
return nil, err
}
patched, err := strategicpatch.StrategicMergePatch(cloneBytes, history.Data.Raw, clone)
if err != nil {
return nil, err
}
err = json.Unmarshal(patched, clone)
if err != nil {
return nil, err
}
return clone, nil
}
// TODO: copied here until this becomes a describer
func tabbedString(f func(io.Writer) error) (string, error) {
out := new(tabwriter.Writer)
buf := &bytes.Buffer{}
out.Init(buf, 0, 8, 2, ' ', 0)
err := f(out)
if err != nil {
return "", err
}
out.Flush()
return buf.String(), nil
}
// getChangeCause returns the change-cause annotation of the input object
func getChangeCause(obj runtime.Object) string {
accessor, err := meta.Accessor(obj)
if err != nil {
return ""
}
return accessor.GetAnnotations()[ChangeCauseAnnotation]
}
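Putting the pieces together, HistoryViewerFor plus ViewHistory reproduce the behavior of `kubectl rollout history`; a minimal sketch (the clientset is a placeholder):

package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/kubectl"
)

func printDeploymentHistory(c kubernetes.Interface, namespace, name string) error {
	viewer, err := kubectl.HistoryViewerFor(
		schema.GroupKind{Group: "apps", Kind: "Deployment"}, c)
	if err != nil {
		return err
	}
	// Revision 0 means "list all revisions" rather than one specific revision.
	out, err := viewer.ViewHistory(namespace, name, 0)
	if err != nil {
		return err
	}
	fmt.Print(out)
	return nil
}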

vendor/k8s.io/kubernetes/pkg/kubectl/interfaces.go (generated, vendored; new file, 32 lines)

@@ -0,0 +1,32 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubectl
import (
"k8s.io/apimachinery/pkg/types"
client "k8s.io/client-go/rest"
)
// RESTClient is a client helper for dealing with RESTful resources
// in a generic way.
type RESTClient interface {
Get() *client.Request
Post() *client.Request
Patch(types.PatchType) *client.Request
Delete() *client.Request
Put() *client.Request
}
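client-go's *rest.RESTClient exposes exactly these verb methods, so it satisfies this interface; an illustrative compile-time assertion makes that explicit:

package example

import (
	client "k8s.io/client-go/rest"
	"k8s.io/kubernetes/pkg/kubectl"
)

// Assert at compile time that *rest.RESTClient implements kubectl.RESTClient.
var _ kubectl.RESTClient = &client.RESTClient{}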

vendor/k8s.io/kubernetes/pkg/kubectl/rollback.go (generated, vendored; new file, 486 lines)

@@ -0,0 +1,486 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubectl
import (
"bytes"
"fmt"
"sort"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/client-go/kubernetes"
kapps "k8s.io/kubernetes/pkg/kubectl/apps"
"k8s.io/kubernetes/pkg/kubectl/scheme"
deploymentutil "k8s.io/kubernetes/pkg/kubectl/util/deployment"
)
const (
rollbackSuccess = "rolled back"
rollbackSkipped = "skipped rollback"
)
// Rollbacker provides an interface for resources that can be rolled back.
type Rollbacker interface {
Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRun bool) (string, error)
}
type RollbackVisitor struct {
clientset kubernetes.Interface
result Rollbacker
}
func (v *RollbackVisitor) VisitDeployment(elem kapps.GroupKindElement) {
v.result = &DeploymentRollbacker{v.clientset}
}
func (v *RollbackVisitor) VisitStatefulSet(kind kapps.GroupKindElement) {
v.result = &StatefulSetRollbacker{v.clientset}
}
func (v *RollbackVisitor) VisitDaemonSet(kind kapps.GroupKindElement) {
v.result = &DaemonSetRollbacker{v.clientset}
}
func (v *RollbackVisitor) VisitJob(kind kapps.GroupKindElement) {}
func (v *RollbackVisitor) VisitPod(kind kapps.GroupKindElement) {}
func (v *RollbackVisitor) VisitReplicaSet(kind kapps.GroupKindElement) {}
func (v *RollbackVisitor) VisitReplicationController(kind kapps.GroupKindElement) {}
func (v *RollbackVisitor) VisitCronJob(kind kapps.GroupKindElement) {}
// RollbackerFor returns an implementation of Rollbacker interface for the given schema kind
func RollbackerFor(kind schema.GroupKind, c kubernetes.Interface) (Rollbacker, error) {
elem := kapps.GroupKindElement(kind)
visitor := &RollbackVisitor{
clientset: c,
}
err := elem.Accept(visitor)
if err != nil {
return nil, fmt.Errorf("error retrieving rollbacker for %q, %v", kind.String(), err)
}
if visitor.result == nil {
return nil, fmt.Errorf("no rollbacker has been implemented for %q", kind)
}
return visitor.result, nil
}
type DeploymentRollbacker struct {
c kubernetes.Interface
}
func (r *DeploymentRollbacker) Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRun bool) (string, error) {
if toRevision < 0 {
return "", revisionNotFoundErr(toRevision)
}
accessor, err := meta.Accessor(obj)
if err != nil {
return "", fmt.Errorf("failed to create accessor for kind %v: %s", obj.GetObjectKind(), err.Error())
}
name := accessor.GetName()
namespace := accessor.GetNamespace()
// TODO: Fix this after kubectl has been removed from core. It is not possible to convert the runtime.Object
// to the external appsv1 Deployment without round-tripping through an internal version of Deployment. We're
// currently getting rid of all internal versions of resources. So we specifically request the appsv1 version
// here. This follows the same pattern as for DaemonSet and StatefulSet.
deployment, err := r.c.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return "", fmt.Errorf("failed to retrieve Deployment %s: %v", name, err)
}
rsForRevision, err := deploymentRevision(deployment, r.c, toRevision)
if err != nil {
return "", err
}
if dryRun {
return printTemplate(&rsForRevision.Spec.Template)
}
if deployment.Spec.Paused {
return "", fmt.Errorf("you cannot rollback a paused deployment; resume it first with 'kubectl rollout resume deployment/%s' and try again", name)
}
// Skip if the revision already matches current Deployment
if equalIgnoreHash(&rsForRevision.Spec.Template, &deployment.Spec.Template) {
return fmt.Sprintf("%s (current template already matches revision %d)", rollbackSkipped, toRevision), nil
}
// remove hash label before patching back into the deployment
delete(rsForRevision.Spec.Template.Labels, appsv1.DefaultDeploymentUniqueLabelKey)
// compute deployment annotations
annotations := map[string]string{}
for k := range annotationsToSkip {
if v, ok := deployment.Annotations[k]; ok {
annotations[k] = v
}
}
for k, v := range rsForRevision.Annotations {
if !annotationsToSkip[k] {
annotations[k] = v
}
}
// make patch to restore
patchType, patch, err := getDeploymentPatch(&rsForRevision.Spec.Template, annotations)
if err != nil {
return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err)
}
// Restore revision
if _, err = r.c.AppsV1().Deployments(namespace).Patch(name, patchType, patch); err != nil {
return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err)
}
return rollbackSuccess, nil
}
// equalIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash]
// We ignore pod-template-hash because:
// 1. The hash result would be different upon podTemplateSpec API changes
// (e.g. the addition of a new field will cause the hash code to change)
// 2. The deployment template won't have hash labels
func equalIgnoreHash(template1, template2 *corev1.PodTemplateSpec) bool {
t1Copy := template1.DeepCopy()
t2Copy := template2.DeepCopy()
// Remove hash labels from template.Labels before comparing
delete(t1Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey)
delete(t2Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey)
return apiequality.Semantic.DeepEqual(t1Copy, t2Copy)
}
// annotationsToSkip lists the annotations that should be preserved from the deployment and not
// copied from the replicaset when rolling a deployment back
var annotationsToSkip = map[string]bool{
corev1.LastAppliedConfigAnnotation: true,
deploymentutil.RevisionAnnotation: true,
deploymentutil.RevisionHistoryAnnotation: true,
deploymentutil.DesiredReplicasAnnotation: true,
deploymentutil.MaxReplicasAnnotation: true,
appsv1.DeprecatedRollbackTo: true,
}
// getDeploymentPatch returns a patch that can be applied to restore a Deployment to a
// previous version. If the returned error is nil the patch is valid.
func getDeploymentPatch(podTemplate *corev1.PodTemplateSpec, annotations map[string]string) (types.PatchType, []byte, error) {
// Create a patch of the Deployment that replaces spec.template
patch, err := json.Marshal([]interface{}{
map[string]interface{}{
"op": "replace",
"path": "/spec/template",
"value": podTemplate,
},
map[string]interface{}{
"op": "replace",
"path": "/metadata/annotations",
"value": annotations,
},
})
return types.JSONPatchType, patch, err
}
func deploymentRevision(deployment *appsv1.Deployment, c kubernetes.Interface, toRevision int64) (revision *appsv1.ReplicaSet, err error) {
_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c.AppsV1())
if err != nil {
return nil, fmt.Errorf("failed to retrieve replica sets from deployment %s: %v", deployment.Name, err)
}
allRSs := allOldRSs
if newRS != nil {
allRSs = append(allRSs, newRS)
}
var (
latestReplicaSet *appsv1.ReplicaSet
latestRevision = int64(-1)
previousReplicaSet *appsv1.ReplicaSet
previousRevision = int64(-1)
)
for _, rs := range allRSs {
if v, err := deploymentutil.Revision(rs); err == nil {
if toRevision == 0 {
if latestRevision < v {
// newest one we've seen so far
previousRevision = latestRevision
previousReplicaSet = latestReplicaSet
latestRevision = v
latestReplicaSet = rs
} else if previousRevision < v {
// second newest one we've seen so far
previousRevision = v
previousReplicaSet = rs
}
} else if toRevision == v {
return rs, nil
}
}
}
if toRevision > 0 {
return nil, revisionNotFoundErr(toRevision)
}
if previousReplicaSet == nil {
return nil, fmt.Errorf("no rollout history found for deployment %q", deployment.Name)
}
return previousReplicaSet, nil
}
type DaemonSetRollbacker struct {
c kubernetes.Interface
}
func (r *DaemonSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRun bool) (string, error) {
if toRevision < 0 {
return "", revisionNotFoundErr(toRevision)
}
accessor, err := meta.Accessor(obj)
if err != nil {
return "", fmt.Errorf("failed to create accessor for kind %v: %s", obj.GetObjectKind(), err.Error())
}
ds, history, err := daemonSetHistory(r.c.AppsV1(), accessor.GetNamespace(), accessor.GetName())
if err != nil {
return "", err
}
if toRevision == 0 && len(history) <= 1 {
return "", fmt.Errorf("no last revision to roll back to")
}
toHistory := findHistory(toRevision, history)
if toHistory == nil {
return "", revisionNotFoundErr(toRevision)
}
if dryRun {
appliedDS, err := applyDaemonSetHistory(ds, toHistory)
if err != nil {
return "", err
}
return printPodTemplate(&appliedDS.Spec.Template)
}
// Skip if the revision already matches current DaemonSet
done, err := daemonSetMatch(ds, toHistory)
if err != nil {
return "", err
}
if done {
return fmt.Sprintf("%s (current template already matches revision %d)", rollbackSkipped, toRevision), nil
}
// Restore revision
if _, err = r.c.AppsV1().DaemonSets(accessor.GetNamespace()).Patch(accessor.GetName(), types.StrategicMergePatchType, toHistory.Data.Raw); err != nil {
return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err)
}
return rollbackSuccess, nil
}
// daemonSetMatch checks whether the given DaemonSet's template matches the template stored in the given history.
func daemonSetMatch(ds *appsv1.DaemonSet, history *appsv1.ControllerRevision) (bool, error) {
patch, err := getDaemonSetPatch(ds)
if err != nil {
return false, err
}
return bytes.Equal(patch, history.Data.Raw), nil
}
// getDaemonSetPatch returns a strategic merge patch that can be applied to restore a DaemonSet to a
// previous version. If the returned error is nil the patch is valid. The current state that we save is just the
// PodTemplateSpec. We can modify this later to encompass more state (or less) and remain compatible with previously
// recorded patches.
func getDaemonSetPatch(ds *appsv1.DaemonSet) ([]byte, error) {
dsBytes, err := json.Marshal(ds)
if err != nil {
return nil, err
}
var raw map[string]interface{}
err = json.Unmarshal(dsBytes, &raw)
if err != nil {
return nil, err
}
objCopy := make(map[string]interface{})
specCopy := make(map[string]interface{})
// Create a patch of the DaemonSet that replaces spec.template
spec := raw["spec"].(map[string]interface{})
template := spec["template"].(map[string]interface{})
specCopy["template"] = template
template["$patch"] = "replace"
objCopy["spec"] = specCopy
patch, err := json.Marshal(objCopy)
return patch, err
}
type StatefulSetRollbacker struct {
c kubernetes.Interface
}
// toRevision is a non-negative integer, with 0 being reserved to indicate rolling back to previous configuration
func (r *StatefulSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRun bool) (string, error) {
if toRevision < 0 {
return "", revisionNotFoundErr(toRevision)
}
accessor, err := meta.Accessor(obj)
if err != nil {
return "", fmt.Errorf("failed to create accessor for kind %v: %s", obj.GetObjectKind(), err.Error())
}
sts, history, err := statefulSetHistory(r.c.AppsV1(), accessor.GetNamespace(), accessor.GetName())
if err != nil {
return "", err
}
if toRevision == 0 && len(history) <= 1 {
return "", fmt.Errorf("no last revision to roll back to")
}
toHistory := findHistory(toRevision, history)
if toHistory == nil {
return "", revisionNotFoundErr(toRevision)
}
if dryRun {
appliedSS, err := applyRevision(sts, toHistory)
if err != nil {
return "", err
}
return printPodTemplate(&appliedSS.Spec.Template)
}
// Skip if the revision already matches current StatefulSet
done, err := statefulsetMatch(sts, toHistory)
if err != nil {
return "", err
}
if done {
return fmt.Sprintf("%s (current template already matches revision %d)", rollbackSkipped, toRevision), nil
}
// Restore revision
if _, err = r.c.AppsV1().StatefulSets(sts.Namespace).Patch(sts.Name, types.StrategicMergePatchType, toHistory.Data.Raw); err != nil {
return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err)
}
return rollbackSuccess, nil
}
var appsCodec = scheme.Codecs.LegacyCodec(appsv1.SchemeGroupVersion)
// applyRevision returns a new StatefulSet constructed by restoring the state in revision to set. If the returned error
// is nil, the returned StatefulSet is valid.
func applyRevision(set *appsv1.StatefulSet, revision *appsv1.ControllerRevision) (*appsv1.StatefulSet, error) {
clone := set.DeepCopy()
patched, err := strategicpatch.StrategicMergePatch([]byte(runtime.EncodeOrDie(appsCodec, clone)), revision.Data.Raw, clone)
if err != nil {
return nil, err
}
err = json.Unmarshal(patched, clone)
if err != nil {
return nil, err
}
return clone, nil
}
// statefulsetMatch checks whether the given StatefulSet's template matches the template stored in the given history.
func statefulsetMatch(ss *appsv1.StatefulSet, history *appsv1.ControllerRevision) (bool, error) {
patch, err := getStatefulSetPatch(ss)
if err != nil {
return false, err
}
return bytes.Equal(patch, history.Data.Raw), nil
}
// getStatefulSetPatch returns a strategic merge patch that can be applied to restore a StatefulSet to a
// previous version. If the returned error is nil the patch is valid. The current state that we save is just the
// PodTemplateSpec. We can modify this later to encompass more state (or less) and remain compatible with previously
// recorded patches.
func getStatefulSetPatch(set *appsv1.StatefulSet) ([]byte, error) {
str, err := runtime.Encode(appsCodec, set)
if err != nil {
return nil, err
}
var raw map[string]interface{}
if err := json.Unmarshal([]byte(str), &raw); err != nil {
return nil, err
}
objCopy := make(map[string]interface{})
specCopy := make(map[string]interface{})
spec := raw["spec"].(map[string]interface{})
template := spec["template"].(map[string]interface{})
specCopy["template"] = template
template["$patch"] = "replace"
objCopy["spec"] = specCopy
patch, err := json.Marshal(objCopy)
return patch, err
}
// findHistory returns a controllerrevision of a specific revision from the given controllerrevisions.
// It returns nil if no such controllerrevision exists.
// If toRevision is 0, the last previously used history is returned.
func findHistory(toRevision int64, allHistory []*appsv1.ControllerRevision) *appsv1.ControllerRevision {
if toRevision == 0 && len(allHistory) <= 1 {
return nil
}
// Find the history to rollback to
var toHistory *appsv1.ControllerRevision
if toRevision == 0 {
// If toRevision == 0, pick the second-highest revision, i.e. the previously deployed one
sort.Sort(historiesByRevision(allHistory))
toHistory = allHistory[len(allHistory)-2]
} else {
for _, h := range allHistory {
if h.Revision == toRevision {
// If toRevision != 0, find the history with matching revision
return h
}
}
}
return toHistory
}
// printPodTemplate converts a given pod template into a human-readable string.
func printPodTemplate(specTemplate *corev1.PodTemplateSpec) (string, error) {
podSpec, err := printTemplate(specTemplate)
if err != nil {
return "", err
}
return fmt.Sprintf("will roll back to %s", podSpec), nil
}
func revisionNotFoundErr(r int64) error {
return fmt.Errorf("unable to find specified revision %v in history", r)
}
// TODO: copied from daemon controller, should extract to a library
type historiesByRevision []*appsv1.ControllerRevision
func (h historiesByRevision) Len() int { return len(h) }
func (h historiesByRevision) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h historiesByRevision) Less(i, j int) bool {
return h[i].Revision < h[j].Revision
}

vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go (generated, vendored; new file, 849 lines)

@@ -0,0 +1,849 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubectl
import (
"fmt"
"io"
"strconv"
"strings"
"time"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
scaleclient "k8s.io/client-go/scale"
"k8s.io/client-go/util/retry"
"k8s.io/kubernetes/pkg/kubectl/util"
deploymentutil "k8s.io/kubernetes/pkg/kubectl/util/deployment"
"k8s.io/kubernetes/pkg/kubectl/util/podutils"
"k8s.io/utils/integer"
)
func newInt32Ptr(val int) *int32 {
ret := int32(val)
return &ret
}
func valOrZero(val *int32) int32 {
if val == nil {
return int32(0)
}
return *val
}
const (
kubectlAnnotationPrefix = "kubectl.kubernetes.io/"
sourceIDAnnotation = kubectlAnnotationPrefix + "update-source-id"
desiredReplicasAnnotation = kubectlAnnotationPrefix + "desired-replicas"
originalReplicasAnnotation = kubectlAnnotationPrefix + "original-replicas"
nextControllerAnnotation = kubectlAnnotationPrefix + "next-controller-id"
)
// RollingUpdaterConfig is the configuration for a rolling deployment process.
type RollingUpdaterConfig struct {
// Out is a writer for progress output.
Out io.Writer
// OldRc is an existing controller to be replaced.
OldRc *corev1.ReplicationController
// NewRc is a controller that will take ownership of updated pods (will be
// created if needed).
NewRc *corev1.ReplicationController
// UpdatePeriod is the time to wait between individual pod updates.
UpdatePeriod time.Duration
// Interval is the time to wait between polling controller status after
// update.
Interval time.Duration
// Timeout is the time to wait for controller updates before giving up.
Timeout time.Duration
// MinReadySeconds is the number of seconds to wait after the pods are ready
MinReadySeconds int32
// CleanupPolicy defines the cleanup action to take after the deployment is
// complete.
CleanupPolicy RollingUpdaterCleanupPolicy
// MaxUnavailable is the maximum number of pods that can be unavailable during the update.
// Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
// Absolute number is calculated from percentage by rounding up.
// This can not be 0 if MaxSurge is 0.
// By default, a fixed value of 1 is used.
// Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods
// immediately when the rolling update starts. Once new pods are ready, old RC
// can be scaled down further, followed by scaling up the new RC, ensuring
// that the total number of pods available at all times during the update is at
// least 70% of desired pods.
MaxUnavailable intstr.IntOrString
// MaxSurge is the maximum number of pods that can be scheduled above the desired number of pods.
// Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
// This can not be 0 if MaxUnavailable is 0.
// Absolute number is calculated from percentage by rounding up.
// By default, a value of 1 is used.
// Example: when this is set to 30%, the new RC can be scaled up immediately
// when the rolling update starts, such that the total number of old and new pods do not exceed
// 130% of desired pods. Once old pods have been killed, new RC can be scaled up
// further, ensuring that total number of pods running at any time during
// the update is at most 130% of desired pods.
MaxSurge intstr.IntOrString
// OnProgress is invoked if set during each scale cycle, to allow the caller to perform additional logic or
// abort the scale. If an error is returned the cleanup method will not be invoked. The percentage value
// is a synthetic "progress" calculation that represents the approximate percentage completion.
OnProgress func(oldRc, newRc *corev1.ReplicationController, percentage int) error
}
// RollingUpdaterCleanupPolicy is a cleanup action to take after the
// deployment is complete.
type RollingUpdaterCleanupPolicy string
const (
// DeleteRollingUpdateCleanupPolicy means delete the old controller.
DeleteRollingUpdateCleanupPolicy RollingUpdaterCleanupPolicy = "Delete"
// PreserveRollingUpdateCleanupPolicy means keep the old controller.
PreserveRollingUpdateCleanupPolicy RollingUpdaterCleanupPolicy = "Preserve"
// RenameRollingUpdateCleanupPolicy means delete the old controller, and rename
// the new controller to the name of the old controller.
RenameRollingUpdateCleanupPolicy RollingUpdaterCleanupPolicy = "Rename"
)
// RollingUpdater provides methods for updating replicated pods in a predictable,
// fault-tolerant way.
type RollingUpdater struct {
rcClient corev1client.ReplicationControllersGetter
podClient corev1client.PodsGetter
scaleClient scaleclient.ScalesGetter
// Namespace for resources
ns string
// scaleAndWait scales a controller and returns its updated state.
scaleAndWait func(rc *corev1.ReplicationController, retry *RetryParams, wait *RetryParams) (*corev1.ReplicationController, error)
// getOrCreateTargetController gets and validates an existing controller or
// makes a new one.
getOrCreateTargetController func(controller *corev1.ReplicationController, sourceID string) (*corev1.ReplicationController, bool, error)
// cleanup performs post deployment cleanup tasks for newRc and oldRc.
cleanup func(oldRc, newRc *corev1.ReplicationController, config *RollingUpdaterConfig) error
// getReadyPods returns the amount of old and new ready pods.
getReadyPods func(oldRc, newRc *corev1.ReplicationController, minReadySeconds int32) (int32, int32, error)
// nowFn returns the current time used to calculate the minReadySeconds
nowFn func() metav1.Time
}
// NewRollingUpdater creates a RollingUpdater from a client.
func NewRollingUpdater(namespace string, rcClient corev1client.ReplicationControllersGetter, podClient corev1client.PodsGetter, sc scaleclient.ScalesGetter) *RollingUpdater {
updater := &RollingUpdater{
rcClient: rcClient,
podClient: podClient,
scaleClient: sc,
ns: namespace,
}
// Inject real implementations.
updater.scaleAndWait = updater.scaleAndWaitWithScaler
updater.getOrCreateTargetController = updater.getOrCreateTargetControllerWithClient
updater.getReadyPods = updater.readyPods
updater.cleanup = updater.cleanupWithClients
updater.nowFn = func() metav1.Time { return metav1.Now() }
return updater
}
// Update all pods for a ReplicationController (oldRc) by creating a new
// controller (newRc) with 0 replicas, and synchronously scaling oldRc and
// newRc until oldRc has 0 replicas and newRc has the original # of desired
// replicas. Cleanup occurs based on a RollingUpdaterCleanupPolicy.
//
// Each interval, the updater will attempt to make progress however it can
// without violating any availability constraints defined by the config. This
// means the amount scaled up or down each interval will vary based on the
// timeliness of readiness and the updater will always try to make progress,
// even slowly.
//
// If an update from newRc to oldRc is already in progress, we attempt to
// drive it to completion. If an error occurs at any step of the update, the
// error will be returned.
//
// A scaling event (either up or down) is considered progress; if no progress
// is made within the config.Timeout, an error is returned.
//
// TODO: make this handle performing a rollback of a partially completed
// rollout.
func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error {
out := config.Out
oldRc := config.OldRc
scaleRetryParams := NewRetryParams(config.Interval, config.Timeout)
// Find an existing controller (for continuing an interrupted update) or
// create a new one if necessary.
sourceID := fmt.Sprintf("%s:%s", oldRc.Name, oldRc.UID)
newRc, existed, err := r.getOrCreateTargetController(config.NewRc, sourceID)
if err != nil {
return err
}
if existed {
fmt.Fprintf(out, "Continuing update with existing controller %s.\n", newRc.Name)
} else {
fmt.Fprintf(out, "Created %s\n", newRc.Name)
}
// Extract the desired replica count from the controller.
desiredAnnotation, err := strconv.Atoi(newRc.Annotations[desiredReplicasAnnotation])
if err != nil {
return fmt.Errorf("Unable to parse annotation for %s: %s=%s",
newRc.Name, desiredReplicasAnnotation, newRc.Annotations[desiredReplicasAnnotation])
}
desired := int32(desiredAnnotation)
// Extract the original replica count from the old controller, adding the
// annotation if it doesn't yet exist.
_, hasOriginalAnnotation := oldRc.Annotations[originalReplicasAnnotation]
if !hasOriginalAnnotation {
existing, err := r.rcClient.ReplicationControllers(oldRc.Namespace).Get(oldRc.Name, metav1.GetOptions{})
if err != nil {
return err
}
originReplicas := strconv.Itoa(int(valOrZero(existing.Spec.Replicas)))
applyUpdate := func(rc *corev1.ReplicationController) {
if rc.Annotations == nil {
rc.Annotations = map[string]string{}
}
rc.Annotations[originalReplicasAnnotation] = originReplicas
}
if oldRc, err = updateRcWithRetries(r.rcClient, existing.Namespace, existing, applyUpdate); err != nil {
return err
}
}
// maxSurge is the maximum scaling increment and maxUnavailable is the maximum number of pods
// that can be unavailable during a rollout.
maxSurge, maxUnavailable, err := deploymentutil.ResolveFenceposts(&config.MaxSurge, &config.MaxUnavailable, desired)
if err != nil {
return err
}
// Validate maximums.
if desired > 0 && maxUnavailable == 0 && maxSurge == 0 {
return fmt.Errorf("one of maxSurge or maxUnavailable must be specified")
}
// The minimum number of pods which must remain available throughout the update,
// calculated here for internal convenience.
minAvailable := int32(integer.IntMax(0, int(desired-maxUnavailable)))
// If the desired new scale is 0, then the max unavailable is necessarily
// the effective scale of the old RC regardless of the configuration
// (equivalent to 100% maxUnavailable).
if desired == 0 {
maxUnavailable = valOrZero(oldRc.Spec.Replicas)
minAvailable = 0
}
fmt.Fprintf(out, "Scaling up %s from %d to %d, scaling down %s from %d to 0 (keep %d pods available, don't exceed %d pods)\n",
newRc.Name, valOrZero(newRc.Spec.Replicas), desired, oldRc.Name, valOrZero(oldRc.Spec.Replicas), minAvailable, desired+maxSurge)
// give a caller incremental notification and allow them to exit early
goal := desired - valOrZero(newRc.Spec.Replicas)
if goal < 0 {
goal = -goal
}
progress := func(complete bool) error {
if config.OnProgress == nil {
return nil
}
progress := desired - valOrZero(newRc.Spec.Replicas)
if progress < 0 {
progress = -progress
}
percentage := 100
if !complete && goal > 0 {
percentage = int((goal - progress) * 100 / goal)
}
return config.OnProgress(oldRc, newRc, percentage)
}
// Scale newRc and oldRc until newRc has the desired number of replicas and
// oldRc has 0 replicas.
progressDeadline := time.Now().UnixNano() + config.Timeout.Nanoseconds()
for valOrZero(newRc.Spec.Replicas) != desired || valOrZero(oldRc.Spec.Replicas) != 0 {
// Store the existing replica counts for progress timeout tracking.
newReplicas := valOrZero(newRc.Spec.Replicas)
oldReplicas := valOrZero(oldRc.Spec.Replicas)
// Scale up as much as possible.
scaledRc, err := r.scaleUp(newRc, oldRc, desired, maxSurge, maxUnavailable, scaleRetryParams, config)
if err != nil {
return err
}
newRc = scaledRc
// notify the caller if necessary
if err := progress(false); err != nil {
return err
}
// Wait between scaling operations for things to settle.
time.Sleep(config.UpdatePeriod)
// Scale down as much as possible.
scaledRc, err = r.scaleDown(newRc, oldRc, desired, minAvailable, maxUnavailable, maxSurge, config)
if err != nil {
return err
}
oldRc = scaledRc
// notify the caller if necessary
if err := progress(false); err != nil {
return err
}
// If we are making progress, continue to advance the progress deadline.
// Otherwise, time out with an error.
progressMade := (valOrZero(newRc.Spec.Replicas) != newReplicas) || (valOrZero(oldRc.Spec.Replicas) != oldReplicas)
if progressMade {
progressDeadline = time.Now().UnixNano() + config.Timeout.Nanoseconds()
} else if time.Now().UnixNano() > progressDeadline {
return fmt.Errorf("timed out waiting for any update progress to be made")
}
}
// notify the caller if necessary
if err := progress(true); err != nil {
return err
}
// Housekeeping and cleanup policy execution.
return r.cleanup(oldRc, newRc, config)
}
// scaleUp scales up newRc to desired by whatever increment is possible given
// the configured surge threshold. scaleUp will safely no-op as necessary when
// it detects redundancy or other relevant conditions.
func (r *RollingUpdater) scaleUp(newRc, oldRc *corev1.ReplicationController, desired, maxSurge, maxUnavailable int32, scaleRetryParams *RetryParams, config *RollingUpdaterConfig) (*corev1.ReplicationController, error) {
// If we're already at the desired, do nothing.
if valOrZero(newRc.Spec.Replicas) == desired {
return newRc, nil
}
// Scale up as far as we can based on the surge limit.
increment := (desired + maxSurge) - (valOrZero(oldRc.Spec.Replicas) + valOrZero(newRc.Spec.Replicas))
// If the old is already scaled down, go ahead and scale all the way up.
if valOrZero(oldRc.Spec.Replicas) == 0 {
increment = desired - valOrZero(newRc.Spec.Replicas)
}
// We can't scale up without violating the surge limit, so do nothing.
if increment <= 0 {
return newRc, nil
}
// Increase the replica count, and deal with fenceposts.
nextVal := valOrZero(newRc.Spec.Replicas) + increment
newRc.Spec.Replicas = &nextVal
if valOrZero(newRc.Spec.Replicas) > desired {
newRc.Spec.Replicas = &desired
}
// Perform the scale-up.
fmt.Fprintf(config.Out, "Scaling %s up to %d\n", newRc.Name, valOrZero(newRc.Spec.Replicas))
scaledRc, err := r.scaleAndWait(newRc, scaleRetryParams, scaleRetryParams)
if err != nil {
return nil, err
}
return scaledRc, nil
}
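// Worked example (editor's illustration, not from the original source): with
// desired=5, maxSurge=1, oldRc at 3 and newRc at 2, increment = (5+1)-(3+2) = 1,
// so newRc is scaled up to 3 replicas.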
// scaleDown scales down oldRc to 0 at whatever decrement possible given the
// thresholds defined on the config. scaleDown will safely no-op as necessary
// when it detects redundancy or other relevant conditions.
func (r *RollingUpdater) scaleDown(newRc, oldRc *corev1.ReplicationController, desired, minAvailable, maxUnavailable, maxSurge int32, config *RollingUpdaterConfig) (*corev1.ReplicationController, error) {
// Already scaled down; do nothing.
if valOrZero(oldRc.Spec.Replicas) == 0 {
return oldRc, nil
}
// Get ready pods. We shouldn't block here; otherwise, when both old and new
// pods are unavailable, the whole rolling update process would block.
// Timeout-wise we are already covered by the progress check.
_, newAvailable, err := r.getReadyPods(oldRc, newRc, config.MinReadySeconds)
if err != nil {
return nil, err
}
// The old controller is considered as part of the total because we want to
// maintain minimum availability even with a volatile old controller.
// Scale down as much as possible while maintaining minimum availability
allPods := valOrZero(oldRc.Spec.Replicas) + valOrZero(newRc.Spec.Replicas)
newUnavailable := valOrZero(newRc.Spec.Replicas) - newAvailable
decrement := allPods - minAvailable - newUnavailable
// The decrement normally shouldn't drop below 0 because the available count
// always starts below the old replica count, but the old replica count can
// decrement due to externalities like pod deaths in the replica set. This
// is considered a transient condition; do nothing and try again later
// with new readiness values.
//
// If the most we can scale is 0, it means we can't scale down without
// violating the minimum. Do nothing and try again later when conditions may
// have changed.
if decrement <= 0 {
return oldRc, nil
}
// Reduce the replica count, and deal with fenceposts.
nextOldVal := valOrZero(oldRc.Spec.Replicas) - decrement
oldRc.Spec.Replicas = &nextOldVal
if valOrZero(oldRc.Spec.Replicas) < 0 {
oldRc.Spec.Replicas = newInt32Ptr(0)
}
// If the new is already fully scaled and available up to the desired size, go
// ahead and scale old all the way down.
if valOrZero(newRc.Spec.Replicas) == desired && newAvailable == desired {
oldRc.Spec.Replicas = newInt32Ptr(0)
}
// Perform the scale-down.
fmt.Fprintf(config.Out, "Scaling %s down to %d\n", oldRc.Name, valOrZero(oldRc.Spec.Replicas))
retryWait := &RetryParams{config.Interval, config.Timeout}
scaledRc, err := r.scaleAndWait(oldRc, retryWait, retryWait)
if err != nil {
return nil, err
}
return scaledRc, nil
}
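// Worked example (editor's illustration, not from the original source): with
// oldRc at 5, newRc at 2 of which 1 is available, and minAvailable=4, we get
// allPods=7, newUnavailable=1 and decrement = 7-4-1 = 2, so oldRc is scaled
// down from 5 to 3.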
// scaleAndWaitWithScaler scales a controller using a Scaler and a real client.
func (r *RollingUpdater) scaleAndWaitWithScaler(rc *corev1.ReplicationController, retry *RetryParams, wait *RetryParams) (*corev1.ReplicationController, error) {
scaler := NewScaler(r.scaleClient)
if err := scaler.Scale(rc.Namespace, rc.Name, uint(valOrZero(rc.Spec.Replicas)), &ScalePrecondition{-1, ""}, retry, wait, schema.GroupResource{Resource: "replicationcontrollers"}); err != nil {
return nil, err
}
return r.rcClient.ReplicationControllers(rc.Namespace).Get(rc.Name, metav1.GetOptions{})
}
// readyPods returns the old and new ready counts for their pods.
// If a pod is observed as being ready, it's considered ready even
// if it later becomes not ready.
func (r *RollingUpdater) readyPods(oldRc, newRc *corev1.ReplicationController, minReadySeconds int32) (int32, int32, error) {
controllers := []*corev1.ReplicationController{oldRc, newRc}
oldReady := int32(0)
newReady := int32(0)
if r.nowFn == nil {
r.nowFn = func() metav1.Time { return metav1.Now() }
}
for i := range controllers {
controller := controllers[i]
selector := labels.Set(controller.Spec.Selector).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := r.podClient.Pods(controller.Namespace).List(options)
if err != nil {
return 0, 0, err
}
for _, v1Pod := range pods.Items {
// Do not count deleted pods as ready
if v1Pod.DeletionTimestamp != nil {
continue
}
if !podutils.IsPodAvailable(&v1Pod, minReadySeconds, r.nowFn()) {
continue
}
switch controller.Name {
case oldRc.Name:
oldReady++
case newRc.Name:
newReady++
}
}
}
return oldReady, newReady, nil
}
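// Editor's note (illustrative): with minReadySeconds=5, a pod whose Ready
// condition transitioned 10 seconds ago is counted, while a pod that is
// ready but already carries a DeletionTimestamp is not.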
// getOrCreateTargetControllerWithClient looks for an existing controller with
// sourceID. If found, the existing controller is returned with true
// indicating that the controller already exists. If the controller isn't
// found, a new one is created and returned along with false indicating the
// controller was created.
//
// Existing controllers are validated to ensure their sourceIDAnnotation
// matches sourceID; if there's a mismatch, an error is returned.
func (r *RollingUpdater) getOrCreateTargetControllerWithClient(controller *corev1.ReplicationController, sourceID string) (*corev1.ReplicationController, bool, error) {
existingRc, err := r.existingController(controller)
if err != nil {
if !errors.IsNotFound(err) {
// There was an error trying to find the controller; don't assume we
// should create it.
return nil, false, err
}
if valOrZero(controller.Spec.Replicas) <= 0 {
return nil, false, fmt.Errorf("Invalid controller spec for %s; required: > 0 replicas, actual: %d", controller.Name, valOrZero(controller.Spec.Replicas))
}
// The controller wasn't found, so create it.
if controller.Annotations == nil {
controller.Annotations = map[string]string{}
}
controller.Annotations[desiredReplicasAnnotation] = fmt.Sprintf("%d", valOrZero(controller.Spec.Replicas))
controller.Annotations[sourceIDAnnotation] = sourceID
controller.Spec.Replicas = newInt32Ptr(0)
newRc, err := r.rcClient.ReplicationControllers(r.ns).Create(controller)
return newRc, false, err
}
// Validate and use the existing controller.
annotations := existingRc.Annotations
source := annotations[sourceIDAnnotation]
_, ok := annotations[desiredReplicasAnnotation]
if source != sourceID || !ok {
return nil, false, fmt.Errorf("Missing/unexpected annotations for controller %s, expected %s : %s", controller.Name, sourceID, annotations)
}
return existingRc, true, nil
}
// existingController verifies if the controller already exists
func (r *RollingUpdater) existingController(controller *corev1.ReplicationController) (*corev1.ReplicationController, error) {
// Without an rc name but with a generate name, there is no existing rc.
if len(controller.Name) == 0 && len(controller.GenerateName) > 0 {
return nil, errors.NewNotFound(corev1.Resource("replicationcontrollers"), controller.Name)
}
// A controller name is required to get the rc back.
return r.rcClient.ReplicationControllers(controller.Namespace).Get(controller.Name, metav1.GetOptions{})
}
// cleanupWithClients performs cleanup tasks after the rolling update. Update
// process related annotations are removed from oldRc and newRc. The
// CleanupPolicy on config is executed.
func (r *RollingUpdater) cleanupWithClients(oldRc, newRc *corev1.ReplicationController, config *RollingUpdaterConfig) error {
// Clean up annotations
var err error
newRc, err = r.rcClient.ReplicationControllers(r.ns).Get(newRc.Name, metav1.GetOptions{})
if err != nil {
return err
}
applyUpdate := func(rc *corev1.ReplicationController) {
delete(rc.Annotations, sourceIDAnnotation)
delete(rc.Annotations, desiredReplicasAnnotation)
}
if newRc, err = updateRcWithRetries(r.rcClient, r.ns, newRc, applyUpdate); err != nil {
return err
}
if err = wait.Poll(config.Interval, config.Timeout, ControllerHasDesiredReplicas(r.rcClient, newRc)); err != nil {
return err
}
newRc, err = r.rcClient.ReplicationControllers(r.ns).Get(newRc.Name, metav1.GetOptions{})
if err != nil {
return err
}
switch config.CleanupPolicy {
case DeleteRollingUpdateCleanupPolicy:
// delete old rc
fmt.Fprintf(config.Out, "Update succeeded. Deleting %s\n", oldRc.Name)
return r.rcClient.ReplicationControllers(r.ns).Delete(oldRc.Name, nil)
case RenameRollingUpdateCleanupPolicy:
// delete old rc
fmt.Fprintf(config.Out, "Update succeeded. Deleting old controller: %s\n", oldRc.Name)
if err := r.rcClient.ReplicationControllers(r.ns).Delete(oldRc.Name, nil); err != nil {
return err
}
fmt.Fprintf(config.Out, "Renaming %s to %s\n", newRc.Name, oldRc.Name)
return Rename(r.rcClient, newRc, oldRc.Name)
case PreserveRollingUpdateCleanupPolicy:
return nil
default:
return nil
}
}
func Rename(c corev1client.ReplicationControllersGetter, rc *corev1.ReplicationController, newName string) error {
oldName := rc.Name
rc.Name = newName
rc.ResourceVersion = ""
// First delete the oldName RC and orphan its pods.
policy := metav1.DeletePropagationOrphan
err := c.ReplicationControllers(rc.Namespace).Delete(oldName, &metav1.DeleteOptions{PropagationPolicy: &policy})
if err != nil && !errors.IsNotFound(err) {
return err
}
err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
_, err := c.ReplicationControllers(rc.Namespace).Get(oldName, metav1.GetOptions{})
if err == nil {
return false, nil
} else if errors.IsNotFound(err) {
return true, nil
} else {
return false, err
}
})
if err != nil {
return err
}
// Then create the same RC with the new name.
_, err = c.ReplicationControllers(rc.Namespace).Create(rc)
return err
}
func LoadExistingNextReplicationController(c corev1client.ReplicationControllersGetter, namespace, newName string) (*corev1.ReplicationController, error) {
if len(newName) == 0 {
return nil, nil
}
newRc, err := c.ReplicationControllers(namespace).Get(newName, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return nil, nil
}
return newRc, err
}
type NewControllerConfig struct {
Namespace string
OldName, NewName string
Image string
Container string
DeploymentKey string
PullPolicy corev1.PullPolicy
}
func CreateNewControllerFromCurrentController(rcClient corev1client.ReplicationControllersGetter, codec runtime.Codec, cfg *NewControllerConfig) (*corev1.ReplicationController, error) {
containerIndex := 0
// load the old RC into the "new" RC
newRc, err := rcClient.ReplicationControllers(cfg.Namespace).Get(cfg.OldName, metav1.GetOptions{})
if err != nil {
return nil, err
}
if len(cfg.Container) != 0 {
containerFound := false
for i, c := range newRc.Spec.Template.Spec.Containers {
if c.Name == cfg.Container {
containerIndex = i
containerFound = true
break
}
}
if !containerFound {
return nil, fmt.Errorf("container %s not found in pod", cfg.Container)
}
}
if len(newRc.Spec.Template.Spec.Containers) > 1 && len(cfg.Container) == 0 {
return nil, fmt.Errorf("must specify container to update when updating a multi-container pod")
}
if len(newRc.Spec.Template.Spec.Containers) == 0 {
return nil, fmt.Errorf("pod has no containers! (%v)", newRc)
}
newRc.Spec.Template.Spec.Containers[containerIndex].Image = cfg.Image
if len(cfg.PullPolicy) != 0 {
newRc.Spec.Template.Spec.Containers[containerIndex].ImagePullPolicy = cfg.PullPolicy
}
newHash, err := util.HashObject(newRc, codec)
if err != nil {
return nil, err
}
if len(cfg.NewName) == 0 {
cfg.NewName = fmt.Sprintf("%s-%s", newRc.Name, newHash)
}
newRc.Name = cfg.NewName
newRc.Spec.Selector[cfg.DeploymentKey] = newHash
newRc.Spec.Template.Labels[cfg.DeploymentKey] = newHash
// Clear resource version after hashing so that identical updates get different hashes.
newRc.ResourceVersion = ""
return newRc, nil
}
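// Editor's illustration (hypothetical values): updating rc "nginx" with a
// computed hash of "7b8d9f" and an empty NewName yields a new controller
// named "nginx-7b8d9f" whose selector and template labels both carry
// DeploymentKey=7b8d9f.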
func AbortRollingUpdate(c *RollingUpdaterConfig) error {
// Swap the controllers
tmp := c.OldRc
c.OldRc = c.NewRc
c.NewRc = tmp
if c.NewRc.Annotations == nil {
c.NewRc.Annotations = map[string]string{}
}
c.NewRc.Annotations[sourceIDAnnotation] = fmt.Sprintf("%s:%s", c.OldRc.Name, c.OldRc.UID)
// Use the original value since the replica count change from old to new
// could be asymmetric. If we don't know the original count, we can't safely
// roll back to a known good size.
originalSize, foundOriginal := tmp.Annotations[originalReplicasAnnotation]
if !foundOriginal {
return fmt.Errorf("couldn't find original replica count of %q", tmp.Name)
}
fmt.Fprintf(c.Out, "Setting %q replicas to %s\n", c.NewRc.Name, originalSize)
c.NewRc.Annotations[desiredReplicasAnnotation] = originalSize
c.CleanupPolicy = DeleteRollingUpdateCleanupPolicy
return nil
}
func GetNextControllerAnnotation(rc *corev1.ReplicationController) (string, bool) {
res, found := rc.Annotations[nextControllerAnnotation]
return res, found
}
func SetNextControllerAnnotation(rc *corev1.ReplicationController, name string) {
if rc.Annotations == nil {
rc.Annotations = map[string]string{}
}
rc.Annotations[nextControllerAnnotation] = name
}
func UpdateExistingReplicationController(rcClient corev1client.ReplicationControllersGetter, podClient corev1client.PodsGetter, oldRc *corev1.ReplicationController, namespace, newName, deploymentKey, deploymentValue string, out io.Writer) (*corev1.ReplicationController, error) {
if _, found := oldRc.Spec.Selector[deploymentKey]; !found {
SetNextControllerAnnotation(oldRc, newName)
return AddDeploymentKeyToReplicationController(oldRc, rcClient, podClient, deploymentKey, deploymentValue, namespace, out)
}
// If we didn't need to update the controller for the deployment key, we still need to write
// the "next" controller.
applyUpdate := func(rc *corev1.ReplicationController) {
SetNextControllerAnnotation(rc, newName)
}
return updateRcWithRetries(rcClient, namespace, oldRc, applyUpdate)
}
func AddDeploymentKeyToReplicationController(oldRc *corev1.ReplicationController, rcClient corev1client.ReplicationControllersGetter, podClient corev1client.PodsGetter, deploymentKey, deploymentValue, namespace string, out io.Writer) (*corev1.ReplicationController, error) {
var err error
// First, update the template label. This ensures that any newly created pods will have the new label
applyUpdate := func(rc *corev1.ReplicationController) {
if rc.Spec.Template.Labels == nil {
rc.Spec.Template.Labels = map[string]string{}
}
rc.Spec.Template.Labels[deploymentKey] = deploymentValue
}
if oldRc, err = updateRcWithRetries(rcClient, namespace, oldRc, applyUpdate); err != nil {
return nil, err
}
// Update all pods managed by the rc to have the new hash label, so they are correctly adopted
// TODO: extract the code from the label command and re-use it here.
selector := labels.SelectorFromSet(oldRc.Spec.Selector)
options := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := podClient.Pods(namespace).List(options)
if err != nil {
return nil, err
}
for ix := range podList.Items {
pod := &podList.Items[ix]
applyUpdate := func(p *corev1.Pod) {
if p.Labels == nil {
p.Labels = map[string]string{
deploymentKey: deploymentValue,
}
} else {
p.Labels[deploymentKey] = deploymentValue
}
}
if pod, err = updatePodWithRetries(podClient, namespace, pod, applyUpdate); err != nil {
return nil, err
}
}
if oldRc.Spec.Selector == nil {
oldRc.Spec.Selector = map[string]string{}
}
// Copy the old selector, so that we can scrub out any orphaned pods
selectorCopy := map[string]string{}
for k, v := range oldRc.Spec.Selector {
selectorCopy[k] = v
}
applyUpdate = func(rc *corev1.ReplicationController) {
rc.Spec.Selector[deploymentKey] = deploymentValue
}
// Update the selector of the rc so it manages all the pods we updated above
if oldRc, err = updateRcWithRetries(rcClient, namespace, oldRc, applyUpdate); err != nil {
return nil, err
}
// Clean up any orphaned pods that don't have the new label, this can happen if the rc manager
// doesn't see the update to its pod template and creates a new pod with the old labels after
// we've finished re-adopting existing pods to the rc.
selector = labels.SelectorFromSet(selectorCopy)
options = metav1.ListOptions{LabelSelector: selector.String()}
if podList, err = podClient.Pods(namespace).List(options); err != nil {
return nil, err
}
for ix := range podList.Items {
pod := &podList.Items[ix]
if value, found := pod.Labels[deploymentKey]; !found || value != deploymentValue {
if err := podClient.Pods(namespace).Delete(pod.Name, nil); err != nil {
return nil, err
}
}
}
return oldRc, nil
}
type updateRcFunc func(controller *corev1.ReplicationController)
// updateRcWithRetries retries updating the given rc on conflict with the following steps:
// 1. Get latest resource
// 2. applyUpdate
// 3. Update the resource
func updateRcWithRetries(rcClient corev1client.ReplicationControllersGetter, namespace string, rc *corev1.ReplicationController, applyUpdate updateRcFunc) (*corev1.ReplicationController, error) {
// Deep copy the rc in case we failed on Get during retry loop
oldRc := rc.DeepCopy()
err := retry.RetryOnConflict(retry.DefaultBackoff, func() (e error) {
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(rc)
if rc, e = rcClient.ReplicationControllers(namespace).Update(rc); e == nil {
// rc contains the latest controller post update
return
}
updateErr := e
// Update the controller with the latest resource version, if the update failed we
// can't trust rc so use oldRc.Name.
if rc, e = rcClient.ReplicationControllers(namespace).Get(oldRc.Name, metav1.GetOptions{}); e != nil {
// The Get failed: Value in rc cannot be trusted.
rc = oldRc
}
// Only return the error from update
return updateErr
})
// If the error is non-nil, the returned controller cannot be trusted; if it is nil, the returned
// controller contains the applied update.
return rc, err
}
type updatePodFunc func(controller *corev1.Pod)
// updatePodWithRetries retries updating the given pod on conflict with the following steps:
// 1. Get latest resource
// 2. applyUpdate
// 3. Update the resource
func updatePodWithRetries(podClient corev1client.PodsGetter, namespace string, pod *corev1.Pod, applyUpdate updatePodFunc) (*corev1.Pod, error) {
// Deep copy the pod in case we failed on Get during retry loop
oldPod := pod.DeepCopy()
err := retry.RetryOnConflict(retry.DefaultBackoff, func() (e error) {
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(pod)
if pod, e = podClient.Pods(namespace).Update(pod); e == nil {
return
}
updateErr := e
if pod, e = podClient.Pods(namespace).Get(oldPod.Name, metav1.GetOptions{}); e != nil {
pod = oldPod
}
// Only return the error from update
return updateErr
})
// If the error is non-nil, the returned pod cannot be trusted; if it is nil, the returned
// pod contains the applied update.
return pod, err
}
func FindSourceController(r corev1client.ReplicationControllersGetter, namespace, name string) (*corev1.ReplicationController, error) {
list, err := r.ReplicationControllers(namespace).List(metav1.ListOptions{})
if err != nil {
return nil, err
}
for ix := range list.Items {
rc := &list.Items[ix]
if rc.Annotations != nil && strings.HasPrefix(rc.Annotations[sourceIDAnnotation], name) {
return rc, nil
}
}
return nil, fmt.Errorf("couldn't find a replication controller with source id == %s/%s", namespace, name)
}

vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go generated vendored Normal file
@ -0,0 +1,153 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubectl
import (
"fmt"
appsv1 "k8s.io/api/apps/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/kubectl/scheme"
deploymentutil "k8s.io/kubernetes/pkg/kubectl/util/deployment"
)
// StatusViewer provides an interface for resources that have rollout status.
type StatusViewer interface {
Status(obj runtime.Unstructured, revision int64) (string, bool, error)
}
// StatusViewerFor returns a StatusViewer for the resource specified by kind.
func StatusViewerFor(kind schema.GroupKind) (StatusViewer, error) {
switch kind {
case extensionsv1beta1.SchemeGroupVersion.WithKind("Deployment").GroupKind(),
appsv1.SchemeGroupVersion.WithKind("Deployment").GroupKind():
return &DeploymentStatusViewer{}, nil
case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSet").GroupKind(),
appsv1.SchemeGroupVersion.WithKind("DaemonSet").GroupKind():
return &DaemonSetStatusViewer{}, nil
case appsv1.SchemeGroupVersion.WithKind("StatefulSet").GroupKind():
return &StatefulSetStatusViewer{}, nil
}
return nil, fmt.Errorf("no status viewer has been implemented for %v", kind)
}
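// Editor's sketch of typical usage (the GroupKind value and obj are
// assumptions, not from the original file):
//
//   viewer, err := StatusViewerFor(schema.GroupKind{Group: "apps", Kind: "Deployment"})
//   if err != nil { /* no status viewer implemented for this kind */ }
//   msg, done, err := viewer.Status(obj, 0) // obj is a runtime.Unstructured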
// DeploymentStatusViewer implements the StatusViewer interface.
type DeploymentStatusViewer struct{}
// DaemonSetStatusViewer implements the StatusViewer interface.
type DaemonSetStatusViewer struct{}
// StatefulSetStatusViewer implements the StatusViewer interface.
type StatefulSetStatusViewer struct{}
// Status returns a message describing deployment status, and a bool value indicating if the status is considered done.
func (s *DeploymentStatusViewer) Status(obj runtime.Unstructured, revision int64) (string, bool, error) {
deployment := &appsv1.Deployment{}
err := scheme.Scheme.Convert(obj, deployment, nil)
if err != nil {
return "", false, fmt.Errorf("failed to convert %T to %T: %v", obj, deployment, err)
}
if revision > 0 {
deploymentRev, err := deploymentutil.Revision(deployment)
if err != nil {
return "", false, fmt.Errorf("cannot get the revision of deployment %q: %v", deployment.Name, err)
}
if revision != deploymentRev {
return "", false, fmt.Errorf("desired revision (%d) is different from the running revision (%d)", revision, deploymentRev)
}
}
if deployment.Generation <= deployment.Status.ObservedGeneration {
cond := deploymentutil.GetDeploymentCondition(deployment.Status, appsv1.DeploymentProgressing)
if cond != nil && cond.Reason == deploymentutil.TimedOutReason {
return "", false, fmt.Errorf("deployment %q exceeded its progress deadline", deployment.Name)
}
if deployment.Spec.Replicas != nil && deployment.Status.UpdatedReplicas < *deployment.Spec.Replicas {
return fmt.Sprintf("Waiting for deployment %q rollout to finish: %d out of %d new replicas have been updated...\n", deployment.Name, deployment.Status.UpdatedReplicas, *deployment.Spec.Replicas), false, nil
}
if deployment.Status.Replicas > deployment.Status.UpdatedReplicas {
return fmt.Sprintf("Waiting for deployment %q rollout to finish: %d old replicas are pending termination...\n", deployment.Name, deployment.Status.Replicas-deployment.Status.UpdatedReplicas), false, nil
}
if deployment.Status.AvailableReplicas < deployment.Status.UpdatedReplicas {
return fmt.Sprintf("Waiting for deployment %q rollout to finish: %d of %d updated replicas are available...\n", deployment.Name, deployment.Status.AvailableReplicas, deployment.Status.UpdatedReplicas), false, nil
}
return fmt.Sprintf("deployment %q successfully rolled out\n", deployment.Name), true, nil
}
return fmt.Sprintf("Waiting for deployment spec update to be observed...\n"), false, nil
}
// Status returns a message describing daemon set status, and a bool value indicating if the status is considered done.
func (s *DaemonSetStatusViewer) Status(obj runtime.Unstructured, revision int64) (string, bool, error) {
// Ignoring revision, as DaemonSets do not have rollout history yet.
daemon := &appsv1.DaemonSet{}
err := scheme.Scheme.Convert(obj, daemon, nil)
if err != nil {
return "", false, fmt.Errorf("failed to convert %T to %T: %v", obj, daemon, err)
}
if daemon.Spec.UpdateStrategy.Type != appsv1.RollingUpdateDaemonSetStrategyType {
return "", true, fmt.Errorf("rollout status is only available for %s strategy type", appsv1.RollingUpdateStatefulSetStrategyType)
}
if daemon.Generation <= daemon.Status.ObservedGeneration {
if daemon.Status.UpdatedNumberScheduled < daemon.Status.DesiredNumberScheduled {
return fmt.Sprintf("Waiting for daemon set %q rollout to finish: %d out of %d new pods have been updated...\n", daemon.Name, daemon.Status.UpdatedNumberScheduled, daemon.Status.DesiredNumberScheduled), false, nil
}
if daemon.Status.NumberAvailable < daemon.Status.DesiredNumberScheduled {
return fmt.Sprintf("Waiting for daemon set %q rollout to finish: %d of %d updated pods are available...\n", daemon.Name, daemon.Status.NumberAvailable, daemon.Status.DesiredNumberScheduled), false, nil
}
return fmt.Sprintf("daemon set %q successfully rolled out\n", daemon.Name), true, nil
}
return fmt.Sprintf("Waiting for daemon set spec update to be observed...\n"), false, nil
}
// Status returns a message describing statefulset status, and a bool value indicating if the status is considered done.
func (s *StatefulSetStatusViewer) Status(obj runtime.Unstructured, revision int64) (string, bool, error) {
sts := &appsv1.StatefulSet{}
err := scheme.Scheme.Convert(obj, sts, nil)
if err != nil {
return "", false, fmt.Errorf("failed to convert %T to %T: %v", obj, sts, err)
}
if sts.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
return "", true, fmt.Errorf("rollout status is only available for %s strategy type", appsv1.RollingUpdateStatefulSetStrategyType)
}
if sts.Status.ObservedGeneration == 0 || sts.Generation > sts.Status.ObservedGeneration {
return "Waiting for statefulset spec update to be observed...\n", false, nil
}
if sts.Spec.Replicas != nil && sts.Status.ReadyReplicas < *sts.Spec.Replicas {
return fmt.Sprintf("Waiting for %d pods to be ready...\n", *sts.Spec.Replicas-sts.Status.ReadyReplicas), false, nil
}
if sts.Spec.UpdateStrategy.Type == appsv1.RollingUpdateStatefulSetStrategyType && sts.Spec.UpdateStrategy.RollingUpdate != nil {
if sts.Spec.Replicas != nil && sts.Spec.UpdateStrategy.RollingUpdate.Partition != nil {
if sts.Status.UpdatedReplicas < (*sts.Spec.Replicas - *sts.Spec.UpdateStrategy.RollingUpdate.Partition) {
return fmt.Sprintf("Waiting for partitioned roll out to finish: %d out of %d new pods have been updated...\n",
sts.Status.UpdatedReplicas, *sts.Spec.Replicas-*sts.Spec.UpdateStrategy.RollingUpdate.Partition), false, nil
}
}
return fmt.Sprintf("partitioned roll out complete: %d new pods have been updated...\n",
sts.Status.UpdatedReplicas), true, nil
}
if sts.Status.UpdateRevision != sts.Status.CurrentRevision {
return fmt.Sprintf("waiting for statefulset rolling update to complete %d pods at revision %s...\n",
sts.Status.UpdatedReplicas, sts.Status.UpdateRevision), false, nil
}
return fmt.Sprintf("statefulset rolling update complete %d pods at revision %s...\n", sts.Status.CurrentReplicas, sts.Status.CurrentRevision), true, nil
}

vendor/k8s.io/kubernetes/pkg/kubectl/scale.go generated vendored Normal file
@ -0,0 +1,186 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubectl
import (
"fmt"
"strconv"
"time"
autoscalingapi "k8s.io/api/autoscaling/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
scaleclient "k8s.io/client-go/scale"
)
// Scaler provides an interface for resources that can be scaled.
type Scaler interface {
// Scale scales the named resource after checking preconditions. It optionally
// retries in the event of resource version mismatch (if retry is not nil),
// and optionally waits until the status of the resource matches newSize (if wait is not nil)
// TODO: Make the implementation of this watch-based (#56075) once #31345 is fixed.
Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, wait *RetryParams, gr schema.GroupResource) error
// ScaleSimple does a simple one-shot attempt at scaling - not useful on its own, but
// a necessary building block for Scale
ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint, gr schema.GroupResource) (updatedResourceVersion string, err error)
}
// NewScaler returns a scaler for a given resource.
func NewScaler(scalesGetter scaleclient.ScalesGetter) Scaler {
return &genericScaler{scalesGetter}
}
// ScalePrecondition describes a condition that must be true for the scale to take place
// If CurrentSize == -1, it is ignored.
// If CurrentResourceVersion is the empty string, it is ignored.
// Otherwise they must equal the values in the resource for it to be valid.
type ScalePrecondition struct {
Size int
ResourceVersion string
}
// A PreconditionError is returned when a resource fails to match
// the scale preconditions passed to kubectl.
type PreconditionError struct {
Precondition string
ExpectedValue string
ActualValue string
}
func (pe PreconditionError) Error() string {
return fmt.Sprintf("Expected %s to be %s, was %s", pe.Precondition, pe.ExpectedValue, pe.ActualValue)
}
// RetryParams encapsulates the retry parameters used by kubectl's scaler.
type RetryParams struct {
Interval, Timeout time.Duration
}
func NewRetryParams(interval, timeout time.Duration) *RetryParams {
return &RetryParams{interval, timeout}
}
// ScaleCondition is a closure around Scale that facilitates retries via util.wait
func ScaleCondition(r Scaler, precondition *ScalePrecondition, namespace, name string, count uint, updatedResourceVersion *string, gr schema.GroupResource) wait.ConditionFunc {
return func() (bool, error) {
rv, err := r.ScaleSimple(namespace, name, precondition, count, gr)
if updatedResourceVersion != nil {
*updatedResourceVersion = rv
}
// Retry only on update conflicts.
if errors.IsConflict(err) {
return false, nil
}
if err != nil {
return false, err
}
return true, nil
}
}
// validate ensures that the preconditions match. It returns nil if they are valid, otherwise an error.
func (precondition *ScalePrecondition) validate(scale *autoscalingapi.Scale) error {
if precondition.Size != -1 && int(scale.Spec.Replicas) != precondition.Size {
return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(scale.Spec.Replicas))}
}
if len(precondition.ResourceVersion) > 0 && scale.ResourceVersion != precondition.ResourceVersion {
return PreconditionError{"resource version", precondition.ResourceVersion, scale.ResourceVersion}
}
return nil
}
// genericScaler can update scales for resources in a particular namespace
type genericScaler struct {
scaleNamespacer scaleclient.ScalesGetter
}
var _ Scaler = &genericScaler{}
// ScaleSimple updates a scale of a given resource. It returns the resourceVersion of the scale if the update was successful.
func (s *genericScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint, gr schema.GroupResource) (updatedResourceVersion string, err error) {
scale, err := s.scaleNamespacer.Scales(namespace).Get(gr, name)
if err != nil {
return "", err
}
if preconditions != nil {
if err := preconditions.validate(scale); err != nil {
return "", err
}
}
scale.Spec.Replicas = int32(newSize)
updatedScale, err := s.scaleNamespacer.Scales(namespace).Update(gr, scale)
if err != nil {
return "", err
}
return updatedScale.ResourceVersion, nil
}
// Scale updates a scale of a given resource to a new size, with optional precondition check (if preconditions is not nil),
// optional retries (if retry is not nil), and then optionally waits for the status to reach desired count.
func (s *genericScaler) Scale(namespace, resourceName string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams, gr schema.GroupResource) error {
if preconditions == nil {
preconditions = &ScalePrecondition{-1, ""}
}
if retry == nil {
// make it try only once, immediately
retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
}
cond := ScaleCondition(s, preconditions, namespace, resourceName, newSize, nil, gr)
if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil {
return err
}
if waitForReplicas != nil {
return WaitForScaleHasDesiredReplicas(s.scaleNamespacer, gr, resourceName, namespace, newSize, waitForReplicas)
}
return nil
}
// scaleHasDesiredReplicas returns a condition that will be true if and only if the desired replica
// count for a scale (Spec) equals its updated replicas count (Status)
func scaleHasDesiredReplicas(sClient scaleclient.ScalesGetter, gr schema.GroupResource, resourceName string, namespace string, desiredReplicas int32) wait.ConditionFunc {
return func() (bool, error) {
actualScale, err := sClient.Scales(namespace).Get(gr, resourceName)
if err != nil {
return false, err
}
// this means the desired scale target has been reset by something else
if actualScale.Spec.Replicas != desiredReplicas {
return true, nil
}
return actualScale.Spec.Replicas == actualScale.Status.Replicas &&
desiredReplicas == actualScale.Status.Replicas, nil
}
}
// WaitForScaleHasDesiredReplicas waits until the scaleHasDesiredReplicas condition is satisfied,
// or returns an error when the timeout is reached.
func WaitForScaleHasDesiredReplicas(sClient scaleclient.ScalesGetter, gr schema.GroupResource, resourceName string, namespace string, newSize uint, waitForReplicas *RetryParams) error {
if waitForReplicas == nil {
return fmt.Errorf("waitForReplicas parameter cannot be nil")
}
err := wait.PollImmediate(
waitForReplicas.Interval,
waitForReplicas.Timeout,
scaleHasDesiredReplicas(sClient, gr, resourceName, namespace, int32(newSize)))
if err == wait.ErrWaitTimeout {
return fmt.Errorf("timed out waiting for %q to be synced", resourceName)
}
return err
}
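// Editor's sketch of typical usage (scalesGetter is an assumed, pre-configured
// scaleclient.ScalesGetter; the namespace and resource names are hypothetical):
//
//   scaler := NewScaler(scalesGetter)
//   retry := NewRetryParams(time.Second, time.Minute)
//   waitParams := NewRetryParams(time.Second, time.Minute)
//   err := scaler.Scale("default", "my-deployment", 3, nil, retry, waitParams,
//       schema.GroupResource{Group: "apps", Resource: "deployments"})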

vendor/k8s.io/kubernetes/pkg/kubectl/scheme/install.go generated vendored Normal file
@ -0,0 +1,78 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheme
import (
admissionv1alpha1 "k8s.io/api/admission/v1beta1"
admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
appsv1 "k8s.io/api/apps/v1"
appsv1beta1 "k8s.io/api/apps/v1beta1"
appsv1beta2 "k8s.io/api/apps/v1beta2"
authenticationv1 "k8s.io/api/authentication/v1"
authenticationv1beta1 "k8s.io/api/authentication/v1beta1"
authorizationv1 "k8s.io/api/authorization/v1"
authorizationv1beta1 "k8s.io/api/authorization/v1beta1"
autoscalingv1 "k8s.io/api/autoscaling/v1"
autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
batchv2alpha1 "k8s.io/api/batch/v2alpha1"
certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
corev1 "k8s.io/api/core/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
imagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1"
networkingv1 "k8s.io/api/networking/v1"
policyv1beta1 "k8s.io/api/policy/v1beta1"
rbacv1 "k8s.io/api/rbac/v1"
rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
settingsv1alpha1 "k8s.io/api/settings/v1alpha1"
storagev1 "k8s.io/api/storage/v1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme"
)
// Register all groups in kubectl's registry, but no componentconfig group since it's not in k8s.io/api.
// The code in this file mostly duplicates the install logic under k8s.io/kubernetes/pkg/api and k8s.io/kubernetes/pkg/apis,
// but does NOT register the internal types.
func init() {
// Register external types for Scheme
metav1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
utilruntime.Must(scheme.AddToScheme(Scheme))
utilruntime.Must(Scheme.SetVersionPriority(corev1.SchemeGroupVersion))
utilruntime.Must(Scheme.SetVersionPriority(admissionv1alpha1.SchemeGroupVersion))
utilruntime.Must(Scheme.SetVersionPriority(admissionregistrationv1beta1.SchemeGroupVersion))
utilruntime.Must(Scheme.SetVersionPriority(appsv1beta1.SchemeGroupVersion, appsv1beta2.SchemeGroupVersion, appsv1.SchemeGroupVersion))
utilruntime.Must(Scheme.SetVersionPriority(authenticationv1.SchemeGroupVersion, authenticationv1beta1.SchemeGroupVersion))
utilruntime.Must(Scheme.SetVersionPriority(authorizationv1.SchemeGroupVersion, authorizationv1beta1.SchemeGroupVersion))
utilruntime.Must(Scheme.SetVersionPriority(autoscalingv1.SchemeGroupVersion, autoscalingv2beta1.SchemeGroupVersion))
utilruntime.Must(Scheme.SetVersionPriority(batchv1.SchemeGroupVersion, batchv1beta1.SchemeGroupVersion, batchv2alpha1.SchemeGroupVersion))
utilruntime.Must(Scheme.SetVersionPriority(certificatesv1beta1.SchemeGroupVersion))
utilruntime.Must(Scheme.SetVersionPriority(extensionsv1beta1.SchemeGroupVersion))
utilruntime.Must(Scheme.SetVersionPriority(imagepolicyv1alpha1.SchemeGroupVersion))
utilruntime.Must(Scheme.SetVersionPriority(networkingv1.SchemeGroupVersion))
utilruntime.Must(Scheme.SetVersionPriority(policyv1beta1.SchemeGroupVersion))
utilruntime.Must(Scheme.SetVersionPriority(rbacv1.SchemeGroupVersion, rbacv1beta1.SchemeGroupVersion, rbacv1alpha1.SchemeGroupVersion))
utilruntime.Must(Scheme.SetVersionPriority(schedulingv1alpha1.SchemeGroupVersion))
utilruntime.Must(Scheme.SetVersionPriority(settingsv1alpha1.SchemeGroupVersion))
utilruntime.Must(Scheme.SetVersionPriority(storagev1.SchemeGroupVersion, storagev1beta1.SchemeGroupVersion))
}

vendor/k8s.io/kubernetes/pkg/kubectl/scheme/scheme.go generated vendored Normal file
@ -0,0 +1,39 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheme
import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
)
// All kubectl code should eventually switch to use this Registry and Scheme instead of the global ones.
// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered.
var Scheme = runtime.NewScheme()
// Codecs provides access to encoding and decoding for the scheme
var Codecs = serializer.NewCodecFactory(Scheme)
// ParameterCodec handles versioning of objects that are converted to query parameters.
var ParameterCodec = runtime.NewParameterCodec(Scheme)
// DefaultJSONEncoder returns a default encoder for our scheme
func DefaultJSONEncoder() runtime.Encoder {
return unstructured.JSONFallbackEncoder{Encoder: Codecs.LegacyCodec(Scheme.PrioritizedVersionsAllGroups()...)}
}

@ -0,0 +1,42 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package certificate
import (
"crypto/x509"
"encoding/pem"
"errors"
certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
)
// TODO(yue9944882): Remove this helper package once it's copied to k/api
// ParseCSR extracts the CSR from the API object and decodes it.
func ParseCSR(obj *certificatesv1beta1.CertificateSigningRequest) (*x509.CertificateRequest, error) {
// extract PEM from request object
pemBytes := obj.Spec.Request
block, _ := pem.Decode(pemBytes)
if block == nil || block.Type != "CERTIFICATE REQUEST" {
return nil, errors.New("PEM block type must be CERTIFICATE REQUEST")
}
csr, err := x509.ParseCertificateRequest(block.Bytes)
if err != nil {
return nil, err
}
return csr, nil
}
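// Editor's sketch of typical usage (csrObj is an assumed
// *certificatesv1beta1.CertificateSigningRequest fetched by the caller):
//
//   req, err := ParseCSR(csrObj)
//   if err == nil {
//       fmt.Println(req.Subject.CommonName)
//   }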

@ -0,0 +1,230 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"sort"
"strconv"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
intstrutil "k8s.io/apimachinery/pkg/util/intstr"
appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
)
const (
// RevisionAnnotation is the revision annotation of a deployment's replica sets which records its rollout sequence
RevisionAnnotation = "deployment.kubernetes.io/revision"
// RevisionHistoryAnnotation maintains the history of all old revisions that a replica set has served for a deployment.
RevisionHistoryAnnotation = "deployment.kubernetes.io/revision-history"
// DesiredReplicasAnnotation is the desired replicas for a deployment recorded as an annotation
// in its replica sets. Helps in separating scaling events from the rollout process and for
// determining if the new replica set for a deployment is really saturated.
DesiredReplicasAnnotation = "deployment.kubernetes.io/desired-replicas"
// MaxReplicasAnnotation is the maximum replicas a deployment can have at a given point, which
// is deployment.spec.replicas + maxSurge. Used by the underlying replica sets to estimate their
// proportions in case the deployment has surge replicas.
MaxReplicasAnnotation = "deployment.kubernetes.io/max-replicas"
// RollbackRevisionNotFound is the rollback event reason used when the revision to roll back to is not found
RollbackRevisionNotFound = "DeploymentRollbackRevisionNotFound"
// RollbackTemplateUnchanged is the rollback event reason used when the rollback template is unchanged
RollbackTemplateUnchanged = "DeploymentRollbackTemplateUnchanged"
// RollbackDone is the rollback event reason used when a rollback is done
RollbackDone = "DeploymentRollback"
// TimedOutReason is added in a deployment when its newest replica set fails to show any progress
// within the given deadline (progressDeadlineSeconds).
TimedOutReason = "ProgressDeadlineExceeded"
)
// GetDeploymentCondition returns the condition with the provided type.
func GetDeploymentCondition(status appsv1.DeploymentStatus, condType appsv1.DeploymentConditionType) *appsv1.DeploymentCondition {
for i := range status.Conditions {
c := status.Conditions[i]
if c.Type == condType {
return &c
}
}
return nil
}
// Revision returns the revision number of the input object.
func Revision(obj runtime.Object) (int64, error) {
acc, err := meta.Accessor(obj)
if err != nil {
return 0, err
}
v, ok := acc.GetAnnotations()[RevisionAnnotation]
if !ok {
return 0, nil
}
return strconv.ParseInt(v, 10, 64)
}
// GetAllReplicaSets returns the old and new replica sets targeted by the given Deployment. It gets the
// ReplicaSetList from the client interface. Note that the first set of old replica sets doesn't include the ones
// with no pods, and the second set of old replica sets includes all old replica sets. The third returned value
// is the new replica set, and it may be nil if it doesn't exist yet.
func GetAllReplicaSets(deployment *appsv1.Deployment, c appsclient.AppsV1Interface) ([]*appsv1.ReplicaSet, []*appsv1.ReplicaSet, *appsv1.ReplicaSet, error) {
rsList, err := listReplicaSets(deployment, rsListFromClient(c))
if err != nil {
return nil, nil, nil, err
}
oldRSes, allOldRSes := findOldReplicaSets(deployment, rsList)
newRS := findNewReplicaSet(deployment, rsList)
return oldRSes, allOldRSes, newRS, nil
}
// rsListFromClient returns an rsListFunc that wraps the given client.
func rsListFromClient(c appsclient.AppsV1Interface) rsListFunc {
return func(namespace string, options metav1.ListOptions) ([]*appsv1.ReplicaSet, error) {
rsList, err := c.ReplicaSets(namespace).List(options)
if err != nil {
return nil, err
}
var ret []*appsv1.ReplicaSet
for i := range rsList.Items {
ret = append(ret, &rsList.Items[i])
}
return ret, err
}
}
// TODO: switch this to full namespacers
type rsListFunc func(string, metav1.ListOptions) ([]*appsv1.ReplicaSet, error)
type podListFunc func(string, metav1.ListOptions) (*corev1.PodList, error)
// listReplicaSets returns a slice of RSes the given deployment targets.
// Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan),
// because only the controller itself should do that.
// However, it does filter out anything whose ControllerRef doesn't match.
func listReplicaSets(deployment *appsv1.Deployment, getRSList rsListFunc) ([]*appsv1.ReplicaSet, error) {
// TODO: Right now we list replica sets by their labels. We should list them by selector, i.e. the replica set's selector
// should be a superset of the deployment's selector, see https://github.com/kubernetes/kubernetes/issues/19830.
namespace := deployment.Namespace
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
if err != nil {
return nil, err
}
options := metav1.ListOptions{LabelSelector: selector.String()}
all, err := getRSList(namespace, options)
if err != nil {
return nil, err
}
// Only include those whose ControllerRef matches the Deployment.
owned := make([]*appsv1.ReplicaSet, 0, len(all))
for _, rs := range all {
if metav1.IsControlledBy(rs, deployment) {
owned = append(owned, rs)
}
}
return owned, nil
}
// equalIgnoreHash returns true if the two given PodTemplateSpecs are equal, ignoring the diff in value of Labels[pod-template-hash].
// We ignore pod-template-hash because:
// 1. The hash result would be different upon podTemplateSpec API changes
// (e.g. the addition of a new field will cause the hash code to change)
// 2. The deployment template won't have hash labels
func equalIgnoreHash(template1, template2 *corev1.PodTemplateSpec) bool {
t1Copy := template1.DeepCopy()
t2Copy := template2.DeepCopy()
// Remove hash labels from template.Labels before comparing
delete(t1Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey)
delete(t2Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey)
return apiequality.Semantic.DeepEqual(t1Copy, t2Copy)
}
// findNewReplicaSet returns the new RS the given deployment targets (the one with the same pod template).
func findNewReplicaSet(deployment *appsv1.Deployment, rsList []*appsv1.ReplicaSet) *appsv1.ReplicaSet {
sort.Sort(replicaSetsByCreationTimestamp(rsList))
for i := range rsList {
if equalIgnoreHash(&rsList[i].Spec.Template, &deployment.Spec.Template) {
// In rare cases, such as after cluster upgrades, a Deployment may end up
// with more than one new ReplicaSet that has the same template as its
// template; see https://github.com/kubernetes/kubernetes/issues/40415
// We deterministically choose the oldest new ReplicaSet.
return rsList[i]
}
}
// new ReplicaSet does not exist.
return nil
}
// replicaSetsByCreationTimestamp sorts a list of ReplicaSet by creation timestamp, using their names as a tie breaker.
type replicaSetsByCreationTimestamp []*appsv1.ReplicaSet
func (o replicaSetsByCreationTimestamp) Len() int { return len(o) }
func (o replicaSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o replicaSetsByCreationTimestamp) Less(i, j int) bool {
if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) {
return o[i].Name < o[j].Name
}
return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp)
}
// findOldReplicaSets returns the old replica sets targeted by the given Deployment, with the given slice of RSes.
// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets includes all old replica sets.
func findOldReplicaSets(deployment *appsv1.Deployment, rsList []*appsv1.ReplicaSet) ([]*appsv1.ReplicaSet, []*appsv1.ReplicaSet) {
var requiredRSs []*appsv1.ReplicaSet
var allRSs []*appsv1.ReplicaSet
newRS := findNewReplicaSet(deployment, rsList)
for _, rs := range rsList {
// Filter out new replica set
if newRS != nil && rs.UID == newRS.UID {
continue
}
allRSs = append(allRSs, rs)
if *(rs.Spec.Replicas) != 0 {
requiredRSs = append(requiredRSs, rs)
}
}
return requiredRSs, allRSs
}
// ResolveFenceposts resolves both maxSurge and maxUnavailable. This needs to happen in one
// step. For example:
//
// 2 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1), then old(-1), then new(+1)
// 1 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1)
// 2 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
// 1 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1)
// 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
// 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1)
func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) {
surge, err := intstrutil.GetValueFromIntOrPercent(intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt(0)), int(desired), true)
if err != nil {
return 0, 0, err
}
unavailable, err := intstrutil.GetValueFromIntOrPercent(intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt(0)), int(desired), false)
if err != nil {
return 0, 0, err
}
if surge == 0 && unavailable == 0 {
// Validation should never allow the user to explicitly use zero values for both maxSurge
// maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero.
// If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the
// theory that surge might not work due to quota.
unavailable = 1
}
return int32(surge), int32(unavailable), nil
}
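// Worked example (editor's illustration, not from the original source): with
// desired=10, maxSurge=25% and maxUnavailable=25%, surge rounds up to 3 and
// unavailable rounds down to 2.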

@ -0,0 +1,36 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package event
import (
corev1 "k8s.io/api/core/v1"
)
// SortableEvents implements sort.Interface for []corev1.Event based on the LastTimestamp field
type SortableEvents []corev1.Event
func (list SortableEvents) Len() int {
return len(list)
}
func (list SortableEvents) Swap(i, j int) {
list[i], list[j] = list[j], list[i]
}
func (list SortableEvents) Less(i, j int) bool {
return list[i].LastTimestamp.Time.Before(list[j].LastTimestamp.Time)
}

@ -0,0 +1,111 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fieldpath
import (
"fmt"
"strings"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
)
// TODO(yue9944882): Remove this helper package once it's copied to k/apimachinery
// FormatMap formats map[string]string to a string.
func FormatMap(m map[string]string) (fmtStr string) {
// output with keys in sorted order to provide stable output
keys := sets.NewString()
for key := range m {
keys.Insert(key)
}
for _, key := range keys.List() {
fmtStr += fmt.Sprintf("%v=%q\n", key, m[key])
}
fmtStr = strings.TrimSuffix(fmtStr, "\n")
return
}
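// For example (editor's note): FormatMap(map[string]string{"b": "2", "a": "1"})
// returns "a=\"1\"\nb=\"2\"", with keys in sorted order.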
// ExtractFieldPathAsString extracts the field from the given object
// and returns it as a string. The object must be a pointer to an
// API type.
func ExtractFieldPathAsString(obj interface{}, fieldPath string) (string, error) {
accessor, err := meta.Accessor(obj)
if err != nil {
return "", nil
}
if path, subscript, ok := SplitMaybeSubscriptedPath(fieldPath); ok {
switch path {
case "metadata.annotations":
if errs := validation.IsQualifiedName(strings.ToLower(subscript)); len(errs) != 0 {
return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";"))
}
return accessor.GetAnnotations()[subscript], nil
case "metadata.labels":
if errs := validation.IsQualifiedName(subscript); len(errs) != 0 {
return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";"))
}
return accessor.GetLabels()[subscript], nil
default:
return "", fmt.Errorf("fieldPath %q does not support subscript", fieldPath)
}
}
switch fieldPath {
case "metadata.annotations":
return FormatMap(accessor.GetAnnotations()), nil
case "metadata.labels":
return FormatMap(accessor.GetLabels()), nil
case "metadata.name":
return accessor.GetName(), nil
case "metadata.namespace":
return accessor.GetNamespace(), nil
case "metadata.uid":
return string(accessor.GetUID()), nil
}
return "", fmt.Errorf("unsupported fieldPath: %v", fieldPath)
}
// SplitMaybeSubscriptedPath checks whether the specified fieldPath is
// subscripted, and
// - if yes, this function splits the fieldPath into path and subscript, and
// returns (path, subscript, true).
// - if no, this function returns (fieldPath, "", false).
//
// Example inputs and outputs:
// - "metadata.annotations['myKey']" --> ("metadata.annotations", "myKey", true)
// - "metadata.annotations['a[b]c']" --> ("metadata.annotations", "a[b]c", true)
// - "metadata.labels['']" --> ("metadata.labels", "", true)
// - "metadata.labels" --> ("metadata.labels", "", false)
func SplitMaybeSubscriptedPath(fieldPath string) (string, string, bool) {
if !strings.HasSuffix(fieldPath, "']") {
return fieldPath, "", false
}
s := strings.TrimSuffix(fieldPath, "']")
parts := strings.SplitN(s, "['", 2)
if len(parts) < 2 {
return fieldPath, "", false
}
if len(parts[0]) == 0 {
return fieldPath, "", false
}
return parts[0], parts[1], true
}
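A short sketch of how the two lookup styles above differ (illustrative only; the import path k8s.io/kubernetes/pkg/kubectl/util/fieldpath is assumed from the package name):

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/kubernetes/pkg/kubectl/util/fieldpath"
)

func main() {
    pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{
        Name:   "demo",
        Labels: map[string]string{"app": "web"},
    }}
    // A subscripted path selects a single key ...
    v, _ := fieldpath.ExtractFieldPathAsString(pod, "metadata.labels['app']")
    fmt.Println(v) // web
    // ... while the bare path formats the whole map via FormatMap.
    all, _ := fieldpath.ExtractFieldPathAsString(pod, "metadata.labels")
    fmt.Println(all) // app="web"
}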

36
vendor/k8s.io/kubernetes/pkg/kubectl/util/pod_port.go generated vendored Normal file
View File

@ -0,0 +1,36 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"k8s.io/api/core/v1"
)
// LookupContainerPortNumberByName finds the containerPort number for the given named port in the pod's containers
func LookupContainerPortNumberByName(pod v1.Pod, name string) (int32, error) {
for _, ctr := range pod.Spec.Containers {
for _, ctrportspec := range ctr.Ports {
if ctrportspec.Name == name {
return ctrportspec.ContainerPort, nil
}
}
}
return int32(-1), fmt.Errorf("Pod '%s' does not have a named port '%s'", pod.Name, name)
}
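A quick sketch of the lookup (illustrative; uses the util package path shown in the file header):

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/kubernetes/pkg/kubectl/util"
)

func main() {
    pod := v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{{
        Name:  "web",
        Ports: []v1.ContainerPort{{Name: "http", ContainerPort: 8080}},
    }}}}
    // Resolves the named port "http" to its numeric containerPort.
    port, err := util.LookupContainerPortNumberByName(pod, "http")
    fmt.Println(port, err) // 8080 <nil>
}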

View File

@ -0,0 +1,190 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podutils
import (
"time"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/integer"
)
// IsPodAvailable returns true if a pod is available; false otherwise.
// Precondition for an available pod is that it must be ready. On top
// of that, there are two cases when a pod can be considered available:
// 1. minReadySeconds == 0, or
// 2. LastTransitionTime (is set) + minReadySeconds < current time
func IsPodAvailable(pod *corev1.Pod, minReadySeconds int32, now metav1.Time) bool {
if !IsPodReady(pod) {
return false
}
c := getPodReadyCondition(pod.Status)
minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second
if minReadySeconds == 0 || (!c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time)) {
return true
}
return false
}
// IsPodReady returns true if a pod is ready; false otherwise.
func IsPodReady(pod *corev1.Pod) bool {
return isPodReadyConditionTrue(pod.Status)
}
// isPodReadyConditionTrue returns true if the pod's Ready condition is true; false otherwise.
func isPodReadyConditionTrue(status corev1.PodStatus) bool {
condition := getPodReadyCondition(status)
return condition != nil && condition.Status == corev1.ConditionTrue
}
// getPodReadyCondition extracts the pod ready condition from the given status.
// Returns nil if the condition is not present.
func getPodReadyCondition(status corev1.PodStatus) *corev1.PodCondition {
_, condition := getPodCondition(&status, corev1.PodReady)
return condition
}
// getPodCondition extracts the provided condition from the given status.
// Returns -1 and nil if the condition is not present; otherwise returns the
// index of the located condition and the condition itself.
func getPodCondition(status *corev1.PodStatus, conditionType corev1.PodConditionType) (int, *corev1.PodCondition) {
if status == nil {
return -1, nil
}
return getPodConditionFromList(status.Conditions, conditionType)
}
// getPodConditionFromList extracts the provided condition from the given list of conditions.
// Returns the index of the condition and the condition itself, or -1 and nil if the condition is not present.
func getPodConditionFromList(conditions []corev1.PodCondition, conditionType corev1.PodConditionType) (int, *corev1.PodCondition) {
if conditions == nil {
return -1, nil
}
for i := range conditions {
if conditions[i].Type == conditionType {
return i, &conditions[i]
}
}
return -1, nil
}
// ByLogging allows custom sorting of pods so the best one can be picked for getting its logs.
type ByLogging []*corev1.Pod
func (s ByLogging) Len() int { return len(s) }
func (s ByLogging) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s ByLogging) Less(i, j int) bool {
// 1. assigned < unassigned
if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) {
return len(s[i].Spec.NodeName) > 0
}
// 2. PodRunning < PodUnknown < PodPending
m := map[corev1.PodPhase]int{corev1.PodRunning: 0, corev1.PodUnknown: 1, corev1.PodPending: 2}
if m[s[i].Status.Phase] != m[s[j].Status.Phase] {
return m[s[i].Status.Phase] < m[s[j].Status.Phase]
}
// 3. ready < not ready
if IsPodReady(s[i]) != IsPodReady(s[j]) {
return IsPodReady(s[i])
}
// TODO: take availability into account when we push minReadySeconds information from deployment into pods,
// see https://github.com/kubernetes/kubernetes/issues/22065
// 4. Been ready for more time < less time < empty time
if IsPodReady(s[i]) && IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) {
return afterOrZero(podReadyTime(s[j]), podReadyTime(s[i]))
}
// 5. Pods with containers with higher restart counts < lower restart counts
if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) {
return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j])
}
// 6. older pods < newer pods < empty timestamp pods
if !s[i].CreationTimestamp.Equal(&s[j].CreationTimestamp) {
return afterOrZero(&s[j].CreationTimestamp, &s[i].CreationTimestamp)
}
return false
}
// ActivePods type allows custom sorting of pods so a controller can pick the best ones to delete.
type ActivePods []*corev1.Pod
func (s ActivePods) Len() int { return len(s) }
func (s ActivePods) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s ActivePods) Less(i, j int) bool {
// 1. Unassigned < assigned
// If only one of the pods is unassigned, the unassigned one is smaller
if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) {
return len(s[i].Spec.NodeName) == 0
}
// 2. PodPending < PodUnknown < PodRunning
m := map[corev1.PodPhase]int{corev1.PodPending: 0, corev1.PodUnknown: 1, corev1.PodRunning: 2}
if m[s[i].Status.Phase] != m[s[j].Status.Phase] {
return m[s[i].Status.Phase] < m[s[j].Status.Phase]
}
// 3. Not ready < ready
// If only one of the pods is not ready, the not ready one is smaller
if IsPodReady(s[i]) != IsPodReady(s[j]) {
return !IsPodReady(s[i])
}
// TODO: take availability into account when we push minReadySeconds information from deployment into pods,
// see https://github.com/kubernetes/kubernetes/issues/22065
// 4. Been ready for empty time < less time < more time
// If both pods are ready, the latest ready one is smaller
if IsPodReady(s[i]) && IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) {
return afterOrZero(podReadyTime(s[i]), podReadyTime(s[j]))
}
// 5. Pods with containers with higher restart counts < lower restart counts
if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) {
return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j])
}
// 6. Empty creation time pods < newer pods < older pods
if !s[i].CreationTimestamp.Equal(&s[j].CreationTimestamp) {
return afterOrZero(&s[i].CreationTimestamp, &s[j].CreationTimestamp)
}
return false
}
// afterOrZero checks if time t1 is after time t2; if one of them
// is zero, the zero time is seen as after non-zero time.
func afterOrZero(t1, t2 *metav1.Time) bool {
if t1.Time.IsZero() || t2.Time.IsZero() {
return t1.Time.IsZero()
}
return t1.After(t2.Time)
}
func podReadyTime(pod *corev1.Pod) *metav1.Time {
if IsPodReady(pod) {
for _, c := range pod.Status.Conditions {
// we only care about pod ready conditions
if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue {
return &c.LastTransitionTime
}
}
}
return &metav1.Time{}
}
func maxContainerRestarts(pod *corev1.Pod) int {
maxRestarts := 0
for _, c := range pod.Status.ContainerStatuses {
maxRestarts = integer.IntMax(maxRestarts, int(c.RestartCount))
}
return maxRestarts
}
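A sketch of picking the best pod for log collection with ByLogging (illustrative only; the import path k8s.io/kubernetes/pkg/kubectl/util/podutils is assumed from the package name):

package main

import (
    "fmt"
    "sort"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/kubernetes/pkg/kubectl/util/podutils"
)

func main() {
    running := &corev1.Pod{Status: corev1.PodStatus{Phase: corev1.PodRunning}}
    pending := &corev1.Pod{Status: corev1.PodStatus{Phase: corev1.PodPending}}
    pods := []*corev1.Pod{pending, running}
    // ByLogging ranks running pods ahead of pending ones (rule 2),
    // so the best candidate for log collection sorts to the front.
    sort.Sort(podutils.ByLogging(pods))
    fmt.Println(pods[0].Status.Phase) // Running
}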

95
vendor/k8s.io/kubernetes/pkg/kubectl/util/qos/qos.go generated vendored Normal file
View File

@ -0,0 +1,95 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package qos
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
)
var supportedQoSComputeResources = sets.NewString(string(corev1.ResourceCPU), string(corev1.ResourceMemory))
func isSupportedQoSComputeResource(name corev1.ResourceName) bool {
return supportedQoSComputeResources.Has(string(name))
}
// GetPodQOS returns the QoS class of a pod.
// A pod is besteffort if none of its containers have specified any requests or limits.
// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
// A pod is burstable if limits and requests do not match across all containers.
func GetPodQOS(pod *corev1.Pod) corev1.PodQOSClass {
requests := corev1.ResourceList{}
limits := corev1.ResourceList{}
zeroQuantity := resource.MustParse("0")
isGuaranteed := true
for _, container := range pod.Spec.Containers {
// process requests
for name, quantity := range container.Resources.Requests {
if !isSupportedQoSComputeResource(name) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
delta := quantity.Copy()
if _, exists := requests[name]; !exists {
requests[name] = *delta
} else {
delta.Add(requests[name])
requests[name] = *delta
}
}
}
// process limits
qosLimitsFound := sets.NewString()
for name, quantity := range container.Resources.Limits {
if !isSupportedQoSComputeResource(name) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
qosLimitsFound.Insert(string(name))
delta := quantity.Copy()
if _, exists := limits[name]; !exists {
limits[name] = *delta
} else {
delta.Add(limits[name])
limits[name] = *delta
}
}
}
if !qosLimitsFound.HasAll(string(corev1.ResourceMemory), string(corev1.ResourceCPU)) {
isGuaranteed = false
}
}
if len(requests) == 0 && len(limits) == 0 {
return corev1.PodQOSBestEffort
}
// Check if requests match limits for all resources.
if isGuaranteed {
for name, req := range requests {
if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 {
isGuaranteed = false
break
}
}
}
if isGuaranteed && len(requests) == len(limits) {
return corev1.PodQOSGuaranteed
}
return corev1.PodQOSBurstable
}
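A quick sketch of the classification rules above (illustrative; uses the qos package path shown in the file header):

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    "k8s.io/kubernetes/pkg/kubectl/util/qos"
)

func main() {
    // Requests equal limits for cpu and memory in every container: Guaranteed.
    guaranteed := &corev1.Pod{Spec: corev1.PodSpec{Containers: []corev1.Container{{
        Resources: corev1.ResourceRequirements{
            Requests: corev1.ResourceList{
                corev1.ResourceCPU:    resource.MustParse("100m"),
                corev1.ResourceMemory: resource.MustParse("128Mi"),
            },
            Limits: corev1.ResourceList{
                corev1.ResourceCPU:    resource.MustParse("100m"),
                corev1.ResourceMemory: resource.MustParse("128Mi"),
            },
        },
    }}}}
    fmt.Println(qos.GetPodQOS(guaranteed))    // Guaranteed
    // No requests or limits at all: BestEffort.
    fmt.Println(qos.GetPodQOS(&corev1.Pod{})) // BestEffort
}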

128
vendor/k8s.io/kubernetes/pkg/kubectl/util/rbac/rbac.go generated vendored Normal file
View File

@ -0,0 +1,128 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbac
import (
rbacv1 "k8s.io/api/rbac/v1"
"reflect"
"strings"
)
type simpleResource struct {
Group string
Resource string
ResourceNameExist bool
ResourceName string
}
// CompactRules combines rules that contain a single APIGroup/Resource, differ only by verb, and contain no other attributes.
// This is a fast check, and works well with the decomposed "missing rules" list from a Covers check.
func CompactRules(rules []rbacv1.PolicyRule) ([]rbacv1.PolicyRule, error) {
compacted := make([]rbacv1.PolicyRule, 0, len(rules))
simpleRules := map[simpleResource]*rbacv1.PolicyRule{}
for _, rule := range rules {
if resource, isSimple := isSimpleResourceRule(&rule); isSimple {
if existingRule, ok := simpleRules[resource]; ok {
// Add the new verbs to the existing simple resource rule
if existingRule.Verbs == nil {
existingRule.Verbs = []string{}
}
existingRule.Verbs = append(existingRule.Verbs, rule.Verbs...)
} else {
// Copy the rule to accumulate matching simple resource rules into
simpleRules[resource] = rule.DeepCopy()
}
} else {
compacted = append(compacted, rule)
}
}
// Once we've consolidated the simple resource rules, add them to the compacted list
for _, simpleRule := range simpleRules {
compacted = append(compacted, *simpleRule)
}
return compacted, nil
}
// isSimpleResourceRule returns true if the given rule contains verbs, a single resource, a single API group, at most one Resource Name, and no other values
func isSimpleResourceRule(rule *rbacv1.PolicyRule) (simpleResource, bool) {
resource := simpleResource{}
// If we have "complex" rule attributes, return early without allocations or expensive comparisons
if len(rule.ResourceNames) > 1 || len(rule.NonResourceURLs) > 0 {
return resource, false
}
// If we have multiple api groups or resources, return early
if len(rule.APIGroups) != 1 || len(rule.Resources) != 1 {
return resource, false
}
// Test if this rule only contains APIGroups/Resources/Verbs/ResourceNames
simpleRule := &rbacv1.PolicyRule{APIGroups: rule.APIGroups, Resources: rule.Resources, Verbs: rule.Verbs, ResourceNames: rule.ResourceNames}
if !reflect.DeepEqual(simpleRule, rule) {
return resource, false
}
if len(rule.ResourceNames) == 0 {
resource = simpleResource{Group: rule.APIGroups[0], Resource: rule.Resources[0], ResourceNameExist: false}
} else {
resource = simpleResource{Group: rule.APIGroups[0], Resource: rule.Resources[0], ResourceNameExist: true, ResourceName: rule.ResourceNames[0]}
}
return resource, true
}
// BreakdownRule takes a rule and builds an equivalent list of rules that each have at most one verb, one
// resource, and one resource name
func BreakdownRule(rule rbacv1.PolicyRule) []rbacv1.PolicyRule {
subrules := []rbacv1.PolicyRule{}
for _, group := range rule.APIGroups {
for _, resource := range rule.Resources {
for _, verb := range rule.Verbs {
if len(rule.ResourceNames) > 0 {
for _, resourceName := range rule.ResourceNames {
subrules = append(subrules, rbacv1.PolicyRule{APIGroups: []string{group}, Resources: []string{resource}, Verbs: []string{verb}, ResourceNames: []string{resourceName}})
}
} else {
subrules = append(subrules, rbacv1.PolicyRule{APIGroups: []string{group}, Resources: []string{resource}, Verbs: []string{verb}})
}
}
}
}
// Non-resource URLs are unique because they only combine with verbs.
for _, nonResourceURL := range rule.NonResourceURLs {
for _, verb := range rule.Verbs {
subrules = append(subrules, rbacv1.PolicyRule{NonResourceURLs: []string{nonResourceURL}, Verbs: []string{verb}})
}
}
return subrules
}
// SortableRuleSlice is used to sort rule slice
type SortableRuleSlice []rbacv1.PolicyRule
func (s SortableRuleSlice) Len() int { return len(s) }
func (s SortableRuleSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s SortableRuleSlice) Less(i, j int) bool {
return strings.Compare(s[i].String(), s[j].String()) < 0
}
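For instance, two rules that differ only by verb compact into one (a sketch, not part of the vendored file; uses the rbac package path shown in the file header):

package main

import (
    "fmt"

    rbacv1 "k8s.io/api/rbac/v1"
    "k8s.io/kubernetes/pkg/kubectl/util/rbac"
)

func main() {
    rules := []rbacv1.PolicyRule{
        {APIGroups: []string{""}, Resources: []string{"pods"}, Verbs: []string{"get"}},
        {APIGroups: []string{""}, Resources: []string{"pods"}, Verbs: []string{"list"}},
    }
    // Both rules are "simple" and target the same group/resource,
    // so they collapse into a single rule carrying both verbs.
    compacted, _ := rbac.CompactRules(rules)
    fmt.Println(len(compacted), compacted[0].Verbs) // 1 [get list]
}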

View File

@ -0,0 +1,138 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resource
import (
"fmt"
"math"
"strconv"
"strings"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
)
// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all
// containers of the pod.
func PodRequestsAndLimits(pod *corev1.Pod) (reqs, limits corev1.ResourceList) {
reqs, limits = corev1.ResourceList{}, corev1.ResourceList{}
for _, container := range pod.Spec.Containers {
addResourceList(reqs, container.Resources.Requests)
addResourceList(limits, container.Resources.Limits)
}
// init containers define the minimum of any resource
for _, container := range pod.Spec.InitContainers {
maxResourceList(reqs, container.Resources.Requests)
maxResourceList(limits, container.Resources.Limits)
}
return
}
// addResourceList adds the resources in newList to list
func addResourceList(list, newList corev1.ResourceList) {
for name, quantity := range newList {
if value, ok := list[name]; !ok {
list[name] = *quantity.Copy()
} else {
value.Add(quantity)
list[name] = value
}
}
}
// maxResourceList sets list to the greater of list/newList for every resource in either list
func maxResourceList(list, newList corev1.ResourceList) {
for name, quantity := range newList {
if value, ok := list[name]; !ok || quantity.Cmp(value) > 0 {
list[name] = *quantity.Copy()
}
}
}
// ExtractContainerResourceValue extracts the value of a resource
// in an already known container
func ExtractContainerResourceValue(fs *corev1.ResourceFieldSelector, container *corev1.Container) (string, error) {
divisor := resource.Quantity{}
if divisor.Cmp(fs.Divisor) == 0 {
divisor = resource.MustParse("1")
} else {
divisor = fs.Divisor
}
switch fs.Resource {
case "limits.cpu":
return convertResourceCPUToString(container.Resources.Limits.Cpu(), divisor)
case "limits.memory":
return convertResourceMemoryToString(container.Resources.Limits.Memory(), divisor)
case "limits.ephemeral-storage":
return convertResourceEphemeralStorageToString(container.Resources.Limits.StorageEphemeral(), divisor)
case "requests.cpu":
return convertResourceCPUToString(container.Resources.Requests.Cpu(), divisor)
case "requests.memory":
return convertResourceMemoryToString(container.Resources.Requests.Memory(), divisor)
case "requests.ephemeral-storage":
return convertResourceEphemeralStorageToString(container.Resources.Requests.StorageEphemeral(), divisor)
}
return "", fmt.Errorf("Unsupported container resource : %v", fs.Resource)
}
// convertResourceCPUToString converts the cpu value to the format of the divisor
// and returns the ceiling of the value.
func convertResourceCPUToString(cpu *resource.Quantity, divisor resource.Quantity) (string, error) {
c := int64(math.Ceil(float64(cpu.MilliValue()) / float64(divisor.MilliValue())))
return strconv.FormatInt(c, 10), nil
}
// convertResourceMemoryToString converts the memory value to the format of the divisor
// and returns the ceiling of the value.
func convertResourceMemoryToString(memory *resource.Quantity, divisor resource.Quantity) (string, error) {
m := int64(math.Ceil(float64(memory.Value()) / float64(divisor.Value())))
return strconv.FormatInt(m, 10), nil
}
// convertResourceEphemeralStorageToString converts the ephemeral storage value to the
// format of the divisor and returns the ceiling of the value.
func convertResourceEphemeralStorageToString(ephemeralStorage *resource.Quantity, divisor resource.Quantity) (string, error) {
m := int64(math.Ceil(float64(ephemeralStorage.Value()) / float64(divisor.Value())))
return strconv.FormatInt(m, 10), nil
}
var standardContainerResources = sets.NewString(
string(corev1.ResourceCPU),
string(corev1.ResourceMemory),
string(corev1.ResourceEphemeralStorage),
)
// IsStandardContainerResourceName returns true if the container can make a resource request
// for the specified resource
func IsStandardContainerResourceName(str string) bool {
return standardContainerResources.Has(str) || IsHugePageResourceName(corev1.ResourceName(str))
}
// IsHugePageResourceName returns true if the resource name has the huge page
// resource prefix.
func IsHugePageResourceName(name corev1.ResourceName) bool {
return strings.HasPrefix(string(name), corev1.ResourceHugePagesPrefix)
}
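A sketch of resolving a resource field through a divisor (illustrative only; the import path k8s.io/kubernetes/pkg/kubectl/util/resource is assumed from the package name):

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    apiresource "k8s.io/apimachinery/pkg/api/resource"
    "k8s.io/kubernetes/pkg/kubectl/util/resource"
)

func main() {
    c := corev1.Container{Resources: corev1.ResourceRequirements{
        Limits: corev1.ResourceList{corev1.ResourceCPU: apiresource.MustParse("500m")},
    }}
    // With a divisor of "1", a 500m cpu limit rounds up to 1 whole core.
    v, _ := resource.ExtractContainerResourceValue(
        &corev1.ResourceFieldSelector{Resource: "limits.cpu", Divisor: apiresource.MustParse("1")}, &c)
    fmt.Println(v) // 1
}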

View File

@ -0,0 +1,59 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
// LookupContainerPortNumberByServicePort resolves a service port to the matching
// container port, handling named target ports and ignoring targetPort when
// clusterIP=None. It returns an error when a named port has no match in the pod
// (with -1 returned), or when the service does not declare such a port (with the
// input port number returned).
func LookupContainerPortNumberByServicePort(svc v1.Service, pod v1.Pod, port int32) (int32, error) {
for _, svcportspec := range svc.Spec.Ports {
if svcportspec.Port != port {
continue
}
if svc.Spec.ClusterIP == v1.ClusterIPNone {
return port, nil
}
if svcportspec.TargetPort.Type == intstr.Int {
if svcportspec.TargetPort.IntValue() == 0 {
// targetPort is omitted, and the IntValue() would be zero
return svcportspec.Port, nil
}
return int32(svcportspec.TargetPort.IntValue()), nil
}
return LookupContainerPortNumberByName(pod, svcportspec.TargetPort.String())
}
return port, fmt.Errorf("Service %s does not have a service port %d", svc.Name, port)
}
// LookupServicePortNumberByName finds the service port number for the given named port
func LookupServicePortNumberByName(svc v1.Service, name string) (int32, error) {
for _, svcportspec := range svc.Spec.Ports {
if svcportspec.Name == name {
return svcportspec.Port, nil
}
}
return int32(-1), fmt.Errorf("Service '%s' does not have a named port '%s'", svc.Name, name)
}
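A sketch of resolving a named targetPort through the pod spec (illustrative; uses the util package path shown elsewhere in this commit):

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/util/intstr"
    "k8s.io/kubernetes/pkg/kubectl/util"
)

func main() {
    svc := v1.Service{Spec: v1.ServiceSpec{Ports: []v1.ServicePort{
        {Port: 80, TargetPort: intstr.FromString("http")},
    }}}
    pod := v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{{
        Ports: []v1.ContainerPort{{Name: "http", ContainerPort: 8080}},
    }}}}
    // The named targetPort "http" is resolved through the pod's container ports.
    port, _ := util.LookupContainerPortNumberByServicePort(svc, pod, 80)
    fmt.Println(port) // 8080
}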

View File

@ -0,0 +1,38 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package slice
import (
"sort"
)
// SortInts64 sorts []int64 in increasing order
func SortInts64(a []int64) { sort.Slice(a, func(i, j int) bool { return a[i] < a[j] }) }
// ContainsString checks if a given slice of strings contains the provided string.
// If a modifier func is provided, it is also applied to each slice item before the comparison.
func ContainsString(slice []string, s string, modifier func(s string) string) bool {
for _, item := range slice {
if item == s {
return true
}
if modifier != nil && modifier(item) == s {
return true
}
}
return false
}
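For example, a modifier makes the membership test case-insensitive (a sketch, not part of the vendored file; the import path k8s.io/kubernetes/pkg/kubectl/util/slice is assumed from the package name):

package main

import (
    "fmt"
    "strings"

    "k8s.io/kubernetes/pkg/kubectl/util/slice"
)

func main() {
    names := []string{"Alpha", "beta"}
    // The modifier is applied to each slice item before comparison.
    fmt.Println(slice.ContainsString(names, "alpha", strings.ToLower)) // true
}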

View File

@ -0,0 +1,107 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"strings"
)
// TODO(yue9944882): Remove this helper package once it's copied to k/api
// IsDefaultStorageClassAnnotation represents a StorageClass annotation that
// marks a class as the default StorageClass
const IsDefaultStorageClassAnnotation = "storageclass.kubernetes.io/is-default-class"
// BetaIsDefaultStorageClassAnnotation is the beta version of IsDefaultStorageClassAnnotation.
const BetaIsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class"
// IsDefaultAnnotationText returns "Yes" if either default-StorageClass
// annotation is set to "true", and "No" otherwise.
func IsDefaultAnnotationText(obj metav1.ObjectMeta) string {
if obj.Annotations[IsDefaultStorageClassAnnotation] == "true" {
return "Yes"
}
if obj.Annotations[BetaIsDefaultStorageClassAnnotation] == "true" {
return "Yes"
}
return "No"
}
// GetAccessModesAsString returns a string representation of an array of access modes.
// The returned modes, when present, are always in the same order: RWO,ROX,RWX.
func GetAccessModesAsString(modes []v1.PersistentVolumeAccessMode) string {
modes = removeDuplicateAccessModes(modes)
modesStr := []string{}
if containsAccessMode(modes, v1.ReadWriteOnce) {
modesStr = append(modesStr, "RWO")
}
if containsAccessMode(modes, v1.ReadOnlyMany) {
modesStr = append(modesStr, "ROX")
}
if containsAccessMode(modes, v1.ReadWriteMany) {
modesStr = append(modesStr, "RWX")
}
return strings.Join(modesStr, ",")
}
// removeDuplicateAccessModes returns an array of access modes without any duplicates
func removeDuplicateAccessModes(modes []v1.PersistentVolumeAccessMode) []v1.PersistentVolumeAccessMode {
accessModes := []v1.PersistentVolumeAccessMode{}
for _, m := range modes {
if !containsAccessMode(accessModes, m) {
accessModes = append(accessModes, m)
}
}
return accessModes
}
func containsAccessMode(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
for _, m := range modes {
if m == mode {
return true
}
}
return false
}
// GetPersistentVolumeClass returns StorageClassName.
func GetPersistentVolumeClass(volume *v1.PersistentVolume) string {
// Use beta annotation first
if class, found := volume.Annotations[v1.BetaStorageClassAnnotation]; found {
return class
}
return volume.Spec.StorageClassName
}
// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was
// requested, it returns "".
func GetPersistentVolumeClaimClass(claim *v1.PersistentVolumeClaim) string {
// Use beta annotation first
if class, found := claim.Annotations[v1.BetaStorageClassAnnotation]; found {
return class
}
if claim.Spec.StorageClassName != nil {
return *claim.Spec.StorageClassName
}
return ""
}
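A quick sketch of the access-mode formatting (illustrative only; the import path k8s.io/kubernetes/pkg/kubectl/util/storage is assumed from the package name):

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/kubernetes/pkg/kubectl/util/storage"
)

func main() {
    modes := []v1.PersistentVolumeAccessMode{v1.ReadWriteMany, v1.ReadWriteOnce, v1.ReadWriteMany}
    // Duplicates are removed and the output order is always RWO,ROX,RWX.
    fmt.Println(storage.GetAccessModesAsString(modes)) // RWO,RWX
}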

28
vendor/k8s.io/kubernetes/pkg/kubectl/util/umask.go generated vendored Normal file
View File

@ -0,0 +1,28 @@
// +build !windows
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"golang.org/x/sys/unix"
)
// Umask is a wrapper for `unix.Umask()` on non-Windows platforms
func Umask(mask int) (old int, err error) {
return unix.Umask(mask), nil
}

View File

@ -0,0 +1,28 @@
// +build windows
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"errors"
)
// Umask returns an error on Windows
func Umask(mask int) (int, error) {
return 0, errors.New("platform and architecture is not supported")
}
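A small cross-platform sketch of how a caller might use the two Umask variants above (illustrative only):

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/kubectl/util"
)

func main() {
    // On Unix this swaps the process umask and returns the previous one;
    // on Windows the call fails with the "not supported" error instead.
    old, err := util.Umask(0077)
    if err == nil {
        defer util.Umask(old) // restore the original mask
    }
    fmt.Println(old, err)
}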

93
vendor/k8s.io/kubernetes/pkg/kubectl/util/util.go generated vendored Normal file
View File

@ -0,0 +1,93 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"crypto/md5"
"errors"
"fmt"
"path"
"path/filepath"
"strings"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// ParseRFC3339 parses an RFC3339 date in either RFC3339Nano or RFC3339 format.
// The nowFn parameter is accepted but not used.
func ParseRFC3339(s string, nowFn func() metav1.Time) (metav1.Time, error) {
if t, timeErr := time.Parse(time.RFC3339Nano, s); timeErr == nil {
return metav1.Time{Time: t}, nil
}
t, err := time.Parse(time.RFC3339, s)
if err != nil {
return metav1.Time{}, err
}
return metav1.Time{Time: t}, nil
}
// HashObject returns the md5 hash of an Object encoded by the given Codec
func HashObject(obj runtime.Object, codec runtime.Codec) (string, error) {
data, err := runtime.Encode(codec, obj)
if err != nil {
return "", err
}
return fmt.Sprintf("%x", md5.Sum(data)), nil
}
// ParseFileSource parses the source given.
//
// Acceptable formats include:
// 1. source-path: the basename will become the key name
// 2. source-name=source-path: the source-name will become the key name and
// source-path is the path to the key file.
//
// Key names cannot include '='.
func ParseFileSource(source string) (keyName, filePath string, err error) {
numSeparators := strings.Count(source, "=")
switch {
case numSeparators == 0:
return path.Base(filepath.ToSlash(source)), source, nil
case numSeparators == 1 && strings.HasPrefix(source, "="):
return "", "", fmt.Errorf("key name for file path %v missing", strings.TrimPrefix(source, "="))
case numSeparators == 1 && strings.HasSuffix(source, "="):
return "", "", fmt.Errorf("file path for key name %v missing", strings.TrimSuffix(source, "="))
case numSeparators > 1:
return "", "", errors.New("Key names or file paths cannot contain '='")
default:
components := strings.Split(source, "=")
return components[0], components[1], nil
}
}
// ParseLiteralSource parses the source key=val pair into its component pieces.
// This functionality is distinguished from strings.SplitN(source, "=", 2) since
// it returns an error in the case of an empty key or a missing equals sign.
func ParseLiteralSource(source string) (keyName, value string, err error) {
// a leading equal sign is invalid: the key would be empty
if strings.HasPrefix(source, "=") {
return "", "", fmt.Errorf("invalid literal source %v, expected key=value", source)
}
// split after the first equal (so values can have the = character)
items := strings.SplitN(source, "=", 2)
if len(items) != 2 {
return "", "", fmt.Errorf("invalid literal source %v, expected key=value", source)
}
return items[0], items[1], nil
}
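A sketch of the two parsers in action (illustrative; uses the util package path shown in the file header):

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/kubectl/util"
)

func main() {
    // With no '=', the basename becomes the key; with one '=', the
    // left side is the key and the right side is the file path.
    key, path, _ := util.ParseFileSource("/etc/ssl/ca.crt")
    fmt.Println(key, path) // ca.crt /etc/ssl/ca.crt

    // ParseLiteralSource splits on the first '=' only, so values may contain '='.
    k, v, _ := util.ParseLiteralSource("conn=host=db")
    fmt.Println(k, v) // conn host=db
}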