build: move e2e dependencies into e2e/go.mod

Several packages are only used while running the e2e suite. These
packages are less important to keep updated, as they cannot influence
the final executable that is part of the Ceph-CSI container image.

By moving these dependencies out of the main Ceph-CSI go.mod, it
becomes easier to identify whether a reported CVE affects Ceph-CSI
itself, or only the testing code (as is the case for most Kubernetes
CVEs).
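
As an illustration, the nested e2e module could look roughly like the
sketch below; the module path follows the usual repository-path/e2e
convention and, like the Go version and dependency versions, is a
placeholder rather than a value taken from this commit:

    // e2e/go.mod -- hypothetical sketch, versions are placeholders
    module github.com/ceph/ceph-csi/e2e

    go 1.23

    require (
        github.com/onsi/ginkgo/v2 v2.22.0
        github.com/onsi/gomega v1.36.0
        k8s.io/api v0.32.0
        k8s.io/apimachinery v0.32.0
        k8s.io/client-go v0.32.0
        // pulling in k8s.io/kubernetes usually also requires replace
        // directives that pin its staging modules to matching v0.x releases
        k8s.io/kubernetes v1.32.0
    )

With a separate module like this, the root go.mod no longer lists the
test-only packages, so a vulnerability scanner can evaluate each
go.mod (and its vendor/ tree) on its own.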

Signed-off-by: Niels de Vos <ndevos@ibm.com>
Niels de Vos
2025-03-04 08:57:28 +01:00
committed by mergify[bot]
parent 15da101b1b
commit bec6090996
8047 changed files with 1407827 additions and 3453 deletions


@@ -0,0 +1,37 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package legacyscheme
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
)
var (
// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered.
// NOTE: If you are copying this file to start a new api group, STOP! Copy the
// extensions group instead. This Scheme is special and should appear ONLY in
// the api group, unless you really know what you're doing.
// TODO(lavalamp): make the above error impossible.
Scheme = runtime.NewScheme()
// Codecs provides access to encoding and decoding for the scheme
Codecs = serializer.NewCodecFactory(Scheme)
// ParameterCodec handles versioning of objects that are converted to query parameters.
ParameterCodec = runtime.NewParameterCodec(Scheme)
)

e2e/vendor/k8s.io/kubernetes/pkg/api/service/OWNERS generated vendored (5 lines added)

@@ -0,0 +1,5 @@
# See the OWNERS docs at https://go.k8s.io/owners
reviewers:
- justinsb
- freehan

e2e/vendor/k8s.io/kubernetes/pkg/api/service/util.go generated vendored (93 lines added)

@@ -0,0 +1,93 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"fmt"
"strings"
api "k8s.io/kubernetes/pkg/apis/core"
utilnet "k8s.io/utils/net"
)
const (
defaultLoadBalancerSourceRanges = "0.0.0.0/0"
)
// IsAllowAll checks whether the utilnet.IPNet allows traffic from 0.0.0.0/0
func IsAllowAll(ipnets utilnet.IPNetSet) bool {
for _, s := range ipnets.StringSlice() {
if s == "0.0.0.0/0" {
return true
}
}
return false
}
// GetLoadBalancerSourceRanges first tries to parse and verify the LoadBalancerSourceRanges field from a service.
// If the field is not specified, it falls back to parsing and verifying the AnnotationLoadBalancerSourceRangesKey annotation on the service,
// extracting the source ranges to allow; if neither is present, it returns a default (allow-all) value.
func GetLoadBalancerSourceRanges(service *api.Service) (utilnet.IPNetSet, error) {
var ipnets utilnet.IPNetSet
var err error
// if SourceRange field is specified, ignore sourceRange annotation
if len(service.Spec.LoadBalancerSourceRanges) > 0 {
specs := service.Spec.LoadBalancerSourceRanges
ipnets, err = utilnet.ParseIPNets(specs...)
if err != nil {
return nil, fmt.Errorf("service.Spec.LoadBalancerSourceRanges: %v is not valid. Expecting a list of IP ranges. For example, 10.0.0.0/24. Error msg: %v", specs, err)
}
} else {
val := service.Annotations[api.AnnotationLoadBalancerSourceRangesKey]
val = strings.TrimSpace(val)
if val == "" {
val = defaultLoadBalancerSourceRanges
}
specs := strings.Split(val, ",")
ipnets, err = utilnet.ParseIPNets(specs...)
if err != nil {
return nil, fmt.Errorf("%s: %s is not valid. Expecting a comma-separated list of source IP ranges. For example, 10.0.0.0/24,192.168.2.0/24", api.AnnotationLoadBalancerSourceRangesKey, val)
}
}
return ipnets, nil
}
// ExternallyAccessible checks if service is externally accessible.
func ExternallyAccessible(service *api.Service) bool {
return service.Spec.Type == api.ServiceTypeLoadBalancer ||
service.Spec.Type == api.ServiceTypeNodePort ||
(service.Spec.Type == api.ServiceTypeClusterIP && len(service.Spec.ExternalIPs) > 0)
}
// RequestsOnlyLocalTraffic checks if service requests OnlyLocal traffic.
func RequestsOnlyLocalTraffic(service *api.Service) bool {
if service.Spec.Type != api.ServiceTypeLoadBalancer &&
service.Spec.Type != api.ServiceTypeNodePort {
return false
}
return service.Spec.ExternalTrafficPolicy == api.ServiceExternalTrafficPolicyLocal
}
// NeedsHealthCheck checks if service needs health check.
func NeedsHealthCheck(service *api.Service) bool {
if service.Spec.Type != api.ServiceTypeLoadBalancer {
return false
}
return RequestsOnlyLocalTraffic(service)
}


@@ -0,0 +1,106 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"fmt"
"net/netip"
"k8s.io/apimachinery/pkg/util/validation/field"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/helper"
)
func GetWarningsForService(service, oldService *api.Service) []string {
if service == nil {
return nil
}
var warnings []string
if _, ok := service.Annotations[api.DeprecatedAnnotationTopologyAwareHints]; ok {
warnings = append(warnings, fmt.Sprintf("annotation %s is deprecated, please use %s instead", api.DeprecatedAnnotationTopologyAwareHints, api.AnnotationTopologyMode))
}
if helper.IsServiceIPSet(service) {
for i, clusterIP := range service.Spec.ClusterIPs {
warnings = append(warnings, getWarningsForIP(field.NewPath("spec").Child("clusterIPs").Index(i), clusterIP)...)
}
}
for i, externalIP := range service.Spec.ExternalIPs {
warnings = append(warnings, getWarningsForIP(field.NewPath("spec").Child("externalIPs").Index(i), externalIP)...)
}
if len(service.Spec.LoadBalancerIP) > 0 {
warnings = append(warnings, getWarningsForIP(field.NewPath("spec").Child("loadBalancerIP"), service.Spec.LoadBalancerIP)...)
}
for i, cidr := range service.Spec.LoadBalancerSourceRanges {
warnings = append(warnings, getWarningsForCIDR(field.NewPath("spec").Child("loadBalancerSourceRanges").Index(i), cidr)...)
}
if service.Spec.Type == api.ServiceTypeExternalName && len(service.Spec.ExternalIPs) > 0 {
warnings = append(warnings, fmt.Sprintf("spec.externalIPs is ignored when spec.type is %q", api.ServiceTypeExternalName))
}
if service.Spec.Type != api.ServiceTypeExternalName && service.Spec.ExternalName != "" {
warnings = append(warnings, fmt.Sprintf("spec.externalName is ignored when spec.type is not %q", api.ServiceTypeExternalName))
}
return warnings
}
func getWarningsForIP(fieldPath *field.Path, address string) []string {
// IPv4 addresses with leading zeros CVE-2021-29923 are not valid in golang since 1.17
// This will also warn about possible future changes on the golang std library
// xref: https://issues.k8s.io/108074
ip, err := netip.ParseAddr(address)
if err != nil {
return []string{fmt.Sprintf("%s: IP address was accepted, but will be invalid in a future Kubernetes release: %v", fieldPath, err)}
}
// A Recommendation for IPv6 Address Text Representation
//
// "All of the above examples represent the same IPv6 address. This
// flexibility has caused many problems for operators, systems
// engineers, and customers.
// ..."
// https://datatracker.ietf.org/doc/rfc5952/
if ip.Is6() && ip.String() != address {
return []string{fmt.Sprintf("%s: IPv6 address %q is not in RFC 5952 canonical format (%q), which may cause controller apply-loops", fieldPath, address, ip.String())}
}
return []string{}
}
func getWarningsForCIDR(fieldPath *field.Path, cidr string) []string {
// IPv4 addresses with leading zeros CVE-2021-29923 are not valid in golang since 1.17
// This will also warn about possible future changes on the golang std library
// xref: https://issues.k8s.io/108074
prefix, err := netip.ParsePrefix(cidr)
if err != nil {
return []string{fmt.Sprintf("%s: IP prefix was accepted, but will be invalid in a future Kubernetes release: %v", fieldPath, err)}
}
// A Recommendation for IPv6 Address Text Representation
//
// "All of the above examples represent the same IPv6 address. This
// flexibility has caused many problems for operators, systems
// engineers, and customers.
// ..."
// https://datatracker.ietf.org/doc/rfc5952/
if prefix.Addr().Is6() && prefix.String() != cidr {
return []string{fmt.Sprintf("%s: IPv6 prefix %q is not in RFC 5952 canonical format (%q), which may cause controller apply-loops", fieldPath, cidr, prefix.String())}
}
return []string{}
}

e2e/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go generated vendored (418 lines added)

@@ -0,0 +1,418 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
"fmt"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
// FindPort locates the container port for the given pod and portName. If the
// targetPort is a number, use that. If the targetPort is a string, look that
// string up in all named ports in all containers in the target pod. If no
// match is found, fail.
func FindPort(pod *v1.Pod, svcPort *v1.ServicePort) (int, error) {
portName := svcPort.TargetPort
switch portName.Type {
case intstr.String:
name := portName.StrVal
for _, container := range pod.Spec.Containers {
for _, port := range container.Ports {
if port.Name == name && port.Protocol == svcPort.Protocol {
return int(port.ContainerPort), nil
}
}
}
// also support sidecar container (initContainer with restartPolicy=Always)
for _, container := range pod.Spec.InitContainers {
if container.RestartPolicy == nil || *container.RestartPolicy != v1.ContainerRestartPolicyAlways {
continue
}
for _, port := range container.Ports {
if port.Name == name && port.Protocol == svcPort.Protocol {
return int(port.ContainerPort), nil
}
}
}
case intstr.Int:
return portName.IntValue(), nil
}
return 0, fmt.Errorf("no suitable port for manifest: %s", pod.UID)
}
// ContainerType signifies container type
type ContainerType int
const (
// Containers is for normal containers
Containers ContainerType = 1 << iota
// InitContainers is for init containers
InitContainers
// EphemeralContainers is for ephemeral containers
EphemeralContainers
)
// AllContainers specifies that all containers be visited
const AllContainers ContainerType = InitContainers | Containers | EphemeralContainers
// AllFeatureEnabledContainers returns a ContainerType mask which includes all container
// types except for the ones guarded by feature gate.
func AllFeatureEnabledContainers() ContainerType {
return AllContainers
}
// ContainerVisitor is called with each container spec, and returns true
// if visiting should continue.
type ContainerVisitor func(container *v1.Container, containerType ContainerType) (shouldContinue bool)
// Visitor is called with each object name, and returns true if visiting should continue
type Visitor func(name string) (shouldContinue bool)
func skipEmptyNames(visitor Visitor) Visitor {
return func(name string) bool {
if len(name) == 0 {
// continue visiting
return true
}
// delegate to visitor
return visitor(name)
}
}
// VisitContainers invokes the visitor function with a pointer to every container
// spec in the given pod spec with type set in mask. If visitor returns false,
// visiting is short-circuited. VisitContainers returns true if visiting completes,
// false if visiting was short-circuited.
func VisitContainers(podSpec *v1.PodSpec, mask ContainerType, visitor ContainerVisitor) bool {
if mask&InitContainers != 0 {
for i := range podSpec.InitContainers {
if !visitor(&podSpec.InitContainers[i], InitContainers) {
return false
}
}
}
if mask&Containers != 0 {
for i := range podSpec.Containers {
if !visitor(&podSpec.Containers[i], Containers) {
return false
}
}
}
if mask&EphemeralContainers != 0 {
for i := range podSpec.EphemeralContainers {
if !visitor((*v1.Container)(&podSpec.EphemeralContainers[i].EphemeralContainerCommon), EphemeralContainers) {
return false
}
}
}
return true
}
// VisitPodSecretNames invokes the visitor function with the name of every secret
// referenced by the pod spec. If visitor returns false, visiting is short-circuited.
// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited.
// Returns true if visiting completed, false if visiting was short-circuited.
func VisitPodSecretNames(pod *v1.Pod, visitor Visitor) bool {
visitor = skipEmptyNames(visitor)
for _, reference := range pod.Spec.ImagePullSecrets {
if !visitor(reference.Name) {
return false
}
}
VisitContainers(&pod.Spec, AllContainers, func(c *v1.Container, containerType ContainerType) bool {
return visitContainerSecretNames(c, visitor)
})
var source *v1.VolumeSource
for i := range pod.Spec.Volumes {
source = &pod.Spec.Volumes[i].VolumeSource
switch {
case source.AzureFile != nil:
if len(source.AzureFile.SecretName) > 0 && !visitor(source.AzureFile.SecretName) {
return false
}
case source.CephFS != nil:
if source.CephFS.SecretRef != nil && !visitor(source.CephFS.SecretRef.Name) {
return false
}
case source.Cinder != nil:
if source.Cinder.SecretRef != nil && !visitor(source.Cinder.SecretRef.Name) {
return false
}
case source.FlexVolume != nil:
if source.FlexVolume.SecretRef != nil && !visitor(source.FlexVolume.SecretRef.Name) {
return false
}
case source.Projected != nil:
for j := range source.Projected.Sources {
if source.Projected.Sources[j].Secret != nil {
if !visitor(source.Projected.Sources[j].Secret.Name) {
return false
}
}
}
case source.RBD != nil:
if source.RBD.SecretRef != nil && !visitor(source.RBD.SecretRef.Name) {
return false
}
case source.Secret != nil:
if !visitor(source.Secret.SecretName) {
return false
}
case source.ScaleIO != nil:
if source.ScaleIO.SecretRef != nil && !visitor(source.ScaleIO.SecretRef.Name) {
return false
}
case source.ISCSI != nil:
if source.ISCSI.SecretRef != nil && !visitor(source.ISCSI.SecretRef.Name) {
return false
}
case source.StorageOS != nil:
if source.StorageOS.SecretRef != nil && !visitor(source.StorageOS.SecretRef.Name) {
return false
}
case source.CSI != nil:
if source.CSI.NodePublishSecretRef != nil && !visitor(source.CSI.NodePublishSecretRef.Name) {
return false
}
}
}
return true
}
// visitContainerSecretNames returns true unless the visitor returned false when invoked with a secret reference
func visitContainerSecretNames(container *v1.Container, visitor Visitor) bool {
for _, env := range container.EnvFrom {
if env.SecretRef != nil {
if !visitor(env.SecretRef.Name) {
return false
}
}
}
for _, envVar := range container.Env {
if envVar.ValueFrom != nil && envVar.ValueFrom.SecretKeyRef != nil {
if !visitor(envVar.ValueFrom.SecretKeyRef.Name) {
return false
}
}
}
return true
}
// VisitPodConfigmapNames invokes the visitor function with the name of every configmap
// referenced by the pod spec. If visitor returns false, visiting is short-circuited.
// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited.
// Returns true if visiting completed, false if visiting was short-circuited.
func VisitPodConfigmapNames(pod *v1.Pod, visitor Visitor) bool {
visitor = skipEmptyNames(visitor)
VisitContainers(&pod.Spec, AllContainers, func(c *v1.Container, containerType ContainerType) bool {
return visitContainerConfigmapNames(c, visitor)
})
var source *v1.VolumeSource
for i := range pod.Spec.Volumes {
source = &pod.Spec.Volumes[i].VolumeSource
switch {
case source.Projected != nil:
for j := range source.Projected.Sources {
if source.Projected.Sources[j].ConfigMap != nil {
if !visitor(source.Projected.Sources[j].ConfigMap.Name) {
return false
}
}
}
case source.ConfigMap != nil:
if !visitor(source.ConfigMap.Name) {
return false
}
}
}
return true
}
// visitContainerConfigmapNames returns true unless the visitor returned false when invoked with a configmap reference
func visitContainerConfigmapNames(container *v1.Container, visitor Visitor) bool {
for _, env := range container.EnvFrom {
if env.ConfigMapRef != nil {
if !visitor(env.ConfigMapRef.Name) {
return false
}
}
}
for _, envVar := range container.Env {
if envVar.ValueFrom != nil && envVar.ValueFrom.ConfigMapKeyRef != nil {
if !visitor(envVar.ValueFrom.ConfigMapKeyRef.Name) {
return false
}
}
}
return true
}
// GetContainerStatus extracts the status of container "name" from "statuses".
// It returns true if "name" exists, else returns false.
func GetContainerStatus(statuses []v1.ContainerStatus, name string) (v1.ContainerStatus, bool) {
for i := range statuses {
if statuses[i].Name == name {
return statuses[i], true
}
}
return v1.ContainerStatus{}, false
}
// GetExistingContainerStatus extracts the status of container "name" from "statuses".
// It returns an empty ContainerStatus if "name" does not exist.
func GetExistingContainerStatus(statuses []v1.ContainerStatus, name string) v1.ContainerStatus {
status, _ := GetContainerStatus(statuses, name)
return status
}
// GetIndexOfContainerStatus gets the index of status of container "name" from "statuses",
// It returns (index, true) if "name" exists, else returns (0, false).
func GetIndexOfContainerStatus(statuses []v1.ContainerStatus, name string) (int, bool) {
for i := range statuses {
if statuses[i].Name == name {
return i, true
}
}
return 0, false
}
// IsPodAvailable returns true if a pod is available; false otherwise.
// Precondition for an available pod is that it must be ready. On top
// of that, there are two cases when a pod can be considered available:
// 1. minReadySeconds == 0, or
// 2. LastTransitionTime (is set) + minReadySeconds < current time
func IsPodAvailable(pod *v1.Pod, minReadySeconds int32, now metav1.Time) bool {
if !IsPodReady(pod) {
return false
}
c := GetPodReadyCondition(pod.Status)
minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second
if minReadySeconds == 0 || (!c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time)) {
return true
}
return false
}
// IsPodReady returns true if a pod is ready; false otherwise.
func IsPodReady(pod *v1.Pod) bool {
return IsPodReadyConditionTrue(pod.Status)
}
// IsPodTerminal returns true if a pod is terminal, all containers are stopped and cannot ever regress.
func IsPodTerminal(pod *v1.Pod) bool {
return IsPodPhaseTerminal(pod.Status.Phase)
}
// IsPodPhaseTerminal returns true if the pod's phase is terminal.
func IsPodPhaseTerminal(phase v1.PodPhase) bool {
return phase == v1.PodFailed || phase == v1.PodSucceeded
}
// IsPodReadyConditionTrue returns true if a pod is ready; false otherwise.
func IsPodReadyConditionTrue(status v1.PodStatus) bool {
condition := GetPodReadyCondition(status)
return condition != nil && condition.Status == v1.ConditionTrue
}
// IsContainersReadyConditionTrue returns true if the ContainersReady condition of a pod is true; false otherwise.
func IsContainersReadyConditionTrue(status v1.PodStatus) bool {
condition := GetContainersReadyCondition(status)
return condition != nil && condition.Status == v1.ConditionTrue
}
// GetPodReadyCondition extracts the pod ready condition from the given status and returns that.
// Returns nil if the condition is not present.
func GetPodReadyCondition(status v1.PodStatus) *v1.PodCondition {
_, condition := GetPodCondition(&status, v1.PodReady)
return condition
}
// GetContainersReadyCondition extracts the containers ready condition from the given status and returns that.
// Returns nil if the condition is not present.
func GetContainersReadyCondition(status v1.PodStatus) *v1.PodCondition {
_, condition := GetPodCondition(&status, v1.ContainersReady)
return condition
}
// GetPodCondition extracts the provided condition from the given status and returns that.
// Returns -1 and nil if the condition is not present; otherwise returns the index of the
// located condition together with the condition itself.
func GetPodCondition(status *v1.PodStatus, conditionType v1.PodConditionType) (int, *v1.PodCondition) {
if status == nil {
return -1, nil
}
return GetPodConditionFromList(status.Conditions, conditionType)
}
// GetPodConditionFromList extracts the provided condition from the given list of condition and
// returns the index of the condition and the condition. Returns -1 and nil if the condition is not present.
func GetPodConditionFromList(conditions []v1.PodCondition, conditionType v1.PodConditionType) (int, *v1.PodCondition) {
if conditions == nil {
return -1, nil
}
for i := range conditions {
if conditions[i].Type == conditionType {
return i, &conditions[i]
}
}
return -1, nil
}
// UpdatePodCondition updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the
// status has changed.
// Returns true if pod condition has changed or has been added.
func UpdatePodCondition(status *v1.PodStatus, condition *v1.PodCondition) bool {
condition.LastTransitionTime = metav1.Now()
// Try to find this pod condition.
conditionIndex, oldCondition := GetPodCondition(status, condition.Type)
if oldCondition == nil {
// We are adding new pod condition.
status.Conditions = append(status.Conditions, *condition)
return true
}
// We are updating an existing condition, so we need to check if it has changed.
if condition.Status == oldCondition.Status {
condition.LastTransitionTime = oldCondition.LastTransitionTime
}
isEqual := condition.Status == oldCondition.Status &&
condition.Reason == oldCondition.Reason &&
condition.Message == oldCondition.Message &&
condition.LastProbeTime.Equal(&oldCondition.LastProbeTime) &&
condition.LastTransitionTime.Equal(&oldCondition.LastTransitionTime)
status.Conditions[conditionIndex] = *condition
// Return true if one of the fields have changed.
return !isEqual
}
// IsRestartableInitContainer returns true if the container has ContainerRestartPolicyAlways.
// This function is not checking if the container passed to it is indeed an init container.
// It is just checking if the container restart policy has been set to always.
func IsRestartableInitContainer(initContainer *v1.Container) bool {
if initContainer == nil || initContainer.RestartPolicy == nil {
return false
}
return *initContainer.RestartPolicy == v1.ContainerRestartPolicyAlways
}


@@ -0,0 +1,99 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"fmt"
"strings"
v1 "k8s.io/api/core/v1"
utilnet "k8s.io/utils/net"
)
const (
defaultLoadBalancerSourceRanges = "0.0.0.0/0"
)
// IsAllowAll checks whether the utilnet.IPNet allows traffic from 0.0.0.0/0
func IsAllowAll(ipnets utilnet.IPNetSet) bool {
for _, s := range ipnets.StringSlice() {
if s == "0.0.0.0/0" {
return true
}
}
return false
}
// GetLoadBalancerSourceRanges first tries to parse and verify the LoadBalancerSourceRanges field from a service.
// If the field is not specified, it falls back to parsing and verifying the AnnotationLoadBalancerSourceRangesKey annotation on the service,
// extracting the source ranges to allow; if neither is present, it returns a default (allow-all) value.
func GetLoadBalancerSourceRanges(service *v1.Service) (utilnet.IPNetSet, error) {
var ipnets utilnet.IPNetSet
var err error
// if SourceRange field is specified, ignore sourceRange annotation
if len(service.Spec.LoadBalancerSourceRanges) > 0 {
specs := service.Spec.LoadBalancerSourceRanges
ipnets, err = utilnet.ParseIPNets(specs...)
if err != nil {
return nil, fmt.Errorf("service.Spec.LoadBalancerSourceRanges: %v is not valid. Expecting a list of IP ranges. For example, 10.0.0.0/24. Error msg: %v", specs, err)
}
} else {
val := service.Annotations[v1.AnnotationLoadBalancerSourceRangesKey]
val = strings.TrimSpace(val)
if val == "" {
val = defaultLoadBalancerSourceRanges
}
specs := strings.Split(val, ",")
ipnets, err = utilnet.ParseIPNets(specs...)
if err != nil {
return nil, fmt.Errorf("%s: %s is not valid. Expecting a comma-separated list of source IP ranges. For example, 10.0.0.0/24,192.168.2.0/24", v1.AnnotationLoadBalancerSourceRangesKey, val)
}
}
return ipnets, nil
}
// ExternallyAccessible checks if service is externally accessible.
func ExternallyAccessible(service *v1.Service) bool {
return service.Spec.Type == v1.ServiceTypeLoadBalancer ||
service.Spec.Type == v1.ServiceTypeNodePort ||
(service.Spec.Type == v1.ServiceTypeClusterIP && len(service.Spec.ExternalIPs) > 0)
}
// ExternalPolicyLocal checks if service is externally accessible and has ETP = Local.
func ExternalPolicyLocal(service *v1.Service) bool {
if !ExternallyAccessible(service) {
return false
}
return service.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyLocal
}
// InternalPolicyLocal checks if service has ITP = Local.
func InternalPolicyLocal(service *v1.Service) bool {
if service.Spec.InternalTrafficPolicy == nil {
return false
}
return *service.Spec.InternalTrafficPolicy == v1.ServiceInternalTrafficPolicyLocal
}
// NeedsHealthCheck checks if service needs health check.
func NeedsHealthCheck(service *v1.Service) bool {
if service.Spec.Type != v1.ServiceTypeLoadBalancer {
return false
}
return ExternalPolicyLocal(service)
}

e2e/vendor/k8s.io/kubernetes/pkg/apis/apps/OWNERS generated vendored (8 lines added)

@@ -0,0 +1,8 @@
# See the OWNERS docs at https://go.k8s.io/owners
# approval on api packages bubbles to api-approvers
reviewers:
- sig-apps-api-reviewers
- sig-apps-api-approvers
labels:
- sig/apps

e2e/vendor/k8s.io/kubernetes/pkg/apis/apps/doc.go generated vendored (19 lines added)

@@ -0,0 +1,19 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
package apps // import "k8s.io/kubernetes/pkg/apis/apps"

e2e/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go generated vendored (66 lines added)

@@ -0,0 +1,66 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apps
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/apis/autoscaling"
)
var (
// SchemeBuilder stores functions to add things to a scheme.
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme applies all stored functions to a scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
// GroupName is the group name used in this package
const GroupName = "apps"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
// Adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
// TODO this will get cleaned up when the scheme types are fixed
scheme.AddKnownTypes(SchemeGroupVersion,
&DaemonSet{},
&DaemonSetList{},
&Deployment{},
&DeploymentList{},
&DeploymentRollback{},
&autoscaling.Scale{},
&StatefulSet{},
&StatefulSetList{},
&ControllerRevision{},
&ControllerRevisionList{},
&ReplicaSet{},
&ReplicaSetList{},
)
return nil
}

e2e/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go generated vendored (918 lines added)

@@ -0,0 +1,918 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apps
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
api "k8s.io/kubernetes/pkg/apis/core"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// StatefulSet represents a set of pods with consistent identities.
// Identities are defined as:
// - Network: A single stable DNS and hostname.
// - Storage: As many VolumeClaims as requested.
//
// The StatefulSet guarantees that a given network identity will always
// map to the same storage identity.
type StatefulSet struct {
metav1.TypeMeta
// +optional
metav1.ObjectMeta
// Spec defines the desired identities of pods in this set.
// +optional
Spec StatefulSetSpec
// Status is the current status of Pods in this StatefulSet. This data
// may be out of date by some window of time.
// +optional
Status StatefulSetStatus
}
// PodManagementPolicyType defines the policy for creating pods under a stateful set.
type PodManagementPolicyType string
const (
// OrderedReadyPodManagement will create pods in strictly increasing order on
// scale up and strictly decreasing order on scale down, progressing only when
// the previous pod is ready or terminated. At most one pod will be changed
// at any time.
OrderedReadyPodManagement PodManagementPolicyType = "OrderedReady"
// ParallelPodManagement will create and delete pods as soon as the stateful set
// replica count is changed, and will not wait for pods to be ready or complete
// termination.
ParallelPodManagement PodManagementPolicyType = "Parallel"
)
// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
// controller will use to perform updates. It includes any additional parameters
// necessary to perform the update for the indicated strategy.
type StatefulSetUpdateStrategy struct {
// Type indicates the type of the StatefulSetUpdateStrategy.
Type StatefulSetUpdateStrategyType
// RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.
RollingUpdate *RollingUpdateStatefulSetStrategy
}
// StatefulSetUpdateStrategyType is a string enumeration type that enumerates
// all possible update strategies for the StatefulSet controller.
type StatefulSetUpdateStrategyType string
const (
// RollingUpdateStatefulSetStrategyType indicates that update will be
// applied to all Pods in the StatefulSet with respect to the StatefulSet
// ordering constraints. When a scale operation is performed with this
// strategy, new Pods will be created from the specification version indicated
// by the StatefulSet's updateRevision.
RollingUpdateStatefulSetStrategyType StatefulSetUpdateStrategyType = "RollingUpdate"
// OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version
// tracking and ordered rolling restarts are disabled. Pods are recreated
// from the StatefulSetSpec when they are manually deleted. When a scale
// operation is performed with this strategy, new Pods will be created from the specification version indicated
// by the StatefulSet's currentRevision.
OnDeleteStatefulSetStrategyType StatefulSetUpdateStrategyType = "OnDelete"
)
// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.
type RollingUpdateStatefulSetStrategy struct {
// Partition indicates the ordinal at which the StatefulSet should be partitioned
// for updates. During a rolling update, all pods from ordinal Replicas-1 to
// Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched.
// This is helpful in being able to do a canary based deployment. The default value is 0.
Partition int32
// The maximum number of pods that can be unavailable during the update.
// Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
// Absolute number is calculated from percentage by rounding up. This can not be 0.
// Defaults to 1. This field is alpha-level and is only honored by servers that enable the
// MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to
// Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it
// will be counted towards MaxUnavailable.
// +optional
MaxUnavailable *intstr.IntOrString
}
// PersistentVolumeClaimRetentionPolicyType is a string enumeration of the policies that will determine
// when volumes from the VolumeClaimTemplates will be deleted when the controlling StatefulSet is
// deleted or scaled down.
type PersistentVolumeClaimRetentionPolicyType string
const (
// RetainPersistentVolumeClaimRetentionPolicyType is the default
// PersistentVolumeClaimRetentionPolicy and specifies that
// PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates
// will not be deleted.
RetainPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Retain"
// DeletePersistentVolumeClaimRetentionPolicyType specifies that
// PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates
// will be deleted in the scenario specified in
// StatefulSetPersistentVolumeClaimPolicy.
DeletePersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Delete"
)
// StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs
// created from the StatefulSet VolumeClaimTemplates.
type StatefulSetPersistentVolumeClaimRetentionPolicy struct {
// WhenDeleted specifies what happens to PVCs created from StatefulSet
// VolumeClaimTemplates when the StatefulSet is deleted. The default policy
// of `Retain` causes PVCs to not be affected by StatefulSet deletion. The
// `Delete` policy causes those PVCs to be deleted.
WhenDeleted PersistentVolumeClaimRetentionPolicyType
// WhenScaled specifies what happens to PVCs created from StatefulSet
// VolumeClaimTemplates when the StatefulSet is scaled down. The default
// policy of `Retain` causes PVCs to not be affected by a scaledown. The
// `Delete` policy causes the associated PVCs for any excess pods above
// the replica count to be deleted.
WhenScaled PersistentVolumeClaimRetentionPolicyType
}
// StatefulSetOrdinals describes the policy used for replica ordinal assignment
// in this StatefulSet.
type StatefulSetOrdinals struct {
// start is the number representing the first replica's index. It may be used
// to number replicas from an alternate index (eg: 1-indexed) over the default
// 0-indexed names, or to orchestrate progressive movement of replicas from
// one StatefulSet to another.
// If set, replica indices will be in the range:
// [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas).
// If unset, defaults to 0. Replica indices will be in the range:
// [0, .spec.replicas).
// +optional
Start int32
}
// A StatefulSetSpec is the specification of a StatefulSet.
type StatefulSetSpec struct {
// Replicas is the desired number of replicas of the given Template.
// These are replicas in the sense that they are instantiations of the
// same Template, but individual replicas also have a consistent identity.
// If unspecified, defaults to 1.
// TODO: Consider a rename of this field.
// +optional
Replicas int32
// Selector is a label query over pods that should match the replica count.
// If empty, defaulted to labels on the pod template.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
// +optional
Selector *metav1.LabelSelector
// Template is the object that describes the pod that will be created if
// insufficient replicas are detected. Each pod stamped out by the StatefulSet
// will fulfill this Template, but have a unique identity from the rest
// of the StatefulSet. Each pod will be named with the format
// <statefulsetname>-<podindex>. For example, a pod in a StatefulSet named
// "web" with index number "3" would be named "web-3".
// The only allowed template.spec.restartPolicy value is "Always".
Template api.PodTemplateSpec
// VolumeClaimTemplates is a list of claims that pods are allowed to reference.
// The StatefulSet controller is responsible for mapping network identities to
// claims in a way that maintains the identity of a pod. Every claim in
// this list must have at least one matching (by name) volumeMount in one
// container in the template. A claim in this list takes precedence over
// any volumes in the template, with the same name.
// TODO: Define the behavior if a claim already exists with the same name.
// +optional
VolumeClaimTemplates []api.PersistentVolumeClaim
// ServiceName is the name of the service that governs this StatefulSet.
// This service must exist before the StatefulSet, and is responsible for
// the network identity of the set. Pods get DNS/hostnames that follow the
// pattern: pod-specific-string.serviceName.default.svc.cluster.local
// where "pod-specific-string" is managed by the StatefulSet controller.
ServiceName string
// PodManagementPolicy controls how pods are created during initial scale up,
// when replacing pods on nodes, or when scaling down. The default policy is
// `OrderedReady`, where pods are created in increasing order (pod-0, then
// pod-1, etc) and the controller will wait until each pod is ready before
// continuing. When scaling down, the pods are removed in the opposite order.
// The alternative policy is `Parallel` which will create pods in parallel
// to match the desired scale without waiting, and on scale down will delete
// all pods at once.
// +optional
PodManagementPolicy PodManagementPolicyType
// updateStrategy indicates the StatefulSetUpdateStrategy that will be
// employed to update Pods in the StatefulSet when a revision is made to
// Template.
UpdateStrategy StatefulSetUpdateStrategy
// revisionHistoryLimit is the maximum number of revisions that will
// be maintained in the StatefulSet's revision history. The revision history
// consists of all revisions not represented by a currently applied
// StatefulSetSpec version. The default value is 10.
RevisionHistoryLimit *int32
// Minimum number of seconds for which a newly created pod should be ready
// without any of its container crashing for it to be considered available.
// Defaults to 0 (pod will be considered available as soon as it is ready)
// +optional
MinReadySeconds int32
// PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from
// the StatefulSet VolumeClaimTemplates. This requires the
// StatefulSetAutoDeletePVC feature gate to be enabled, which is beta and default on from 1.27.
// +optional
PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy
// ordinals controls the numbering of replica indices in a StatefulSet. The
// default ordinals behavior assigns a "0" index to the first replica and
// increments the index by one for each additional replica requested.
// +optional
Ordinals *StatefulSetOrdinals
}
// StatefulSetStatus represents the current state of a StatefulSet.
type StatefulSetStatus struct {
// observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the
// StatefulSet's generation, which is updated on mutation by the API Server.
// +optional
ObservedGeneration *int64
// replicas is the number of Pods created by the StatefulSet controller.
Replicas int32
// readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.
ReadyReplicas int32
// currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
// indicated by currentRevision.
CurrentReplicas int32
// updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
// indicated by updateRevision.
UpdatedReplicas int32
// currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the
// sequence [0,currentReplicas).
CurrentRevision string
// updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence
// [replicas-updatedReplicas,replicas)
UpdateRevision string
// collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller
// uses this field as a collision avoidance mechanism when it needs to create the name for the
// newest ControllerRevision.
// +optional
CollisionCount *int32
// Represents the latest available observations of a statefulset's current state.
Conditions []StatefulSetCondition
// Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset.
// +optional
AvailableReplicas int32
}
// StatefulSetConditionType describes the condition types of StatefulSets.
type StatefulSetConditionType string
// TODO: Add valid condition types for Statefulsets.
// StatefulSetCondition describes the state of a statefulset at a certain point.
type StatefulSetCondition struct {
// Type of statefulset condition.
Type StatefulSetConditionType
// Status of the condition, one of True, False, Unknown.
Status api.ConditionStatus
// The last time this condition was updated.
LastTransitionTime metav1.Time
// The reason for the condition's last transition.
Reason string
// A human readable message indicating details about the transition.
Message string
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// StatefulSetList is a collection of StatefulSets.
type StatefulSetList struct {
metav1.TypeMeta
// +optional
metav1.ListMeta
Items []StatefulSet
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ControllerRevision implements an immutable snapshot of state data. Clients
// are responsible for serializing and deserializing the objects that contain
// their internal state.
// Once a ControllerRevision has been successfully created, it can not be updated.
// The API Server will fail validation of all requests that attempt to mutate
// the Data field. ControllerRevisions may, however, be deleted.
type ControllerRevision struct {
metav1.TypeMeta
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta
// Data is the Object representing the state.
Data runtime.RawExtension
// Revision indicates the revision of the state represented by Data.
Revision int64
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ControllerRevisionList is a resource containing a list of ControllerRevision objects.
type ControllerRevisionList struct {
metav1.TypeMeta
// +optional
metav1.ListMeta
// Items is the list of ControllerRevision objects.
Items []ControllerRevision
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Deployment provides declarative updates for Pods and ReplicaSets.
type Deployment struct {
metav1.TypeMeta
// +optional
metav1.ObjectMeta
// Specification of the desired behavior of the Deployment.
// +optional
Spec DeploymentSpec
// Most recently observed status of the Deployment.
// +optional
Status DeploymentStatus
}
// DeploymentSpec specifies the state of a Deployment.
type DeploymentSpec struct {
// Number of desired pods.
Replicas int32
// Label selector for pods. Existing ReplicaSets whose pods are
// selected by this will be the ones affected by this deployment.
// +optional
Selector *metav1.LabelSelector
// Template describes the pods that will be created.
// The only allowed template.spec.restartPolicy value is "Always".
Template api.PodTemplateSpec
// The deployment strategy to use to replace existing pods with new ones.
// +optional
Strategy DeploymentStrategy
// Minimum number of seconds for which a newly created pod should be ready
// without any of its container crashing, for it to be considered available.
// Defaults to 0 (pod will be considered available as soon as it is ready)
// +optional
MinReadySeconds int32
// The number of old ReplicaSets to retain to allow rollback.
// This is a pointer to distinguish between explicit zero and not specified.
// This is set to the max value of int32 (i.e. 2147483647) by default, which means
// "retaining all old ReplicaSets".
// +optional
RevisionHistoryLimit *int32
// Indicates that the deployment is paused and will not be processed by the
// deployment controller.
// +optional
Paused bool
// DEPRECATED.
// The config this deployment is rolling back to. Will be cleared after rollback is done.
// +optional
RollbackTo *RollbackConfig
// The maximum time in seconds for a deployment to make progress before it
// is considered to be failed. The deployment controller will continue to
// process failed deployments and a condition with a ProgressDeadlineExceeded
// reason will be surfaced in the deployment status. Note that progress will
// not be estimated during the time a deployment is paused. This is set to
// the max value of int32 (i.e. 2147483647) by default, which means "no deadline".
// +optional
ProgressDeadlineSeconds *int32
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// DeploymentRollback stores the information required to rollback a deployment.
// DEPRECATED.
type DeploymentRollback struct {
metav1.TypeMeta
// Required: This must match the Name of a deployment.
Name string
// The annotations to be updated to a deployment
// +optional
UpdatedAnnotations map[string]string
// The config of this deployment rollback.
RollbackTo RollbackConfig
}
// RollbackConfig specifies the state of a revision to roll back to.
// DEPRECATED.
type RollbackConfig struct {
// The revision to rollback to. If set to 0, rollback to the last revision.
// +optional
Revision int64
}
const (
// DefaultDeploymentUniqueLabelKey is the default key of the selector that is added
// to existing RCs (and label key that is added to its pods) to prevent the existing RCs
// from selecting new pods (and old pods from being selected by the new RC).
DefaultDeploymentUniqueLabelKey string = "pod-template-hash"
)
// DeploymentStrategy stores information about the strategy and rolling-update
// behavior of a deployment.
type DeploymentStrategy struct {
// Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
// +optional
Type DeploymentStrategyType
// Rolling update config params. Present only if DeploymentStrategyType =
// RollingUpdate.
//---
// TODO: Update this to follow our convention for oneOf, whatever we decide it
// to be.
// +optional
RollingUpdate *RollingUpdateDeployment
}
// DeploymentStrategyType defines strategies with a deployment.
type DeploymentStrategyType string
const (
// RecreateDeploymentStrategyType - kill all existing pods before creating new ones.
RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate"
// RollingUpdateDeploymentStrategyType - Replace the old RCs by a new one using a rolling update, i.e. gradually scale down the old RCs and scale up the new one.
RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate"
)
// RollingUpdateDeployment is the spec to control the desired behavior of rolling update.
type RollingUpdateDeployment struct {
// The maximum number of pods that can be unavailable during the update.
// Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%).
// Absolute number is calculated from percentage by rounding down.
// This can not be 0 if MaxSurge is 0.
// By default, a fixed value of 1 is used.
// Example: when this is set to 30%, the old RC can be scaled down by 30%
// immediately when the rolling update starts. Once new pods are ready, old RC
// can be scaled down further, followed by scaling up the new RC, ensuring
// that at least 70% of original number of pods are available at all times
// during the update.
// +optional
MaxUnavailable intstr.IntOrString
// The maximum number of pods that can be scheduled above the original number of
// pods.
// Value can be an absolute number (ex: 5) or a percentage of total pods at
// the start of the update (ex: 10%). This can not be 0 if MaxUnavailable is 0.
// Absolute number is calculated from percentage by rounding up.
// By default, a value of 1 is used.
// Example: when this is set to 30%, the new RC can be scaled up by 30%
// immediately when the rolling update starts. Once old pods have been killed,
// new RC can be scaled up further, ensuring that total number of pods running
// at any time during the update is at most 130% of original pods.
// +optional
MaxSurge intstr.IntOrString
}
// DeploymentStatus holds information about the observed status of a deployment.
type DeploymentStatus struct {
// The generation observed by the deployment controller.
// +optional
ObservedGeneration int64
// Total number of non-terminated pods targeted by this deployment (their labels match the selector).
// +optional
Replicas int32
// Total number of non-terminated pods targeted by this deployment that have the desired template spec.
// +optional
UpdatedReplicas int32
// Total number of ready pods targeted by this deployment.
// +optional
ReadyReplicas int32
// Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
// +optional
AvailableReplicas int32
// Total number of unavailable pods targeted by this deployment. This is the total number of
// pods that are still required for the deployment to have 100% available capacity. They may
// either be pods that are running but not yet available or pods that still have not been created.
// +optional
UnavailableReplicas int32
// Represents the latest available observations of a deployment's current state.
Conditions []DeploymentCondition
// Count of hash collisions for the Deployment. The Deployment controller uses this
// field as a collision avoidance mechanism when it needs to create the name for the
// newest ReplicaSet.
// +optional
CollisionCount *int32
}
// DeploymentConditionType defines conditions of a deployment.
type DeploymentConditionType string
// These are valid conditions of a deployment.
const (
// Available means the deployment is available, ie. at least the minimum available
// replicas required are up and running for at least minReadySeconds.
DeploymentAvailable DeploymentConditionType = "Available"
// Progressing means the deployment is progressing. Progress for a deployment is
// considered when a new replica set is created or adopted, and when new pods scale
// up or old pods scale down. Progress is not estimated for paused deployments or
// when progressDeadlineSeconds is not specified.
DeploymentProgressing DeploymentConditionType = "Progressing"
// ReplicaFailure is added in a deployment when one of its pods fails to be created
// or deleted.
DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure"
)
// DeploymentCondition describes the state of a deployment at a certain point.
type DeploymentCondition struct {
// Type of deployment condition.
Type DeploymentConditionType
// Status of the condition, one of True, False, Unknown.
Status api.ConditionStatus
// The last time this condition was updated.
LastUpdateTime metav1.Time
// Last time the condition transitioned from one status to another.
LastTransitionTime metav1.Time
// The reason for the condition's last transition.
Reason string
// A human readable message indicating details about the transition.
Message string
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// DeploymentList defines multiple deployments.
type DeploymentList struct {
metav1.TypeMeta
// +optional
metav1.ListMeta
// Items is the list of deployments.
Items []Deployment
}
// DaemonSetUpdateStrategy defines a strategy to update a daemon set.
type DaemonSetUpdateStrategy struct {
// Type of daemon set update. Can be "RollingUpdate" or "OnDelete".
// +optional
Type DaemonSetUpdateStrategyType
// Rolling update config params. Present only if type = "RollingUpdate".
//---
// TODO: Update this to follow our convention for oneOf, whatever we decide it
// to be. Same as Deployment `strategy.rollingUpdate`.
// See https://github.com/kubernetes/kubernetes/issues/35345
// +optional
RollingUpdate *RollingUpdateDaemonSet
}
// DaemonSetUpdateStrategyType is a strategy according to which a daemon set
// gets updated.
type DaemonSetUpdateStrategyType string
const (
// RollingUpdateDaemonSetStrategyType - Replace the old daemons by new ones using a rolling update, i.e. replace them on each node one after the other.
RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate"
// OnDeleteDaemonSetStrategyType - Replace the old daemons only when it's killed
OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete"
)
// RollingUpdateDaemonSet is the spec to control the desired behavior of daemon set rolling update.
type RollingUpdateDaemonSet struct {
// The maximum number of DaemonSet pods that can be unavailable during the
// update. Value can be an absolute number (ex: 5) or a percentage of total
// number of DaemonSet pods at the start of the update (ex: 10%). Absolute
// number is calculated from percentage by rounding up.
// This cannot be 0 if MaxSurge is 0
// Default value is 1.
// Example: when this is set to 30%, at most 30% of the total number of nodes
// that should be running the daemon pod (i.e. status.desiredNumberScheduled)
// can have their pods stopped for an update at any given time. The update
// starts by stopping at most 30% of those DaemonSet pods and then brings
// up new DaemonSet pods in their place. Once the new pods are available,
// it then proceeds onto other DaemonSet pods, thus ensuring that at least
// 70% of original number of DaemonSet pods are available at all times during
// the update.
// +optional
MaxUnavailable intstr.IntOrString
// The maximum number of nodes with an existing available DaemonSet pod that
// can have an updated DaemonSet pod during an update.
// Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
// This can not be 0 if MaxUnavailable is 0.
// Absolute number is calculated from percentage by rounding up to a minimum of 1.
// Default value is 0.
// Example: when this is set to 30%, at most 30% of the total number of nodes
// that should be running the daemon pod (i.e. status.desiredNumberScheduled)
// can have a new pod created before the old pod is marked as deleted.
// The update starts by launching new pods on 30% of nodes. Once an updated
// pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
// on that node is marked deleted. If the old pod becomes unavailable for any
// reason (Ready transitions to false, is evicted, or is drained) an updated
// pod is immediately created on that node without considering surge limits.
// Allowing surge implies the possibility that the resources consumed by the
// daemonset on any given node can double if the readiness check fails, and
// so resource intensive daemonsets should take into account that they may
// cause evictions during disruption.
// +optional
MaxSurge intstr.IntOrString
}
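// Illustrative sketch (not part of the upstream type definitions): resolving a
// percentage MaxUnavailable against the node count, assuming the
// GetScaledValueFromIntOrPercent helper from k8s.io/apimachinery/pkg/util/intstr:
//
//	mu := intstr.FromString("30%")
//	// 30% of 7 scheduled nodes, rounded up => at most 3 pods unavailable at once.
//	n, _ := intstr.GetScaledValueFromIntOrPercent(&mu, 7, true)
//	_ = n // n == 3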
// DaemonSetSpec is the specification of a daemon set.
type DaemonSetSpec struct {
// A label query over pods that are managed by the daemon set.
// Must match in order to be controlled.
// If empty, defaulted to labels on Pod template.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
// +optional
Selector *metav1.LabelSelector
// An object that describes the pod that will be created.
// The DaemonSet will create exactly one copy of this pod on every node
// that matches the template's node selector (or on every node if no node
// selector is specified).
// The only allowed template.spec.restartPolicy value is "Always".
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
Template api.PodTemplateSpec
// An update strategy to replace existing DaemonSet pods with new pods.
// +optional
UpdateStrategy DaemonSetUpdateStrategy
// The minimum number of seconds for which a newly created DaemonSet pod should
// be ready without any of its container crashing, for it to be considered
// available. Defaults to 0 (pod will be considered available as soon as it
// is ready).
// +optional
MinReadySeconds int32
// DEPRECATED.
// A sequence number representing a specific generation of the template.
// Populated by the system. It can be set only during the creation.
// +optional
TemplateGeneration int64
// The number of old history entries to retain to allow rollback.
// This is a pointer to distinguish between explicit zero and not specified.
// Defaults to 10.
// +optional
RevisionHistoryLimit *int32
}
// DaemonSetStatus represents the current status of a daemon set.
type DaemonSetStatus struct {
// The number of nodes that are running at least 1
// daemon pod and are supposed to run the daemon pod.
CurrentNumberScheduled int32
// The number of nodes that are running the daemon pod, but are
// not supposed to run the daemon pod.
NumberMisscheduled int32
// The total number of nodes that should be running the daemon
// pod (including nodes correctly running the daemon pod).
DesiredNumberScheduled int32
// The number of nodes that should be running the daemon pod and have one
// or more of the daemon pod running and ready.
NumberReady int32
// The most recent generation observed by the daemon set controller.
// +optional
ObservedGeneration int64
// The total number of nodes that are running updated daemon pod
// +optional
UpdatedNumberScheduled int32
// The number of nodes that should be running the
// daemon pod and have one or more of the daemon pod running and
// available (ready for at least spec.minReadySeconds)
// +optional
NumberAvailable int32
// The number of nodes that should be running the
// daemon pod and have none of the daemon pod running and available
// (ready for at least spec.minReadySeconds)
// +optional
NumberUnavailable int32
// Count of hash collisions for the DaemonSet. The DaemonSet controller
// uses this field as a collision avoidance mechanism when it needs to
// create the name for the newest ControllerRevision.
// +optional
CollisionCount *int32
// Represents the latest available observations of a DaemonSet's current state.
Conditions []DaemonSetCondition
}
// DaemonSetConditionType defines a daemon set condition.
type DaemonSetConditionType string
// TODO: Add valid condition types of a DaemonSet.
// DaemonSetCondition describes the state of a DaemonSet at a certain point.
type DaemonSetCondition struct {
// Type of DaemonSet condition.
Type DaemonSetConditionType
// Status of the condition, one of True, False, Unknown.
Status api.ConditionStatus
// Last time the condition transitioned from one status to another.
LastTransitionTime metav1.Time
// The reason for the condition's last transition.
Reason string
// A human readable message indicating details about the transition.
Message string
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// DaemonSet represents the configuration of a daemon set.
type DaemonSet struct {
metav1.TypeMeta
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta
// The desired behavior of this daemon set.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Spec DaemonSetSpec
// The current status of this daemon set. This data may be
// out of date by some window of time.
// Populated by the system.
// Read-only.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Status DaemonSetStatus
}
const (
// DaemonSetTemplateGenerationKey is the key of the labels that is added
// to daemon set pods to distinguish between old and new pod templates
// during DaemonSet template update.
// DEPRECATED: DefaultDaemonSetUniqueLabelKey is used instead.
DaemonSetTemplateGenerationKey string = "pod-template-generation"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// DaemonSetList is a collection of daemon sets.
type DaemonSetList struct {
metav1.TypeMeta
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ListMeta
// A list of daemon sets.
Items []DaemonSet
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ReplicaSet ensures that a specified number of pod replicas are running at any given time.
type ReplicaSet struct {
metav1.TypeMeta
// +optional
metav1.ObjectMeta
// Spec defines the desired behavior of this ReplicaSet.
// +optional
Spec ReplicaSetSpec
// Status is the current status of this ReplicaSet. This data may be
// out of date by some window of time.
// +optional
Status ReplicaSetStatus
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ReplicaSetList is a collection of ReplicaSets.
type ReplicaSetList struct {
metav1.TypeMeta
// +optional
metav1.ListMeta
Items []ReplicaSet
}
// ReplicaSetSpec is the specification of a ReplicaSet.
// As the internal representation of a ReplicaSet, it must have
// a Template set.
type ReplicaSetSpec struct {
// Replicas is the number of desired replicas.
Replicas int32
// Minimum number of seconds for which a newly created pod should be ready
// without any of its container crashing, for it to be considered available.
// Defaults to 0 (pod will be considered available as soon as it is ready)
// +optional
MinReadySeconds int32
// Selector is a label query over pods that should match the replica count.
// Must match in order to be controlled.
// If empty, defaulted to labels on pod template.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
// +optional
Selector *metav1.LabelSelector
// Template is the object that describes the pod that will be created if
// insufficient replicas are detected.
// The only allowed template.spec.restartPolicy value is "Always".
// +optional
Template api.PodTemplateSpec
}
// ReplicaSetStatus represents the current status of a ReplicaSet.
type ReplicaSetStatus struct {
// Replicas is the number of actual replicas.
Replicas int32
// The number of pods that have labels matching the labels of the pod template of the replicaset.
// +optional
FullyLabeledReplicas int32
// The number of ready replicas for this replica set.
// +optional
ReadyReplicas int32
// The number of available replicas (ready for at least minReadySeconds) for this replica set.
// +optional
AvailableReplicas int32
// ObservedGeneration is the most recent generation observed by the controller.
// +optional
ObservedGeneration int64
// Represents the latest available observations of a replica set's current state.
// +optional
Conditions []ReplicaSetCondition
}
// ReplicaSetConditionType is a condition of a replica set.
type ReplicaSetConditionType string
// These are valid conditions of a replica set.
const (
// ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created
// due to insufficient quota, limit ranges, pod security policy, node selectors, etc. or deleted
// due to kubelet being down or finalizers failing.
ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure"
)
// ReplicaSetCondition describes the state of a replica set at a certain point.
type ReplicaSetCondition struct {
// Type of replica set condition.
Type ReplicaSetConditionType
// Status of the condition, one of True, False, Unknown.
Status api.ConditionStatus
// The last time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time
// The reason for the condition's last transition.
// +optional
Reason string
// A human readable message indicating details about the transition.
// +optional
Message string
}

View File

@ -0,0 +1,848 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package apps
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
intstr "k8s.io/apimachinery/pkg/util/intstr"
core "k8s.io/kubernetes/pkg/apis/core"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Data.DeepCopyInto(&out.Data)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevision.
func (in *ControllerRevision) DeepCopy() *ControllerRevision {
if in == nil {
return nil
}
out := new(ControllerRevision)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ControllerRevision) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ControllerRevision, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevisionList.
func (in *ControllerRevisionList) DeepCopy() *ControllerRevisionList {
if in == nil {
return nil
}
out := new(ControllerRevisionList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ControllerRevisionList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSet) DeepCopyInto(out *DaemonSet) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSet.
func (in *DaemonSet) DeepCopy() *DaemonSet {
if in == nil {
return nil
}
out := new(DaemonSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DaemonSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition.
func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition {
if in == nil {
return nil
}
out := new(DaemonSetCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]DaemonSet, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetList.
func (in *DaemonSetList) DeepCopy() *DaemonSetList {
if in == nil {
return nil
}
out := new(DaemonSetList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DaemonSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetSpec) DeepCopyInto(out *DaemonSetSpec) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
in.Template.DeepCopyInto(&out.Template)
in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
if in.RevisionHistoryLimit != nil {
in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetSpec.
func (in *DaemonSetSpec) DeepCopy() *DaemonSetSpec {
if in == nil {
return nil
}
out := new(DaemonSetSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) {
*out = *in
if in.CollisionCount != nil {
in, out := &in.CollisionCount, &out.CollisionCount
*out = new(int32)
**out = **in
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]DaemonSetCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetStatus.
func (in *DaemonSetStatus) DeepCopy() *DaemonSetStatus {
if in == nil {
return nil
}
out := new(DaemonSetStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetUpdateStrategy) DeepCopyInto(out *DaemonSetUpdateStrategy) {
*out = *in
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(RollingUpdateDaemonSet)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetUpdateStrategy.
func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy {
if in == nil {
return nil
}
out := new(DaemonSetUpdateStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Deployment) DeepCopyInto(out *Deployment) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment.
func (in *Deployment) DeepCopy() *Deployment {
if in == nil {
return nil
}
out := new(Deployment)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Deployment) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) {
*out = *in
in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition.
func (in *DeploymentCondition) DeepCopy() *DeploymentCondition {
if in == nil {
return nil
}
out := new(DeploymentCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentList) DeepCopyInto(out *DeploymentList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Deployment, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList.
func (in *DeploymentList) DeepCopy() *DeploymentList {
if in == nil {
return nil
}
out := new(DeploymentList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeploymentList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentRollback) DeepCopyInto(out *DeploymentRollback) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.UpdatedAnnotations != nil {
in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
out.RollbackTo = in.RollbackTo
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentRollback.
func (in *DeploymentRollback) DeepCopy() *DeploymentRollback {
if in == nil {
return nil
}
out := new(DeploymentRollback)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeploymentRollback) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
in.Template.DeepCopyInto(&out.Template)
in.Strategy.DeepCopyInto(&out.Strategy)
if in.RevisionHistoryLimit != nil {
in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
*out = new(int32)
**out = **in
}
if in.RollbackTo != nil {
in, out := &in.RollbackTo, &out.RollbackTo
*out = new(RollbackConfig)
**out = **in
}
if in.ProgressDeadlineSeconds != nil {
in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec.
func (in *DeploymentSpec) DeepCopy() *DeploymentSpec {
if in == nil {
return nil
}
out := new(DeploymentSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]DeploymentCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.CollisionCount != nil {
in, out := &in.CollisionCount, &out.CollisionCount
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus.
func (in *DeploymentStatus) DeepCopy() *DeploymentStatus {
if in == nil {
return nil
}
out := new(DeploymentStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) {
*out = *in
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(RollingUpdateDeployment)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy.
func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy {
if in == nil {
return nil
}
out := new(DeploymentStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet.
func (in *ReplicaSet) DeepCopy() *ReplicaSet {
if in == nil {
return nil
}
out := new(ReplicaSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ReplicaSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSetCondition) DeepCopyInto(out *ReplicaSetCondition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetCondition.
func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition {
if in == nil {
return nil
}
out := new(ReplicaSetCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ReplicaSet, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetList.
func (in *ReplicaSetList) DeepCopy() *ReplicaSetList {
if in == nil {
return nil
}
out := new(ReplicaSetList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ReplicaSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
in.Template.DeepCopyInto(&out.Template)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec.
func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec {
if in == nil {
return nil
}
out := new(ReplicaSetSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]ReplicaSetCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus.
func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus {
if in == nil {
return nil
}
out := new(ReplicaSetStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollbackConfig) DeepCopyInto(out *RollbackConfig) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollbackConfig.
func (in *RollbackConfig) DeepCopy() *RollbackConfig {
if in == nil {
return nil
}
out := new(RollbackConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) {
*out = *in
out.MaxUnavailable = in.MaxUnavailable
out.MaxSurge = in.MaxSurge
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDaemonSet.
func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet {
if in == nil {
return nil
}
out := new(RollingUpdateDaemonSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) {
*out = *in
out.MaxUnavailable = in.MaxUnavailable
out.MaxSurge = in.MaxSurge
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment.
func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment {
if in == nil {
return nil
}
out := new(RollingUpdateDeployment)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) {
*out = *in
if in.MaxUnavailable != nil {
in, out := &in.MaxUnavailable, &out.MaxUnavailable
*out = new(intstr.IntOrString)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatefulSetStrategy.
func (in *RollingUpdateStatefulSetStrategy) DeepCopy() *RollingUpdateStatefulSetStrategy {
if in == nil {
return nil
}
out := new(RollingUpdateStatefulSetStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSet) DeepCopyInto(out *StatefulSet) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSet.
func (in *StatefulSet) DeepCopy() *StatefulSet {
if in == nil {
return nil
}
out := new(StatefulSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *StatefulSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition.
func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition {
if in == nil {
return nil
}
out := new(StatefulSetCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]StatefulSet, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetList.
func (in *StatefulSetList) DeepCopy() *StatefulSetList {
if in == nil {
return nil
}
out := new(StatefulSetList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *StatefulSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetOrdinals) DeepCopyInto(out *StatefulSetOrdinals) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetOrdinals.
func (in *StatefulSetOrdinals) DeepCopy() *StatefulSetOrdinals {
if in == nil {
return nil
}
out := new(StatefulSetOrdinals)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopyInto(out *StatefulSetPersistentVolumeClaimRetentionPolicy) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetPersistentVolumeClaimRetentionPolicy.
func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopy() *StatefulSetPersistentVolumeClaimRetentionPolicy {
if in == nil {
return nil
}
out := new(StatefulSetPersistentVolumeClaimRetentionPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
in.Template.DeepCopyInto(&out.Template)
if in.VolumeClaimTemplates != nil {
in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates
*out = make([]core.PersistentVolumeClaim, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
if in.RevisionHistoryLimit != nil {
in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
*out = new(int32)
**out = **in
}
if in.PersistentVolumeClaimRetentionPolicy != nil {
in, out := &in.PersistentVolumeClaimRetentionPolicy, &out.PersistentVolumeClaimRetentionPolicy
*out = new(StatefulSetPersistentVolumeClaimRetentionPolicy)
**out = **in
}
if in.Ordinals != nil {
in, out := &in.Ordinals, &out.Ordinals
*out = new(StatefulSetOrdinals)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetSpec.
func (in *StatefulSetSpec) DeepCopy() *StatefulSetSpec {
if in == nil {
return nil
}
out := new(StatefulSetSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) {
*out = *in
if in.ObservedGeneration != nil {
in, out := &in.ObservedGeneration, &out.ObservedGeneration
*out = new(int64)
**out = **in
}
if in.CollisionCount != nil {
in, out := &in.CollisionCount, &out.CollisionCount
*out = new(int32)
**out = **in
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]StatefulSetCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetStatus.
func (in *StatefulSetStatus) DeepCopy() *StatefulSetStatus {
if in == nil {
return nil
}
out := new(StatefulSetStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetUpdateStrategy) DeepCopyInto(out *StatefulSetUpdateStrategy) {
*out = *in
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(RollingUpdateStatefulSetStrategy)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetUpdateStrategy.
func (in *StatefulSetUpdateStrategy) DeepCopy() *StatefulSetUpdateStrategy {
if in == nil {
return nil
}
out := new(StatefulSetUpdateStrategy)
in.DeepCopyInto(out)
return out
}

View File

@ -0,0 +1,12 @@
# See the OWNERS docs at https://go.k8s.io/owners
reviewers:
- thockin
- smarterclayton
- wojtek-t
- deads2k
- caesarxuchao
- sttts
- dims
emeritus_reviewers:
- ncdc

View File

@ -0,0 +1,38 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoscaling
// MetricSpecsAnnotation is the annotation which holds non-CPU-utilization HPA metric
// specs when converting the `Metrics` field from autoscaling/v2beta1
const MetricSpecsAnnotation = "autoscaling.alpha.kubernetes.io/metrics"
// MetricStatusesAnnotation is the annotation which holds non-CPU-utilization HPA metric
// statuses when converting the `CurrentMetrics` field from autoscaling/v2beta1
const MetricStatusesAnnotation = "autoscaling.alpha.kubernetes.io/current-metrics"
// HorizontalPodAutoscalerConditionsAnnotation is the annotation which holds the conditions
// of an HPA when converting the `Conditions` field from autoscaling/v2beta1
const HorizontalPodAutoscalerConditionsAnnotation = "autoscaling.alpha.kubernetes.io/conditions"
// DefaultCPUUtilization is the default value for CPU utilization, provided no other
// metrics are present. This is here because it's used by both the v2beta1 defaulting
// logic, and the pseudo-defaulting done in v1 conversion.
const DefaultCPUUtilization = 80
// BehaviorSpecsAnnotation is the annotation which holds the HPA constraints specs
// when converting the `Behavior` field from autoscaling/v2beta2
const BehaviorSpecsAnnotation = "autoscaling.alpha.kubernetes.io/behavior"

View File

@ -0,0 +1,19 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
package autoscaling // import "k8s.io/kubernetes/pkg/apis/autoscaling"

View File

@ -0,0 +1,58 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoscaling
// DropRoundTripHorizontalPodAutoscalerAnnotations removes any annotations used to serialize round-tripped fields from later API versions,
// and returns false if no changes were made and the original input object was returned.
// It should always be called when converting internal -> external versions, prior
// to setting any of the custom annotations:
//
// annotations, copiedAnnotations := DropRoundTripHorizontalPodAutoscalerAnnotations(externalObj.Annotations)
// externalObj.Annotations = annotations
//
// if internal.SomeField != nil {
// if !copiedAnnotations {
// externalObj.Annotations = DeepCopyStringMap(externalObj.Annotations)
// copiedAnnotations = true
// }
// externalObj.Annotations[...] = json.Marshal(...)
// }
func DropRoundTripHorizontalPodAutoscalerAnnotations(in map[string]string) (out map[string]string, copied bool) {
_, hasMetricsSpecs := in[MetricSpecsAnnotation]
_, hasBehaviorSpecs := in[BehaviorSpecsAnnotation]
_, hasMetricsStatuses := in[MetricStatusesAnnotation]
_, hasConditions := in[HorizontalPodAutoscalerConditionsAnnotation]
if hasMetricsSpecs || hasBehaviorSpecs || hasMetricsStatuses || hasConditions {
out = DeepCopyStringMap(in)
delete(out, MetricSpecsAnnotation)
delete(out, BehaviorSpecsAnnotation)
delete(out, MetricStatusesAnnotation)
delete(out, HorizontalPodAutoscalerConditionsAnnotation)
return out, true
}
return in, false
}
// DeepCopyStringMap returns a copy of the input map.
// If input is nil, an empty map is returned.
func DeepCopyStringMap(in map[string]string) map[string]string {
out := make(map[string]string, len(in))
for k, v := range in {
out[k] = v
}
return out
}

View File

@ -0,0 +1,55 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoscaling
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "autoscaling"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder points to a list of functions added to Scheme.
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme applies all the stored functions to the scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
// Adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&Scale{},
&HorizontalPodAutoscaler{},
&HorizontalPodAutoscalerList{},
)
return nil
}
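// Illustrative sketch (not part of the upstream file): registering this group's
// internal types into a fresh scheme; utilruntime is assumed to be
// k8s.io/apimachinery/pkg/util/runtime.
//
//	scheme := runtime.NewScheme()
//	utilruntime.Must(AddToScheme(scheme))
//	gvk := SchemeGroupVersion.WithKind("HorizontalPodAutoscaler")
//	_ = gvk // group "autoscaling", internal version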

View File

@ -0,0 +1,559 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoscaling
import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
api "k8s.io/kubernetes/pkg/apis/core"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Scale represents a scaling request for a resource.
type Scale struct {
metav1.TypeMeta
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
// +optional
metav1.ObjectMeta
// spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
// +optional
Spec ScaleSpec
// status represents the current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.
// +optional
Status ScaleStatus
}
// ScaleSpec describes the attributes of a scale subresource.
type ScaleSpec struct {
// replicas is the desired number of instances for the scaled object.
// +optional
Replicas int32
}
// ScaleStatus represents the current status of a scale subresource.
type ScaleStatus struct {
// replicas is the actual number of observed instances of the scaled object.
Replicas int32
// label query over pods that should match the replicas count. This is the same
// as the label selector, but in string format, to avoid introspection
// by clients. The string will be in the same format as the query-param syntax.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
// +optional
Selector string
}
// CrossVersionObjectReference contains enough information to let you identify the referred resource.
type CrossVersionObjectReference struct {
// kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
Kind string
// name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
Name string
// apiVersion is the API version of the referent
// +optional
APIVersion string
}
// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.
type HorizontalPodAutoscalerSpec struct {
// scaleTargetRef points to the target resource to scale, and is used to identify the pods for which metrics
// should be collected, as well as to actually change the replica count.
ScaleTargetRef CrossVersionObjectReference
// minReplicas is the lower limit for the number of replicas to which the autoscaler
// can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the
// alpha feature gate HPAScaleToZero is enabled and at least one Object or External
// metric is configured. Scaling is active as long as at least one metric value is
// available.
// +optional
MinReplicas *int32
// maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up.
// It cannot be less than minReplicas.
MaxReplicas int32
// metrics contains the specifications used to calculate the
// desired replica count (the maximum replica count across all metrics will
// be used). The desired replica count is calculated by multiplying the
// ratio between the target value and the current value by the current
// number of pods. Ergo, metrics used must decrease as the pod count is
// increased, and vice-versa. See the individual metric source types for
// more information about how each type of metric must respond.
// +optional
Metrics []MetricSpec
// behavior configures the scaling behavior of the target
// in both Up and Down directions (scaleUp and scaleDown fields respectively).
// If not set, the default HPAScalingRules for scale up and scale down are used.
// +optional
Behavior *HorizontalPodAutoscalerBehavior
}
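// Informal worked example of the rule above: with 4 current pods, a target
// average CPU utilization of 50%, and a measured average of 100%, the ratio is
// 100/50 = 2, so the desired count is 4 * 2 = 8 replicas; when several metrics
// are configured, the highest resulting count is used.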
// HorizontalPodAutoscalerBehavior configures a scaling behavior for Up and Down direction
// (scaleUp and scaleDown fields respectively).
type HorizontalPodAutoscalerBehavior struct {
// scaleUp is scaling policy for scaling Up.
// If not set, the default value is the higher of:
// * increase no more than 4 pods per 60 seconds
// * double the number of pods per 60 seconds
// No stabilization is used.
// +optional
ScaleUp *HPAScalingRules
// scaleDown is scaling policy for scaling Down.
// If not set, the default value allows scaling down to minReplicas pods, with a
// 300 second stabilization window (i.e., the highest recommendation for
// the last 300sec is used).
// +optional
ScaleDown *HPAScalingRules
}
// ScalingPolicySelect is used to specify which policy should be used while scaling in a certain direction
type ScalingPolicySelect string
const (
// MaxPolicySelect selects the policy with the highest possible change.
MaxPolicySelect ScalingPolicySelect = "Max"
// MinPolicySelect selects the policy with the lowest possible change.
MinPolicySelect ScalingPolicySelect = "Min"
// DisabledPolicySelect disables the scaling in this direction.
DisabledPolicySelect ScalingPolicySelect = "Disabled"
)
// HPAScalingRules configures the scaling behavior for one direction.
// These Rules are applied after calculating DesiredReplicas from metrics for the HPA.
// They can limit the scaling velocity by specifying scaling policies.
// They can prevent flapping by specifying the stabilization window, so that the
// number of replicas is not set instantly; instead, the safest value from the stabilization
// window is chosen.
type HPAScalingRules struct {
// StabilizationWindowSeconds is the number of seconds for which past recommendations should be
// considered while scaling up or scaling down.
// StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour).
// If not set, use the default values:
// - For scale up: 0 (i.e. no stabilization is done).
// - For scale down: 300 (i.e. the stabilization window is 300 seconds long).
// +optional
StabilizationWindowSeconds *int32
// selectPolicy is used to specify which policy should be used.
// If not set, the default value MaxPolicySelect is used.
// +optional
SelectPolicy *ScalingPolicySelect
// policies is a list of potential scaling policies which can be used during scaling.
// At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid
// +optional
Policies []HPAScalingPolicy
}
// HPAScalingPolicyType is the type of the policy which could be used while making scaling decisions.
type HPAScalingPolicyType string
const (
// PodsScalingPolicy is a policy used to specify a change in absolute number of pods.
PodsScalingPolicy HPAScalingPolicyType = "Pods"
// PercentScalingPolicy is a policy used to specify a relative amount of change with respect to
// the current number of pods.
PercentScalingPolicy HPAScalingPolicyType = "Percent"
)
// HPAScalingPolicy is a single policy which must hold true for a specified past interval.
type HPAScalingPolicy struct {
// Type is used to specify the scaling policy.
Type HPAScalingPolicyType
// Value contains the amount of change which is permitted by the policy.
// It must be greater than zero
Value int32
// PeriodSeconds specifies the window of time for which the policy should hold true.
// PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).
PeriodSeconds int32
}
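// Illustrative sketch (not part of the upstream file): a scale-down rule with a
// 300 second stabilization window that removes at most 10% of pods per minute.
//
//	window := int32(300)
//	scaleDown := HPAScalingRules{
//		StabilizationWindowSeconds: &window,
//		Policies: []HPAScalingPolicy{
//			{Type: PercentScalingPolicy, Value: 10, PeriodSeconds: 60},
//		},
//	}
//	_ = HorizontalPodAutoscalerBehavior{ScaleDown: &scaleDown}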
// MetricSourceType indicates the type of metric.
type MetricSourceType string
const (
// ObjectMetricSourceType is a metric describing a kubernetes object
// (for example, hits-per-second on an Ingress object).
ObjectMetricSourceType MetricSourceType = "Object"
// PodsMetricSourceType is a metric describing each pod in the current scale
// target (for example, transactions-processed-per-second). The values
// will be averaged together before being compared to the target value.
PodsMetricSourceType MetricSourceType = "Pods"
// ResourceMetricSourceType is a resource metric known to Kubernetes, as
// specified in requests and limits, describing each pod in the current
// scale target (e.g. CPU or memory). Such metrics are built in to
// Kubernetes, and have special scaling options on top of those available
// to normal per-pod metrics (the "pods" source).
ResourceMetricSourceType MetricSourceType = "Resource"
// ExternalMetricSourceType is a global metric that is not associated
// with any Kubernetes object. It allows autoscaling based on information
// coming from components running outside of cluster
// (for example length of queue in cloud messaging service, or
// QPS from loadbalancer running outside of cluster).
ExternalMetricSourceType MetricSourceType = "External"
// ContainerResourceMetricSourceType is a resource metric known to Kubernetes, as
// specified in requests and limits, describing a single container in each pod in the current
// scale target (e.g. CPU or memory). Such metrics are built in to
// Kubernetes, and have special scaling options on top of those available
// to normal per-pod metrics (the "pods" source).
ContainerResourceMetricSourceType MetricSourceType = "ContainerResource"
)
// MetricSpec specifies how to scale based on a single metric
// (only `type` and one other matching field should be set at once).
type MetricSpec struct {
// Type is the type of metric source. It should be one of "Object",
// "Pods" or "Resource", each mapping to a matching field in the object.
Type MetricSourceType
// Object refers to a metric describing a single kubernetes object
// (for example, hits-per-second on an Ingress object).
// +optional
Object *ObjectMetricSource
// Pods refers to a metric describing each pod in the current scale target
// (for example, transactions-processed-per-second). The values will be
// averaged together before being compared to the target value.
// +optional
Pods *PodsMetricSource
// Resource refers to a resource metric (such as those specified in
// requests and limits) known to Kubernetes describing each pod in the
// current scale target (e.g. CPU or memory). Such metrics are built in to
// Kubernetes, and have special scaling options on top of those available
// to normal per-pod metrics using the "pods" source.
// +optional
Resource *ResourceMetricSource
// ContainerResource refers to a resource metric (such as those specified in
// requests and limits) known to Kubernetes describing a single container in each pod of the
// current scale target (e.g. CPU or memory). Such metrics are built in to
// Kubernetes, and have special scaling options on top of those available
// to normal per-pod metrics using the "pods" source.
// +optional
ContainerResource *ContainerResourceMetricSource
// External refers to a global metric that is not associated
// with any Kubernetes object. It allows autoscaling based on information
// coming from components running outside of cluster
// (for example length of queue in cloud messaging service, or
// QPS from loadbalancer running outside of cluster).
// +optional
External *ExternalMetricSource
}
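// Illustrative sketch (not part of the upstream file): a Resource metric spec
// targeting the package's DefaultCPUUtilization (80%) as average CPU utilization.
//
//	util := int32(DefaultCPUUtilization)
//	cpu := MetricSpec{
//		Type: ResourceMetricSourceType,
//		Resource: &ResourceMetricSource{
//			Name: api.ResourceCPU,
//			Target: MetricTarget{
//				Type:               UtilizationMetricType,
//				AverageUtilization: &util,
//			},
//		},
//	}
//	_ = cpu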
// ObjectMetricSource indicates how to scale on a metric describing a
// kubernetes object (for example, hits-per-second on an Ingress object).
type ObjectMetricSource struct {
DescribedObject CrossVersionObjectReference
Target MetricTarget
Metric MetricIdentifier
}
// PodsMetricSource indicates how to scale on a metric describing each pod in
// the current scale target (for example, transactions-processed-per-second).
// The values will be averaged together before being compared to the target
// value.
type PodsMetricSource struct {
// metric identifies the target metric by name and selector
Metric MetricIdentifier
// target specifies the target value for the given metric
Target MetricTarget
}
// ResourceMetricSource indicates how to scale on a resource metric known to
// Kubernetes, as specified in requests and limits, describing each pod in the
// current scale target (e.g. CPU or memory). The values will be averaged
// together before being compared to the target. Such metrics are built in to
// Kubernetes, and have special scaling options on top of those available to
// normal per-pod metrics using the "pods" source. Only one "target" type
// should be set.
type ResourceMetricSource struct {
// Name is the name of the resource in question.
Name api.ResourceName
// Target specifies the target value for the given metric
Target MetricTarget
}
// ContainerResourceMetricSource indicates how to scale on a resource metric known to
// Kubernetes, as specified in the requests and limits, describing a single container in
// each of the pods of the current scale target (e.g. CPU or memory). The values will be
// averaged together before being compared to the target. Such metrics are built into
// Kubernetes, and have special scaling options on top of those available to
// normal per-pod metrics using the "pods" source. Only one "target" type
// should be set.
type ContainerResourceMetricSource struct {
// name is the name of the resource in question.
Name api.ResourceName
// container is the name of the container in the pods of the scaling target.
Container string
// target specifies the target value for the given metric
Target MetricTarget
}
// ExternalMetricSource indicates how to scale on a metric not associated with
// any Kubernetes object (for example length of queue in cloud
// messaging service, or QPS from loadbalancer running outside of cluster).
type ExternalMetricSource struct {
// Metric identifies the target metric by name and selector
Metric MetricIdentifier
// Target specifies the target value for the given metric
Target MetricTarget
}
// MetricIdentifier defines the name and optionally selector for a metric
type MetricIdentifier struct {
// Name is the name of the given metric
Name string
// Selector is the selector for the given metric
// it is the string-encoded form of a standard kubernetes label selector
// +optional
Selector *metav1.LabelSelector
}
// MetricTarget defines the target value, average value, or average utilization of a specific metric
type MetricTarget struct {
// Type represents whether the metric type is Utilization, Value, or AverageValue
Type MetricTargetType
// Value is the target value of the metric (as a quantity).
Value *resource.Quantity
// AverageValue is the target value of the average of the
// metric across all relevant pods (as a quantity)
AverageValue *resource.Quantity
// AverageUtilization is the target value of the average of the
// resource metric across all relevant pods, represented as a percentage of
// the requested value of the resource for the pods.
// Currently only valid for Resource metric source type
AverageUtilization *int32
}
// MetricTargetType specifies the type of metric being targeted, and should be either
// "Value", "AverageValue", or "Utilization"
type MetricTargetType string
const (
// UtilizationMetricType is a possible value for MetricTarget.Type.
UtilizationMetricType MetricTargetType = "Utilization"
// ValueMetricType is a possible value for MetricTarget.Type.
ValueMetricType MetricTargetType = "Value"
// AverageValueMetricType is a possible value for MetricTarget.Type.
AverageValueMetricType MetricTargetType = "AverageValue"
)
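// Illustrative sketch (not part of the upstream file): the three target shapes
// side by side; resource.MustParse is from k8s.io/apimachinery/pkg/api/resource.
//
//	qty := resource.MustParse("100m")
//	util := int32(75)
//	byValue := MetricTarget{Type: ValueMetricType, Value: &qty}
//	byAverage := MetricTarget{Type: AverageValueMetricType, AverageValue: &qty}
//	byUtilization := MetricTarget{Type: UtilizationMetricType, AverageUtilization: &util}
//	_, _, _ = byValue, byAverage, byUtilization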
// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.
type HorizontalPodAutoscalerStatus struct {
// ObservedGeneration is the most recent generation observed by this autoscaler.
// +optional
ObservedGeneration *int64
// LastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods,
// used by the autoscaler to control how often the number of pods is changed.
// +optional
LastScaleTime *metav1.Time
// CurrentReplicas is current number of replicas of pods managed by this autoscaler,
// as last seen by the autoscaler.
CurrentReplicas int32
// DesiredReplicas is the desired number of replicas of pods managed by this autoscaler,
// as last calculated by the autoscaler.
DesiredReplicas int32
// CurrentMetrics is the last read state of the metrics used by this autoscaler.
// +optional
CurrentMetrics []MetricStatus
// Conditions is the set of conditions required for this autoscaler to scale its target,
// and indicates whether or not those conditions are met.
Conditions []HorizontalPodAutoscalerCondition
}
// ConditionStatus indicates the status of a condition (true, false, or unknown).
type ConditionStatus string
// These are valid condition statuses. "ConditionTrue" means a resource is in the condition;
// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes
// can't decide if a resource is in the condition or not. In the future, we could add other
// intermediate conditions, e.g. ConditionDegraded.
const (
ConditionTrue ConditionStatus = "True"
ConditionFalse ConditionStatus = "False"
ConditionUnknown ConditionStatus = "Unknown"
)
// HorizontalPodAutoscalerConditionType are the valid conditions of
// a HorizontalPodAutoscaler.
type HorizontalPodAutoscalerConditionType string
const (
// ScalingActive indicates that the HPA controller is able to scale if necessary:
// it's correctly configured, can fetch the desired metrics, and isn't disabled.
ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive"
// AbleToScale indicates a lack of transient issues which prevent scaling from occurring,
// such as being in a backoff window, or being unable to access/update the target scale.
AbleToScale HorizontalPodAutoscalerConditionType = "AbleToScale"
// ScalingLimited indicates that the calculated scale based on metrics would be above or
// below the range for the HPA, and has thus been capped.
ScalingLimited HorizontalPodAutoscalerConditionType = "ScalingLimited"
)
// HorizontalPodAutoscalerCondition describes the state of
// a HorizontalPodAutoscaler at a certain point.
type HorizontalPodAutoscalerCondition struct {
// Type describes the current condition
Type HorizontalPodAutoscalerConditionType
// Status is the status of the condition (True, False, Unknown)
Status ConditionStatus
// LastTransitionTime is the last time the condition transitioned from
// one status to another
// +optional
LastTransitionTime metav1.Time
// Reason is the reason for the condition's last transition.
// +optional
Reason string
// Message is a human-readable explanation containing details about
// the transition
// +optional
Message string
}
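
// Illustrative aside (not part of this file): a small sketch of how a client might
// inspect these conditions, using the versioned k8s.io/api/autoscaling/v2 types that
// mirror the internal ones above. The helper name and the sample condition are
// assumptions made for the example.

package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	corev1 "k8s.io/api/core/v1"
)

// scalingActive reports whether the ScalingActive condition is currently True.
func scalingActive(status autoscalingv2.HorizontalPodAutoscalerStatus) bool {
	for _, c := range status.Conditions {
		if c.Type == autoscalingv2.ScalingActive {
			return c.Status == corev1.ConditionTrue
		}
	}
	return false
}

func main() {
	status := autoscalingv2.HorizontalPodAutoscalerStatus{
		Conditions: []autoscalingv2.HorizontalPodAutoscalerCondition{
			{Type: autoscalingv2.ScalingActive, Status: corev1.ConditionTrue, Reason: "ValidMetricFound"},
		},
	}
	fmt.Println(scalingActive(status))
}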
// MetricStatus describes the last-read state of a single metric.
type MetricStatus struct {
	// Type is the type of metric source. It will be one of "ContainerResource",
	// "External", "Object", "Pods" or "Resource", each corresponding to a matching field in the object.
Type MetricSourceType
// Object refers to a metric describing a single kubernetes object
// (for example, hits-per-second on an Ingress object).
// +optional
Object *ObjectMetricStatus
// Pods refers to a metric describing each pod in the current scale target
// (for example, transactions-processed-per-second). The values will be
// averaged together before being compared to the target value.
// +optional
Pods *PodsMetricStatus
// Resource refers to a resource metric (such as those specified in
// requests and limits) known to Kubernetes describing each pod in the
// current scale target (e.g. CPU or memory). Such metrics are built in to
// Kubernetes, and have special scaling options on top of those available
// to normal per-pod metrics using the "pods" source.
// +optional
Resource *ResourceMetricStatus
// ContainerResource refers to a resource metric (such as those specified in
// requests and limits) known to Kubernetes describing a single container in each pod in the
// current scale target (e.g. CPU or memory). Such metrics are built in to
// Kubernetes, and have special scaling options on top of those available
// to normal per-pod metrics using the "pods" source.
// +optional
ContainerResource *ContainerResourceMetricStatus
// External refers to a global metric that is not associated
// with any Kubernetes object. It allows autoscaling based on information
// coming from components running outside of cluster
// (for example length of queue in cloud messaging service, or
// QPS from loadbalancer running outside of cluster).
// +optional
External *ExternalMetricStatus
}
// ObjectMetricStatus indicates the current value of a metric describing a
// kubernetes object (for example, hits-per-second on an Ingress object).
type ObjectMetricStatus struct {
Metric MetricIdentifier
Current MetricValueStatus
DescribedObject CrossVersionObjectReference
}
// PodsMetricStatus indicates the current value of a metric describing each pod in
// the current scale target (for example, transactions-processed-per-second).
type PodsMetricStatus struct {
Metric MetricIdentifier
Current MetricValueStatus
}
// ResourceMetricStatus indicates the current value of a resource metric known to
// Kubernetes, as specified in requests and limits, describing each pod in the
// current scale target (e.g. CPU or memory). Such metrics are built in to
// Kubernetes, and have special scaling options on top of those available to
// normal per-pod metrics using the "pods" source.
type ResourceMetricStatus struct {
// name is the name of the resource in question.
Name api.ResourceName
Current MetricValueStatus
}
// ContainerResourceMetricStatus indicates the current value of a resource metric known to
// Kubernetes, as specified in requests and limits, describing a single container
// in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to
// Kubernetes, and have special scaling options on top of those available to
// normal per-pod metrics using the "pods" source.
type ContainerResourceMetricStatus struct {
// name is the name of the resource in question.
Name api.ResourceName
Container string
Current MetricValueStatus
}
// ExternalMetricStatus indicates the current value of a global metric
// not associated with any Kubernetes object.
type ExternalMetricStatus struct {
Metric MetricIdentifier
Current MetricValueStatus
}
// MetricValueStatus indicates the current value of a metric.
type MetricValueStatus struct {
Value *resource.Quantity
AverageValue *resource.Quantity
AverageUtilization *int32
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// HorizontalPodAutoscaler is the configuration for a horizontal pod
// autoscaler, which automatically manages the replica count of any resource
// implementing the scale subresource based on the metrics specified.
type HorizontalPodAutoscaler struct {
metav1.TypeMeta
// Metadata is the standard object metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta
// spec is the specification for the behaviour of the autoscaler.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
// +optional
Spec HorizontalPodAutoscalerSpec
// status is the current information about the autoscaler.
// +optional
Status HorizontalPodAutoscalerStatus
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects.
type HorizontalPodAutoscalerList struct {
metav1.TypeMeta
// Metadata is the standard list metadata.
// +optional
metav1.ListMeta
// items is the list of horizontal pod autoscaler objects.
Items []HorizontalPodAutoscaler
}

View File

@ -0,0 +1,670 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package autoscaling
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerResourceMetricSource) DeepCopyInto(out *ContainerResourceMetricSource) {
*out = *in
in.Target.DeepCopyInto(&out.Target)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourceMetricSource.
func (in *ContainerResourceMetricSource) DeepCopy() *ContainerResourceMetricSource {
if in == nil {
return nil
}
out := new(ContainerResourceMetricSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerResourceMetricStatus) DeepCopyInto(out *ContainerResourceMetricStatus) {
*out = *in
in.Current.DeepCopyInto(&out.Current)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourceMetricStatus.
func (in *ContainerResourceMetricStatus) DeepCopy() *ContainerResourceMetricStatus {
if in == nil {
return nil
}
out := new(ContainerResourceMetricStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossVersionObjectReference.
func (in *CrossVersionObjectReference) DeepCopy() *CrossVersionObjectReference {
if in == nil {
return nil
}
out := new(CrossVersionObjectReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExternalMetricSource) DeepCopyInto(out *ExternalMetricSource) {
*out = *in
in.Metric.DeepCopyInto(&out.Metric)
in.Target.DeepCopyInto(&out.Target)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricSource.
func (in *ExternalMetricSource) DeepCopy() *ExternalMetricSource {
if in == nil {
return nil
}
out := new(ExternalMetricSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExternalMetricStatus) DeepCopyInto(out *ExternalMetricStatus) {
*out = *in
in.Metric.DeepCopyInto(&out.Metric)
in.Current.DeepCopyInto(&out.Current)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricStatus.
func (in *ExternalMetricStatus) DeepCopy() *ExternalMetricStatus {
if in == nil {
return nil
}
out := new(ExternalMetricStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HPAScalingPolicy) DeepCopyInto(out *HPAScalingPolicy) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HPAScalingPolicy.
func (in *HPAScalingPolicy) DeepCopy() *HPAScalingPolicy {
if in == nil {
return nil
}
out := new(HPAScalingPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HPAScalingRules) DeepCopyInto(out *HPAScalingRules) {
*out = *in
if in.StabilizationWindowSeconds != nil {
in, out := &in.StabilizationWindowSeconds, &out.StabilizationWindowSeconds
*out = new(int32)
**out = **in
}
if in.SelectPolicy != nil {
in, out := &in.SelectPolicy, &out.SelectPolicy
*out = new(ScalingPolicySelect)
**out = **in
}
if in.Policies != nil {
in, out := &in.Policies, &out.Policies
*out = make([]HPAScalingPolicy, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HPAScalingRules.
func (in *HPAScalingRules) DeepCopy() *HPAScalingRules {
if in == nil {
return nil
}
out := new(HPAScalingRules)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HorizontalPodAutoscaler) DeepCopyInto(out *HorizontalPodAutoscaler) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscaler.
func (in *HorizontalPodAutoscaler) DeepCopy() *HorizontalPodAutoscaler {
if in == nil {
return nil
}
out := new(HorizontalPodAutoscaler)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HorizontalPodAutoscaler) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HorizontalPodAutoscalerBehavior) DeepCopyInto(out *HorizontalPodAutoscalerBehavior) {
*out = *in
if in.ScaleUp != nil {
in, out := &in.ScaleUp, &out.ScaleUp
*out = new(HPAScalingRules)
(*in).DeepCopyInto(*out)
}
if in.ScaleDown != nil {
in, out := &in.ScaleDown, &out.ScaleDown
*out = new(HPAScalingRules)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerBehavior.
func (in *HorizontalPodAutoscalerBehavior) DeepCopy() *HorizontalPodAutoscalerBehavior {
if in == nil {
return nil
}
out := new(HorizontalPodAutoscalerBehavior)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HorizontalPodAutoscalerCondition) DeepCopyInto(out *HorizontalPodAutoscalerCondition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerCondition.
func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerCondition {
if in == nil {
return nil
}
out := new(HorizontalPodAutoscalerCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]HorizontalPodAutoscaler, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerList.
func (in *HorizontalPodAutoscalerList) DeepCopy() *HorizontalPodAutoscalerList {
if in == nil {
return nil
}
out := new(HorizontalPodAutoscalerList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HorizontalPodAutoscalerList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HorizontalPodAutoscalerSpec) DeepCopyInto(out *HorizontalPodAutoscalerSpec) {
*out = *in
out.ScaleTargetRef = in.ScaleTargetRef
if in.MinReplicas != nil {
in, out := &in.MinReplicas, &out.MinReplicas
*out = new(int32)
**out = **in
}
if in.Metrics != nil {
in, out := &in.Metrics, &out.Metrics
*out = make([]MetricSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Behavior != nil {
in, out := &in.Behavior, &out.Behavior
*out = new(HorizontalPodAutoscalerBehavior)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerSpec.
func (in *HorizontalPodAutoscalerSpec) DeepCopy() *HorizontalPodAutoscalerSpec {
if in == nil {
return nil
}
out := new(HorizontalPodAutoscalerSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HorizontalPodAutoscalerStatus) DeepCopyInto(out *HorizontalPodAutoscalerStatus) {
*out = *in
if in.ObservedGeneration != nil {
in, out := &in.ObservedGeneration, &out.ObservedGeneration
*out = new(int64)
**out = **in
}
if in.LastScaleTime != nil {
in, out := &in.LastScaleTime, &out.LastScaleTime
*out = (*in).DeepCopy()
}
if in.CurrentMetrics != nil {
in, out := &in.CurrentMetrics, &out.CurrentMetrics
*out = make([]MetricStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]HorizontalPodAutoscalerCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerStatus.
func (in *HorizontalPodAutoscalerStatus) DeepCopy() *HorizontalPodAutoscalerStatus {
if in == nil {
return nil
}
out := new(HorizontalPodAutoscalerStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricIdentifier) DeepCopyInto(out *MetricIdentifier) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricIdentifier.
func (in *MetricIdentifier) DeepCopy() *MetricIdentifier {
if in == nil {
return nil
}
out := new(MetricIdentifier)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricSpec) DeepCopyInto(out *MetricSpec) {
*out = *in
if in.Object != nil {
in, out := &in.Object, &out.Object
*out = new(ObjectMetricSource)
(*in).DeepCopyInto(*out)
}
if in.Pods != nil {
in, out := &in.Pods, &out.Pods
*out = new(PodsMetricSource)
(*in).DeepCopyInto(*out)
}
if in.Resource != nil {
in, out := &in.Resource, &out.Resource
*out = new(ResourceMetricSource)
(*in).DeepCopyInto(*out)
}
if in.ContainerResource != nil {
in, out := &in.ContainerResource, &out.ContainerResource
*out = new(ContainerResourceMetricSource)
(*in).DeepCopyInto(*out)
}
if in.External != nil {
in, out := &in.External, &out.External
*out = new(ExternalMetricSource)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricSpec.
func (in *MetricSpec) DeepCopy() *MetricSpec {
if in == nil {
return nil
}
out := new(MetricSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricStatus) DeepCopyInto(out *MetricStatus) {
*out = *in
if in.Object != nil {
in, out := &in.Object, &out.Object
*out = new(ObjectMetricStatus)
(*in).DeepCopyInto(*out)
}
if in.Pods != nil {
in, out := &in.Pods, &out.Pods
*out = new(PodsMetricStatus)
(*in).DeepCopyInto(*out)
}
if in.Resource != nil {
in, out := &in.Resource, &out.Resource
*out = new(ResourceMetricStatus)
(*in).DeepCopyInto(*out)
}
if in.ContainerResource != nil {
in, out := &in.ContainerResource, &out.ContainerResource
*out = new(ContainerResourceMetricStatus)
(*in).DeepCopyInto(*out)
}
if in.External != nil {
in, out := &in.External, &out.External
*out = new(ExternalMetricStatus)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatus.
func (in *MetricStatus) DeepCopy() *MetricStatus {
if in == nil {
return nil
}
out := new(MetricStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricTarget) DeepCopyInto(out *MetricTarget) {
*out = *in
if in.Value != nil {
in, out := &in.Value, &out.Value
x := (*in).DeepCopy()
*out = &x
}
if in.AverageValue != nil {
in, out := &in.AverageValue, &out.AverageValue
x := (*in).DeepCopy()
*out = &x
}
if in.AverageUtilization != nil {
in, out := &in.AverageUtilization, &out.AverageUtilization
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricTarget.
func (in *MetricTarget) DeepCopy() *MetricTarget {
if in == nil {
return nil
}
out := new(MetricTarget)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricValueStatus) DeepCopyInto(out *MetricValueStatus) {
*out = *in
if in.Value != nil {
in, out := &in.Value, &out.Value
x := (*in).DeepCopy()
*out = &x
}
if in.AverageValue != nil {
in, out := &in.AverageValue, &out.AverageValue
x := (*in).DeepCopy()
*out = &x
}
if in.AverageUtilization != nil {
in, out := &in.AverageUtilization, &out.AverageUtilization
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricValueStatus.
func (in *MetricValueStatus) DeepCopy() *MetricValueStatus {
if in == nil {
return nil
}
out := new(MetricValueStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ObjectMetricSource) DeepCopyInto(out *ObjectMetricSource) {
*out = *in
out.DescribedObject = in.DescribedObject
in.Target.DeepCopyInto(&out.Target)
in.Metric.DeepCopyInto(&out.Metric)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricSource.
func (in *ObjectMetricSource) DeepCopy() *ObjectMetricSource {
if in == nil {
return nil
}
out := new(ObjectMetricSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ObjectMetricStatus) DeepCopyInto(out *ObjectMetricStatus) {
*out = *in
in.Metric.DeepCopyInto(&out.Metric)
in.Current.DeepCopyInto(&out.Current)
out.DescribedObject = in.DescribedObject
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricStatus.
func (in *ObjectMetricStatus) DeepCopy() *ObjectMetricStatus {
if in == nil {
return nil
}
out := new(ObjectMetricStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodsMetricSource) DeepCopyInto(out *PodsMetricSource) {
*out = *in
in.Metric.DeepCopyInto(&out.Metric)
in.Target.DeepCopyInto(&out.Target)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricSource.
func (in *PodsMetricSource) DeepCopy() *PodsMetricSource {
if in == nil {
return nil
}
out := new(PodsMetricSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodsMetricStatus) DeepCopyInto(out *PodsMetricStatus) {
*out = *in
in.Metric.DeepCopyInto(&out.Metric)
in.Current.DeepCopyInto(&out.Current)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricStatus.
func (in *PodsMetricStatus) DeepCopy() *PodsMetricStatus {
if in == nil {
return nil
}
out := new(PodsMetricStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceMetricSource) DeepCopyInto(out *ResourceMetricSource) {
*out = *in
in.Target.DeepCopyInto(&out.Target)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricSource.
func (in *ResourceMetricSource) DeepCopy() *ResourceMetricSource {
if in == nil {
return nil
}
out := new(ResourceMetricSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceMetricStatus) DeepCopyInto(out *ResourceMetricStatus) {
*out = *in
in.Current.DeepCopyInto(&out.Current)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricStatus.
func (in *ResourceMetricStatus) DeepCopy() *ResourceMetricStatus {
if in == nil {
return nil
}
out := new(ResourceMetricStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Scale) DeepCopyInto(out *Scale) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
out.Status = in.Status
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale.
func (in *Scale) DeepCopy() *Scale {
if in == nil {
return nil
}
out := new(Scale)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Scale) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec.
func (in *ScaleSpec) DeepCopy() *ScaleSpec {
if in == nil {
return nil
}
out := new(ScaleSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus.
func (in *ScaleStatus) DeepCopy() *ScaleStatus {
if in == nil {
return nil
}
out := new(ScaleStatus)
in.DeepCopyInto(out)
return out
}
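
// Illustrative aside (not part of this generated file): the DeepCopy helpers above
// duplicate nested pointers and maps as well as scalar fields, so the copy can be
// mutated independently. A minimal sketch of that behaviour, shown with the
// equivalent generated helpers of the versioned k8s.io/api/autoscaling/v2 package;
// the metric name and labels are illustrative assumptions.

package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	orig := &autoscalingv2.MetricIdentifier{
		Name:     "requests-per-second",
		Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "frontend"}},
	}

	// DeepCopy duplicates the nested selector as well, so mutating the copy
	// leaves the original untouched.
	cp := orig.DeepCopy()
	cp.Selector.MatchLabels["app"] = "backend"

	fmt.Println(orig.Selector.MatchLabels["app"], cp.Selector.MatchLabels["app"]) // frontend backend
}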

8
e2e/vendor/k8s.io/kubernetes/pkg/apis/batch/OWNERS generated vendored Normal file
View File

@ -0,0 +1,8 @@
# See the OWNERS docs at https://go.k8s.io/owners
# approval on api packages bubbles to api-approvers
reviewers:
- sig-apps-api-reviewers
- sig-apps-api-approvers
labels:
- sig/apps

19
e2e/vendor/k8s.io/kubernetes/pkg/apis/batch/doc.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
package batch // import "k8s.io/kubernetes/pkg/apis/batch"

View File

@ -0,0 +1,56 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package batch
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "batch"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder points to a list of functions added to Scheme.
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme applies all the stored functions to the scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
// Adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&Job{},
&JobList{},
&CronJob{},
&CronJobList{},
)
return nil
}
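
// Illustrative aside (not part of this file): a short sketch of how such a
// SchemeBuilder/AddToScheme pair is consumed. It uses the versioned
// k8s.io/api/batch/v1 package, since this internal package is not importable from
// outside the kubernetes module, but the registration pattern is the same.

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	if err := batchv1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// After registration the scheme can resolve batch/v1 kinds such as Job.
	gvk := batchv1.SchemeGroupVersion.WithKind("Job")
	fmt.Println(scheme.Recognizes(gvk)) // true
}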

766
e2e/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go generated vendored Normal file
View File

@ -0,0 +1,766 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package batch
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
api "k8s.io/kubernetes/pkg/apis/core"
)
const (
// Unprefixed labels are reserved for end-users
// so we will add a batch.kubernetes.io to designate these labels as official Kubernetes labels.
// See https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#label-selector-and-annotation-conventions
labelPrefix = "batch.kubernetes.io/"
// JobTrackingFinalizer is a finalizer for Job's pods. It prevents them from
// being deleted before being accounted in the Job status.
//
// Additionally, the apiserver and job controller use this string as a Job
// annotation, to mark Jobs that are being tracked using pod finalizers.
// However, this behavior is deprecated in kubernetes 1.26. This means that, in
// 1.27+, one release after JobTrackingWithFinalizers graduates to GA, the
// apiserver and job controller will ignore this annotation and they will
// always track jobs using finalizers.
JobTrackingFinalizer = labelPrefix + "job-tracking"
	// LegacyJobNameLabel and LegacyControllerUidLabel are legacy labels that were set without the batch.kubernetes.io prefix.
LegacyJobNameLabel = "job-name"
LegacyControllerUidLabel = "controller-uid"
// JobName is a user friendly way to refer to jobs and is set in the labels for jobs.
JobNameLabel = labelPrefix + LegacyJobNameLabel
// Controller UID is used for selectors and labels for jobs
ControllerUidLabel = labelPrefix + LegacyControllerUidLabel
// Annotation indicating the number of failures for the index corresponding
// to the pod, which are counted towards the backoff limit.
JobIndexFailureCountAnnotation = labelPrefix + "job-index-failure-count"
// Annotation indicating the number of failures for the index corresponding
// to the pod, which don't count towards the backoff limit, according to the
// pod failure policy. When the annotation is absent zero is implied.
JobIndexIgnoredFailureCountAnnotation = labelPrefix + "job-index-ignored-failure-count"
// JobControllerName reserved value for the managedBy field for the built-in
// Job controller.
JobControllerName = "kubernetes.io/job-controller"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Job represents the configuration of a single job.
type Job struct {
metav1.TypeMeta
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta
// Specification of the desired behavior of a job.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Spec JobSpec
// Current status of a job.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Status JobStatus
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// JobList is a collection of jobs.
type JobList struct {
metav1.TypeMeta
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ListMeta
// items is the list of Jobs.
Items []Job
}
// JobTemplateSpec describes the data a Job should have when created from a template
type JobTemplateSpec struct {
// Standard object's metadata of the jobs created from this template.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta
// Specification of the desired behavior of the job.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Spec JobSpec
}
// CompletionMode specifies how Pod completions of a Job are tracked.
type CompletionMode string
const (
// NonIndexedCompletion is a Job completion mode. In this mode, the Job is
// considered complete when there have been .spec.completions
// successfully completed Pods. Pod completions are homologous to each other.
NonIndexedCompletion CompletionMode = "NonIndexed"
// IndexedCompletion is a Job completion mode. In this mode, the Pods of a
// Job get an associated completion index from 0 to (.spec.completions - 1).
// The Job is considered complete when a Pod completes for each completion
// index.
IndexedCompletion CompletionMode = "Indexed"
)
// PodFailurePolicyAction specifies how a Pod failure is handled.
// +enum
type PodFailurePolicyAction string
const (
// This is an action which might be taken on a pod failure - mark the
// pod's job as Failed and terminate all running pods.
PodFailurePolicyActionFailJob PodFailurePolicyAction = "FailJob"
// This is an action which might be taken on a pod failure - mark the
// Job's index as failed to avoid restarts within this index. This action
// can only be used when backoffLimitPerIndex is set.
// This value is beta-level.
PodFailurePolicyActionFailIndex PodFailurePolicyAction = "FailIndex"
// This is an action which might be taken on a pod failure - the counter towards
// .backoffLimit, represented by the job's .status.failed field, is not
// incremented and a replacement pod is created.
PodFailurePolicyActionIgnore PodFailurePolicyAction = "Ignore"
// This is an action which might be taken on a pod failure - the pod failure
// is handled in the default way - the counter towards .backoffLimit,
// represented by the job's .status.failed field, is incremented.
PodFailurePolicyActionCount PodFailurePolicyAction = "Count"
)
// +enum
type PodFailurePolicyOnExitCodesOperator string
const (
PodFailurePolicyOnExitCodesOpIn PodFailurePolicyOnExitCodesOperator = "In"
PodFailurePolicyOnExitCodesOpNotIn PodFailurePolicyOnExitCodesOperator = "NotIn"
)
// PodReplacementPolicy specifies the policy for creating pod replacements.
// +enum
type PodReplacementPolicy string
const (
// TerminatingOrFailed means that we recreate pods
// when they are terminating (has a metadata.deletionTimestamp) or failed.
TerminatingOrFailed PodReplacementPolicy = "TerminatingOrFailed"
	// Failed means to wait until a previously created Pod is fully terminated (has phase
	// Failed or Succeeded) before creating a replacement Pod.
Failed PodReplacementPolicy = "Failed"
)
// PodFailurePolicyOnExitCodesRequirement describes the requirement for handling
// a failed pod based on its container exit codes. In particular, it looks up the
// .state.terminated.exitCode for each app container and init container status,
// represented by the .status.containerStatuses and .status.initContainerStatuses
// fields in the Pod status, respectively. Containers completed with success
// (exit code 0) are excluded from the requirement check.
type PodFailurePolicyOnExitCodesRequirement struct {
// Restricts the check for exit codes to the container with the
// specified name. When null, the rule applies to all containers.
// When specified, it should match one the container or initContainer
// names in the pod template.
// +optional
ContainerName *string
// Represents the relationship between the container exit code(s) and the
// specified values. Containers completed with success (exit code 0) are
// excluded from the requirement check. Possible values are:
//
// - In: the requirement is satisfied if at least one container exit code
// (might be multiple if there are multiple containers not restricted
// by the 'containerName' field) is in the set of specified values.
// - NotIn: the requirement is satisfied if at least one container exit code
// (might be multiple if there are multiple containers not restricted
// by the 'containerName' field) is not in the set of specified values.
	// Additional values may be added in the future. Clients should
// react to an unknown operator by assuming the requirement is not satisfied.
Operator PodFailurePolicyOnExitCodesOperator
// Specifies the set of values. Each returned container exit code (might be
// multiple in case of multiple containers) is checked against this set of
// values with respect to the operator. The list of values must be ordered
// and must not contain duplicates. Value '0' cannot be used for the In operator.
// At least one element is required. At most 255 elements are allowed.
// +listType=set
Values []int32
}
// PodFailurePolicyOnPodConditionsPattern describes a pattern for matching
// an actual pod condition type.
type PodFailurePolicyOnPodConditionsPattern struct {
// Specifies the required Pod condition type. To match a pod condition
// it is required that specified type equals the pod condition type.
Type api.PodConditionType
// Specifies the required Pod condition status. To match a pod condition
// it is required that the specified status equals the pod condition status.
// Defaults to True.
Status api.ConditionStatus
}
// PodFailurePolicyRule describes how a pod failure is handled when the requirements are met.
// One of OnExitCodes and onPodConditions, but not both, can be used in each rule.
type PodFailurePolicyRule struct {
// Specifies the action taken on a pod failure when the requirements are satisfied.
// Possible values are:
//
// - FailJob: indicates that the pod's job is marked as Failed and all
// running pods are terminated.
// - FailIndex: indicates that the pod's index is marked as Failed and will
// not be restarted.
	//   This value is beta-level. It can be used when the
	//   `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).
// - Ignore: indicates that the counter towards the .backoffLimit is not
// incremented and a replacement pod is created.
// - Count: indicates that the pod is handled in the default way - the
// counter towards the .backoffLimit is incremented.
	// Additional values may be added in the future. Clients should
// react to an unknown action by skipping the rule.
Action PodFailurePolicyAction
// Represents the requirement on the container exit codes.
// +optional
OnExitCodes *PodFailurePolicyOnExitCodesRequirement
// Represents the requirement on the pod conditions. The requirement is represented
// as a list of pod condition patterns. The requirement is satisfied if at
// least one pattern matches an actual pod condition. At most 20 elements are allowed.
// +listType=atomic
// +optional
OnPodConditions []PodFailurePolicyOnPodConditionsPattern
}
// PodFailurePolicy describes how failed pods influence the backoffLimit.
type PodFailurePolicy struct {
// A list of pod failure policy rules. The rules are evaluated in order.
	// Once a rule matches a Pod failure, the remaining rules are ignored.
// When no rule matches the Pod failure, the default handling applies - the
// counter of pod failures is incremented and it is checked against
// the backoffLimit. At most 20 elements are allowed.
// +listType=atomic
Rules []PodFailurePolicyRule
}
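
// Illustrative aside (not part of this file): a sketch of a pod failure policy built
// with the versioned k8s.io/api/batch/v1 types that mirror the structs above. The
// exit codes 42 and 1 are arbitrary assumptions for the example.

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
)

func main() {
	// Ignore pod failures whose containers exit with code 42 (they do not count
	// towards backoffLimit); fail the whole Job on exit code 1.
	policy := &batchv1.PodFailurePolicy{
		Rules: []batchv1.PodFailurePolicyRule{
			{
				Action: batchv1.PodFailurePolicyActionIgnore,
				OnExitCodes: &batchv1.PodFailurePolicyOnExitCodesRequirement{
					Operator: batchv1.PodFailurePolicyOnExitCodesOpIn,
					Values:   []int32{42},
				},
			},
			{
				Action: batchv1.PodFailurePolicyActionFailJob,
				OnExitCodes: &batchv1.PodFailurePolicyOnExitCodesRequirement{
					Operator: batchv1.PodFailurePolicyOnExitCodesOpIn,
					Values:   []int32{1},
				},
			},
		},
	}
	fmt.Println(len(policy.Rules))
}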
// SuccessPolicy describes when a Job can be declared as succeeded based on the success of some indexes.
type SuccessPolicy struct {
	// rules represents the list of alternative rules for declaring the Jobs
// as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met,
// the "SucceededCriteriaMet" condition is added, and the lingering pods are removed.
// The terminal state for such a Job has the "Complete" condition.
// Additionally, these rules are evaluated in order; Once the Job meets one of the rules,
// other rules are ignored. At most 20 elements are allowed.
// +listType=atomic
Rules []SuccessPolicyRule
}
// SuccessPolicyRule describes rule for declaring a Job as succeeded.
// Each rule must have at least one of the "succeededIndexes" or "succeededCount" specified.
type SuccessPolicyRule struct {
// succeededIndexes specifies the set of indexes
// which need to be contained in the actual set of the succeeded indexes for the Job.
// The list of indexes must be within 0 to ".spec.completions-1" and
// must not contain duplicates. At least one element is required.
// The indexes are represented as intervals separated by commas.
// The intervals can be a decimal integer or a pair of decimal integers separated by a hyphen.
	// A pair gives the first and last element of a consecutive series of indexes,
	// separated by a hyphen.
// For example, if the completed indexes are 1, 3, 4, 5 and 7, they are
// represented as "1,3-5,7".
// When this field is null, this field doesn't default to any value
// and is never evaluated at any time.
//
// +optional
SucceededIndexes *string
// succeededCount specifies the minimal required size of the actual set of the succeeded indexes
// for the Job. When succeededCount is used along with succeededIndexes, the check is
// constrained only to the set of indexes specified by succeededIndexes.
// For example, given that succeededIndexes is "1-4", succeededCount is "3",
// and completed indexes are "1", "3", and "5", the Job isn't declared as succeeded
// because only "1" and "3" indexes are considered in that rules.
// When this field is null, this doesn't default to any value and
// is never evaluated at any time.
// When specified it needs to be a positive integer.
//
// +optional
SucceededCount *int32
}
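
// Illustrative aside (not part of this file): a minimal sketch of a success policy
// using the versioned k8s.io/api/batch/v1 types (available in recent Kubernetes
// releases where the JobSuccessPolicy gate applies). The index range and count are
// illustrative assumptions.

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
)

func main() {
	// Declare the Job successful as soon as two of the indexes 0-3 succeed.
	indexes := "0-3"
	count := int32(2)
	policy := &batchv1.SuccessPolicy{
		Rules: []batchv1.SuccessPolicyRule{
			{SucceededIndexes: &indexes, SucceededCount: &count},
		},
	}
	fmt.Println(*policy.Rules[0].SucceededIndexes, *policy.Rules[0].SucceededCount)
}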
// JobSpec describes how the job execution will look like.
type JobSpec struct {
// Specifies the maximum desired number of pods the job should
// run at any given time. The actual number of pods running in steady state will
	// be less than this number when ((.spec.completions - .status.succeeded) < .spec.parallelism),
// i.e. when the work left to do is less than max parallelism.
// +optional
Parallelism *int32
// Specifies the desired number of successfully finished pods the
// job should be run with. Setting to null means that the success of any
// pod signals the success of all pods, and allows parallelism to have any positive
// value. Setting to 1 means that parallelism is limited to 1 and the success of that
// pod signals the success of the job.
// +optional
Completions *int32
// Specifies the policy of handling failed pods. In particular, it allows to
// specify the set of actions and conditions which need to be
// satisfied to take the associated action.
// If empty, the default behaviour applies - the counter of failed pods,
	// represented by the job's .status.failed field, is incremented and it is
// checked against the backoffLimit. This field cannot be used in combination
// with .spec.podTemplate.spec.restartPolicy=OnFailure.
//
// +optional
PodFailurePolicy *PodFailurePolicy
// successPolicy specifies the policy when the Job can be declared as succeeded.
// If empty, the default behavior applies - the Job is declared as succeeded
// only when the number of succeeded pods equals to the completions.
	// When the field is specified, it must be immutable and works only for Indexed Jobs.
// Once the Job meets the SuccessPolicy, the lingering pods are terminated.
//
// This field is beta-level. To use this field, you must enable the
// `JobSuccessPolicy` feature gate (enabled by default).
// +optional
SuccessPolicy *SuccessPolicy
// Specifies the duration in seconds relative to the startTime that the job
// may be continuously active before the system tries to terminate it; value
// must be positive integer. If a Job is suspended (at creation or through an
// update), this timer will effectively be stopped and reset when the Job is
// resumed again.
// +optional
ActiveDeadlineSeconds *int64
// Optional number of retries before marking this job failed.
// Defaults to 6
// +optional
BackoffLimit *int32
// Specifies the limit for the number of retries within an
// index before marking this index as failed. When enabled the number of
// failures per index is kept in the pod's
// batch.kubernetes.io/job-index-failure-count annotation. It can only
// be set when Job's completionMode=Indexed, and the Pod's restart
// policy is Never. The field is immutable.
// This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
// feature gate is enabled (enabled by default).
// +optional
BackoffLimitPerIndex *int32
// Specifies the maximal number of failed indexes before marking the Job as
// failed, when backoffLimitPerIndex is set. Once the number of failed
// indexes exceeds this number the entire Job is marked as Failed and its
// execution is terminated. When left as null the job continues execution of
// all of its indexes and is marked with the `Complete` Job condition.
// It can only be specified when backoffLimitPerIndex is set.
// It can be null or up to completions. It is required and must be
	// less than or equal to 10^4 when completions is greater than 10^5.
// This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
// feature gate is enabled (enabled by default).
// +optional
MaxFailedIndexes *int32
// TODO enabled it when https://github.com/kubernetes/kubernetes/issues/28486 has been fixed
// Optional number of failed pods to retain.
// +optional
// FailedPodsLimit *int32
// A label query over pods that should match the pod count.
// Normally, the system sets this field for you.
// +optional
Selector *metav1.LabelSelector
// manualSelector controls generation of pod labels and pod selectors.
// Leave `manualSelector` unset unless you are certain what you are doing.
	// When false or unset, the system picks labels unique to this job
// and appends those labels to the pod template. When true,
// the user is responsible for picking unique labels and specifying
// the selector. Failure to pick a unique label may cause this
	// and other jobs to not function correctly. However, you may see
// `manualSelector=true` in jobs that were created with the old `extensions/v1beta1`
// API.
// +optional
ManualSelector *bool
// Describes the pod that will be created when executing a job.
// The only allowed template.spec.restartPolicy values are "Never" or "OnFailure".
Template api.PodTemplateSpec
// ttlSecondsAfterFinished limits the lifetime of a Job that has finished
// execution (either Complete or Failed). If this field is set,
// ttlSecondsAfterFinished after the Job finishes, it is eligible to be
// automatically deleted. When the Job is being deleted, its lifecycle
// guarantees (e.g. finalizers) will be honored. If this field is unset,
// the Job won't be automatically deleted. If this field is set to zero,
// the Job becomes eligible to be deleted immediately after it finishes.
// +optional
TTLSecondsAfterFinished *int32
// completionMode specifies how Pod completions are tracked. It can be
// `NonIndexed` (default) or `Indexed`.
//
// `NonIndexed` means that the Job is considered complete when there have
// been .spec.completions successfully completed Pods. Each Pod completion is
// homologous to each other.
//
// `Indexed` means that the Pods of a
// Job get an associated completion index from 0 to (.spec.completions - 1),
// available in the annotation batch.kubernetes.io/job-completion-index.
// The Job is considered complete when there is one successfully completed Pod
// for each index.
// When value is `Indexed`, .spec.completions must be specified and
// `.spec.parallelism` must be less than or equal to 10^5.
	// In addition, the Pod name takes the form
// `$(job-name)-$(index)-$(random-string)`,
// the Pod hostname takes the form `$(job-name)-$(index)`.
//
// More completion modes can be added in the future.
// If the Job controller observes a mode that it doesn't recognize, which
// is possible during upgrades due to version skew, the controller
// skips updates for the Job.
// +optional
CompletionMode *CompletionMode
// suspend specifies whether the Job controller should create Pods or not. If
// a Job is created with suspend set to true, no Pods are created by the Job
// controller. If a Job is suspended after creation (i.e. the flag goes from
// false to true), the Job controller will delete all active Pods associated
// with this Job. Users must design their workload to gracefully handle this.
// Suspending a Job will reset the StartTime field of the Job, effectively
// resetting the ActiveDeadlineSeconds timer too. Defaults to false.
//
// +optional
Suspend *bool
// podReplacementPolicy specifies when to create replacement Pods.
// Possible values are:
// - TerminatingOrFailed means that we recreate pods
// when they are terminating (has a metadata.deletionTimestamp) or failed.
// - Failed means to wait until a previously created Pod is fully terminated (has phase
// Failed or Succeeded) before creating a replacement Pod.
//
	// When using podFailurePolicy, Failed is the only allowed value.
// TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.
	// This is a beta field. To use this, enable the JobPodReplacementPolicy feature gate,
	// which is on by default.
// +optional
PodReplacementPolicy *PodReplacementPolicy
// ManagedBy field indicates the controller that manages a Job. The k8s Job
// controller reconciles jobs which don't have this field at all or the field
// value is the reserved string `kubernetes.io/job-controller`, but skips
// reconciling Jobs with a custom value for this field.
// The value must be a valid domain-prefixed path (e.g. acme.io/foo) -
// all characters before the first "/" must be a valid subdomain as defined
// by RFC 1123. All characters trailing the first "/" must be valid HTTP Path
// characters as defined by RFC 3986. The value cannot exceed 63 characters.
// This field is immutable.
//
// This field is beta-level. The job controller accepts setting the field
// when the feature gate JobManagedBy is enabled (enabled by default).
// +optional
ManagedBy *string
}
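
// Illustrative aside (not part of this file): to make the field interplay concrete,
// a sketch of a minimal Indexed Job built with the versioned k8s.io/api/batch/v1
// types. The image, command and replica counts are illustrative assumptions.

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func int32Ptr(i int32) *int32 { return &i }

func main() {
	mode := batchv1.IndexedCompletion
	job := batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: "demo"},
		Spec: batchv1.JobSpec{
			Completions:    int32Ptr(5),
			Parallelism:    int32Ptr(2),
			BackoffLimit:   int32Ptr(6),
			CompletionMode: &mode,
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					RestartPolicy: corev1.RestartPolicyNever,
					Containers: []corev1.Container{
						{Name: "worker", Image: "busybox", Command: []string{"sh", "-c", "echo $JOB_COMPLETION_INDEX"}},
					},
				},
			},
		},
	}
	fmt.Println(job.Name, *job.Spec.Completions, *job.Spec.CompletionMode)
}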
// JobStatus represents the current state of a Job.
type JobStatus struct {
// The latest available observations of an object's current state. When a Job
// fails, one of the conditions will have type "Failed" and status true. When
// a Job is suspended, one of the conditions will have type "Suspended" and
// status true; when the Job is resumed, the status of this condition will
// become false. When a Job is completed, one of the conditions will have
// type "Complete" and status true.
//
// A job is considered finished when it is in a terminal condition, either
// "Complete" or "Failed". A Job cannot have both the "Complete" and "Failed" conditions.
// Additionally, it cannot be in the "Complete" and "FailureTarget" conditions.
// The "Complete", "Failed" and "FailureTarget" conditions cannot be disabled.
//
// +optional
Conditions []JobCondition
// Represents time when the job controller started processing a job. When a
// Job is created in the suspended state, this field is not set until the
// first time it is resumed. This field is reset every time a Job is resumed
// from suspension. It is represented in RFC3339 form and is in UTC.
//
// Once set, the field can only be removed when the job is suspended.
// The field cannot be modified while the job is unsuspended or finished.
//
// +optional
StartTime *metav1.Time
// Represents time when the job was completed. It is not guaranteed to
// be set in happens-before order across separate operations.
// It is represented in RFC3339 form and is in UTC.
// The completion time is set when the job finishes successfully, and only then.
// The value cannot be updated or removed. The value indicates the same or
// later point in time as the startTime field.
// +optional
CompletionTime *metav1.Time
// The number of pending and running pods which are not terminating (without
// a deletionTimestamp).
// The value is zero for finished jobs.
// +optional
Active int32
// The number of pods which are terminating (in phase Pending or Running
// and have a deletionTimestamp).
//
// This field is beta-level. The job controller populates the field when
// the feature gate JobPodReplacementPolicy is enabled (enabled by default).
// +optional
Terminating *int32
// The number of active pods which have a Ready condition and are not
// terminating (without a deletionTimestamp).
// +optional
Ready *int32
// The number of pods which reached phase Succeeded.
// The value increases monotonically for a given spec. However, it may
// decrease in reaction to scale down of elastic indexed jobs.
// +optional
Succeeded int32
// The number of pods which reached phase Failed.
// The value increases monotonically.
// +optional
Failed int32
// completedIndexes holds the completed indexes when .spec.completionMode =
// "Indexed" in a text format. The indexes are represented as decimal integers
// separated by commas. The numbers are listed in increasing order. Three or
// more consecutive numbers are compressed and represented by the first and
// last element of the series, separated by a hyphen.
// For example, if the completed indexes are 1, 3, 4, 5 and 7, they are
// represented as "1,3-5,7".
// +optional
CompletedIndexes string
// FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set.
// The indexes are represented in the text format analogous as for the
	// `completedIndexes` field, i.e. they are kept as decimal integers
// separated by commas. The numbers are listed in increasing order. Three or
// more consecutive numbers are compressed and represented by the first and
// last element of the series, separated by a hyphen.
// For example, if the failed indexes are 1, 3, 4, 5 and 7, they are
// represented as "1,3-5,7".
// The set of failed indexes cannot overlap with the set of completed indexes.
//
// This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
// feature gate is enabled (enabled by default).
// +optional
FailedIndexes *string
// uncountedTerminatedPods holds the UIDs of Pods that have terminated but
// the job controller hasn't yet accounted for in the status counters.
//
// The job controller creates pods with a finalizer. When a pod terminates
// (succeeded or failed), the controller does three steps to account for it
// in the job status:
//
// 1. Add the pod UID to the corresponding array in this field.
// 2. Remove the pod finalizer.
// 3. Remove the pod UID from the array while increasing the corresponding
// counter.
//
// Old jobs might not be tracked using this field, in which case the field
// remains null.
// The structure is empty for finished jobs.
// +optional
UncountedTerminatedPods *UncountedTerminatedPods
}
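
The compressed index format described for completedIndexes and failedIndexes can be produced with ordinary string handling. The following is a minimal, self-contained sketch; the helper name formatIndexes is hypothetical and not part of the vendored code:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// formatIndexes renders sorted, deduplicated indexes in the compressed text
// format described above: runs of three or more consecutive numbers become
// "first-last", everything else is listed individually.
func formatIndexes(indexes []int) string {
	var parts []string
	for i := 0; i < len(indexes); {
		j := i
		for j+1 < len(indexes) && indexes[j+1] == indexes[j]+1 {
			j++
		}
		if j-i >= 2 {
			parts = append(parts, fmt.Sprintf("%d-%d", indexes[i], indexes[j]))
		} else {
			for k := i; k <= j; k++ {
				parts = append(parts, strconv.Itoa(indexes[k]))
			}
		}
		i = j + 1
	}
	return strings.Join(parts, ",")
}

func main() {
	fmt.Println(formatIndexes([]int{1, 3, 4, 5, 7})) // prints "1,3-5,7"
}
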
// UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't
// been accounted for in Job status counters.
type UncountedTerminatedPods struct {
// succeeded holds UIDs of succeeded Pods.
// +listType=set
// +optional
Succeeded []types.UID
// failed holds UIDs of failed Pods.
// +listType=set
// +optional
Failed []types.UID
}
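
A minimal sketch of step 3 of the accounting sequence described above, assuming the vendored import path k8s.io/kubernetes/pkg/apis/batch; flushUncounted is a hypothetical, simplified helper (the real controller only drains UIDs whose pod finalizers have already been removed):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/apis/batch"
)

// flushUncounted drains the uncounted UID lists while increasing the
// corresponding Succeeded/Failed counters.
func flushUncounted(status *batch.JobStatus) {
	if status.UncountedTerminatedPods == nil {
		return
	}
	status.Succeeded += int32(len(status.UncountedTerminatedPods.Succeeded))
	status.Failed += int32(len(status.UncountedTerminatedPods.Failed))
	status.UncountedTerminatedPods.Succeeded = nil
	status.UncountedTerminatedPods.Failed = nil
}

func main() {
	st := &batch.JobStatus{
		UncountedTerminatedPods: &batch.UncountedTerminatedPods{
			Succeeded: []types.UID{"uid-1", "uid-2"},
			Failed:    []types.UID{"uid-3"},
		},
	}
	flushUncounted(st)
	fmt.Println(st.Succeeded, st.Failed) // 2 1
}
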
// JobConditionType is a valid value for JobCondition.Type
type JobConditionType string
// These are valid conditions of a job.
const (
// JobSuspended means the job has been suspended.
JobSuspended JobConditionType = "Suspended"
// JobComplete means the job has completed its execution.
JobComplete JobConditionType = "Complete"
// JobFailed means the job has failed its execution.
JobFailed JobConditionType = "Failed"
// JobFailureTarget means the job is about to fail its execution.
JobFailureTarget JobConditionType = "FailureTarget"
// JobSuccessCriteriaMet means the Job has reached a success state and will be marked as Completed.
JobSuccessCriteriaMet JobConditionType = "SuccessCriteriaMet"
)
// JobCondition describes current state of a job.
type JobCondition struct {
// Type of job condition.
Type JobConditionType
// Status of the condition, one of True, False, Unknown.
Status api.ConditionStatus
// Last time the condition was checked.
// +optional
LastProbeTime metav1.Time
// Last time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time
// (brief) reason for the condition's last transition.
// +optional
Reason string
// Human readable message indicating details about last transition.
// +optional
Message string
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// CronJob represents the configuration of a single cron job.
type CronJob struct {
metav1.TypeMeta
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta
// Specification of the desired behavior of a cron job, including the schedule.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Spec CronJobSpec
// Current status of a cron job.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Status CronJobStatus
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// CronJobList is a collection of cron jobs.
type CronJobList struct {
metav1.TypeMeta
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ListMeta
// items is the list of CronJobs.
Items []CronJob
}
// CronJobSpec describes what the job execution will look like and when it will actually run.
type CronJobSpec struct {
// The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.
Schedule string
// The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones.
// If not specified, this will default to the time zone of the kube-controller-manager process.
// The set of valid time zone names and the time zone offset is loaded from the system-wide time zone
// database by the API server during CronJob validation and the controller manager during execution.
// If no system-wide time zone database can be found a bundled version of the database is used instead.
// If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host
// configuration, the controller will stop creating new Jobs and will create a system event with the
// reason UnknownTimeZone.
// More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones
// +optional
TimeZone *string
// Optional deadline in seconds for starting the job if it misses scheduled
// time for any reason. Missed jobs executions will be counted as failed ones.
// +optional
StartingDeadlineSeconds *int64
// Specifies how to treat concurrent executions of a Job.
// Valid values are:
//
// - "Allow" (default): allows CronJobs to run concurrently;
// - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet;
// - "Replace": cancels currently running job and replaces it with a new one
// +optional
ConcurrencyPolicy ConcurrencyPolicy
// This flag tells the controller to suspend subsequent executions; it does
// not apply to already started executions. Defaults to false.
// +optional
Suspend *bool
// Specifies the job that will be created when executing a CronJob.
JobTemplate JobTemplateSpec
// The number of successful finished jobs to retain.
// This is a pointer to distinguish between explicit zero and not specified.
// +optional
SuccessfulJobsHistoryLimit *int32
// The number of failed finished jobs to retain.
// This is a pointer to distinguish between explicit zero and not specified.
// +optional
FailedJobsHistoryLimit *int32
}
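
A minimal sketch of how StartingDeadlineSeconds bounds late starts, assuming the vendored import path k8s.io/kubernetes/pkg/apis/batch; tooLateToStart is a hypothetical helper, and deriving the scheduled time from Schedule is out of scope here:

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/apis/batch"
)

// tooLateToStart reports whether a missed scheduled time can no longer be
// started because the starting deadline has passed; such a run would be
// counted as failed.
func tooLateToStart(spec *batch.CronJobSpec, scheduledTime, now time.Time) bool {
	if spec.StartingDeadlineSeconds == nil {
		return false // no deadline configured, a missed run may still be started
	}
	deadline := scheduledTime.Add(time.Duration(*spec.StartingDeadlineSeconds) * time.Second)
	return now.After(deadline)
}

func main() {
	deadline := int64(120)
	spec := &batch.CronJobSpec{Schedule: "*/5 * * * *", StartingDeadlineSeconds: &deadline}
	scheduled := time.Now().Add(-3 * time.Minute)
	fmt.Println(tooLateToStart(spec, scheduled, time.Now())) // true, missed by more than 120s
}
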
// ConcurrencyPolicy describes how the job will be handled.
// Only one of the following concurrent policies may be specified.
// If none of the following policies is specified, the default one
// is AllowConcurrent.
type ConcurrencyPolicy string
const (
// AllowConcurrent allows CronJobs to run concurrently.
AllowConcurrent ConcurrencyPolicy = "Allow"
// ForbidConcurrent forbids concurrent runs, skipping next run if previous
// hasn't finished yet.
ForbidConcurrent ConcurrencyPolicy = "Forbid"
// ReplaceConcurrent cancels currently running job and replaces it with a new one.
ReplaceConcurrent ConcurrencyPolicy = "Replace"
)
// CronJobStatus represents the current state of a cron job.
type CronJobStatus struct {
// A list of pointers to currently running jobs.
// +optional
Active []api.ObjectReference
// Information about when the job was last successfully scheduled.
// +optional
LastScheduleTime *metav1.Time
// Information about when the job last successfully completed.
// +optional
LastSuccessfulTime *metav1.Time
}

View File

@ -0,0 +1,567 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package batch
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
types "k8s.io/apimachinery/pkg/types"
core "k8s.io/kubernetes/pkg/apis/core"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CronJob) DeepCopyInto(out *CronJob) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJob.
func (in *CronJob) DeepCopy() *CronJob {
if in == nil {
return nil
}
out := new(CronJob)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CronJob) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CronJobList) DeepCopyInto(out *CronJobList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CronJob, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobList.
func (in *CronJobList) DeepCopy() *CronJobList {
if in == nil {
return nil
}
out := new(CronJobList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CronJobList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CronJobSpec) DeepCopyInto(out *CronJobSpec) {
*out = *in
if in.TimeZone != nil {
in, out := &in.TimeZone, &out.TimeZone
*out = new(string)
**out = **in
}
if in.StartingDeadlineSeconds != nil {
in, out := &in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds
*out = new(int64)
**out = **in
}
if in.Suspend != nil {
in, out := &in.Suspend, &out.Suspend
*out = new(bool)
**out = **in
}
in.JobTemplate.DeepCopyInto(&out.JobTemplate)
if in.SuccessfulJobsHistoryLimit != nil {
in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit
*out = new(int32)
**out = **in
}
if in.FailedJobsHistoryLimit != nil {
in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobSpec.
func (in *CronJobSpec) DeepCopy() *CronJobSpec {
if in == nil {
return nil
}
out := new(CronJobSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CronJobStatus) DeepCopyInto(out *CronJobStatus) {
*out = *in
if in.Active != nil {
in, out := &in.Active, &out.Active
*out = make([]core.ObjectReference, len(*in))
copy(*out, *in)
}
if in.LastScheduleTime != nil {
in, out := &in.LastScheduleTime, &out.LastScheduleTime
*out = (*in).DeepCopy()
}
if in.LastSuccessfulTime != nil {
in, out := &in.LastSuccessfulTime, &out.LastSuccessfulTime
*out = (*in).DeepCopy()
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobStatus.
func (in *CronJobStatus) DeepCopy() *CronJobStatus {
if in == nil {
return nil
}
out := new(CronJobStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Job) DeepCopyInto(out *Job) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Job.
func (in *Job) DeepCopy() *Job {
if in == nil {
return nil
}
out := new(Job)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Job) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JobCondition) DeepCopyInto(out *JobCondition) {
*out = *in
in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobCondition.
func (in *JobCondition) DeepCopy() *JobCondition {
if in == nil {
return nil
}
out := new(JobCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JobList) DeepCopyInto(out *JobList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Job, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobList.
func (in *JobList) DeepCopy() *JobList {
if in == nil {
return nil
}
out := new(JobList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *JobList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JobSpec) DeepCopyInto(out *JobSpec) {
*out = *in
if in.Parallelism != nil {
in, out := &in.Parallelism, &out.Parallelism
*out = new(int32)
**out = **in
}
if in.Completions != nil {
in, out := &in.Completions, &out.Completions
*out = new(int32)
**out = **in
}
if in.PodFailurePolicy != nil {
in, out := &in.PodFailurePolicy, &out.PodFailurePolicy
*out = new(PodFailurePolicy)
(*in).DeepCopyInto(*out)
}
if in.SuccessPolicy != nil {
in, out := &in.SuccessPolicy, &out.SuccessPolicy
*out = new(SuccessPolicy)
(*in).DeepCopyInto(*out)
}
if in.ActiveDeadlineSeconds != nil {
in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds
*out = new(int64)
**out = **in
}
if in.BackoffLimit != nil {
in, out := &in.BackoffLimit, &out.BackoffLimit
*out = new(int32)
**out = **in
}
if in.BackoffLimitPerIndex != nil {
in, out := &in.BackoffLimitPerIndex, &out.BackoffLimitPerIndex
*out = new(int32)
**out = **in
}
if in.MaxFailedIndexes != nil {
in, out := &in.MaxFailedIndexes, &out.MaxFailedIndexes
*out = new(int32)
**out = **in
}
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.ManualSelector != nil {
in, out := &in.ManualSelector, &out.ManualSelector
*out = new(bool)
**out = **in
}
in.Template.DeepCopyInto(&out.Template)
if in.TTLSecondsAfterFinished != nil {
in, out := &in.TTLSecondsAfterFinished, &out.TTLSecondsAfterFinished
*out = new(int32)
**out = **in
}
if in.CompletionMode != nil {
in, out := &in.CompletionMode, &out.CompletionMode
*out = new(CompletionMode)
**out = **in
}
if in.Suspend != nil {
in, out := &in.Suspend, &out.Suspend
*out = new(bool)
**out = **in
}
if in.PodReplacementPolicy != nil {
in, out := &in.PodReplacementPolicy, &out.PodReplacementPolicy
*out = new(PodReplacementPolicy)
**out = **in
}
if in.ManagedBy != nil {
in, out := &in.ManagedBy, &out.ManagedBy
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobSpec.
func (in *JobSpec) DeepCopy() *JobSpec {
if in == nil {
return nil
}
out := new(JobSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JobStatus) DeepCopyInto(out *JobStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]JobCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.StartTime != nil {
in, out := &in.StartTime, &out.StartTime
*out = (*in).DeepCopy()
}
if in.CompletionTime != nil {
in, out := &in.CompletionTime, &out.CompletionTime
*out = (*in).DeepCopy()
}
if in.Terminating != nil {
in, out := &in.Terminating, &out.Terminating
*out = new(int32)
**out = **in
}
if in.Ready != nil {
in, out := &in.Ready, &out.Ready
*out = new(int32)
**out = **in
}
if in.FailedIndexes != nil {
in, out := &in.FailedIndexes, &out.FailedIndexes
*out = new(string)
**out = **in
}
if in.UncountedTerminatedPods != nil {
in, out := &in.UncountedTerminatedPods, &out.UncountedTerminatedPods
*out = new(UncountedTerminatedPods)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobStatus.
func (in *JobStatus) DeepCopy() *JobStatus {
if in == nil {
return nil
}
out := new(JobStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JobTemplateSpec) DeepCopyInto(out *JobTemplateSpec) {
*out = *in
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobTemplateSpec.
func (in *JobTemplateSpec) DeepCopy() *JobTemplateSpec {
if in == nil {
return nil
}
out := new(JobTemplateSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodFailurePolicy) DeepCopyInto(out *PodFailurePolicy) {
*out = *in
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]PodFailurePolicyRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodFailurePolicy.
func (in *PodFailurePolicy) DeepCopy() *PodFailurePolicy {
if in == nil {
return nil
}
out := new(PodFailurePolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodFailurePolicyOnExitCodesRequirement) DeepCopyInto(out *PodFailurePolicyOnExitCodesRequirement) {
*out = *in
if in.ContainerName != nil {
in, out := &in.ContainerName, &out.ContainerName
*out = new(string)
**out = **in
}
if in.Values != nil {
in, out := &in.Values, &out.Values
*out = make([]int32, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodFailurePolicyOnExitCodesRequirement.
func (in *PodFailurePolicyOnExitCodesRequirement) DeepCopy() *PodFailurePolicyOnExitCodesRequirement {
if in == nil {
return nil
}
out := new(PodFailurePolicyOnExitCodesRequirement)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodFailurePolicyOnPodConditionsPattern) DeepCopyInto(out *PodFailurePolicyOnPodConditionsPattern) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodFailurePolicyOnPodConditionsPattern.
func (in *PodFailurePolicyOnPodConditionsPattern) DeepCopy() *PodFailurePolicyOnPodConditionsPattern {
if in == nil {
return nil
}
out := new(PodFailurePolicyOnPodConditionsPattern)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodFailurePolicyRule) DeepCopyInto(out *PodFailurePolicyRule) {
*out = *in
if in.OnExitCodes != nil {
in, out := &in.OnExitCodes, &out.OnExitCodes
*out = new(PodFailurePolicyOnExitCodesRequirement)
(*in).DeepCopyInto(*out)
}
if in.OnPodConditions != nil {
in, out := &in.OnPodConditions, &out.OnPodConditions
*out = make([]PodFailurePolicyOnPodConditionsPattern, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodFailurePolicyRule.
func (in *PodFailurePolicyRule) DeepCopy() *PodFailurePolicyRule {
if in == nil {
return nil
}
out := new(PodFailurePolicyRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SuccessPolicy) DeepCopyInto(out *SuccessPolicy) {
*out = *in
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]SuccessPolicyRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuccessPolicy.
func (in *SuccessPolicy) DeepCopy() *SuccessPolicy {
if in == nil {
return nil
}
out := new(SuccessPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SuccessPolicyRule) DeepCopyInto(out *SuccessPolicyRule) {
*out = *in
if in.SucceededIndexes != nil {
in, out := &in.SucceededIndexes, &out.SucceededIndexes
*out = new(string)
**out = **in
}
if in.SucceededCount != nil {
in, out := &in.SucceededCount, &out.SucceededCount
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuccessPolicyRule.
func (in *SuccessPolicyRule) DeepCopy() *SuccessPolicyRule {
if in == nil {
return nil
}
out := new(SuccessPolicyRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UncountedTerminatedPods) DeepCopyInto(out *UncountedTerminatedPods) {
*out = *in
if in.Succeeded != nil {
in, out := &in.Succeeded, &out.Succeeded
*out = make([]types.UID, len(*in))
copy(*out, *in)
}
if in.Failed != nil {
in, out := &in.Failed, &out.Failed
*out = make([]types.UID, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UncountedTerminatedPods.
func (in *UncountedTerminatedPods) DeepCopy() *UncountedTerminatedPods {
if in == nil {
return nil
}
out := new(UncountedTerminatedPods)
in.DeepCopyInto(out)
return out
}

4
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS generated vendored Normal file
View File

@ -0,0 +1,4 @@
# See the OWNERS docs at https://go.k8s.io/owners
labels:
- sig/apps

View File

@ -0,0 +1,158 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file should be consistent with pkg/api/v1/annotation_key_constants.go.
package core
const (
// ImagePolicyFailedOpenKey is added to pods created by failing open when the image policy
// webhook backend fails.
ImagePolicyFailedOpenKey string = "alpha.image-policy.k8s.io/failed-open"
// MirrorPodAnnotationKey represents the annotation key set by kubelets when creating mirror pods
MirrorPodAnnotationKey string = "kubernetes.io/config.mirror"
// TolerationsAnnotationKey represents the key of tolerations data (json serialized)
// in the Annotations of a Pod.
TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations"
// TaintsAnnotationKey represents the key of taints data (json serialized)
// in the Annotations of a Node.
TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints"
// SeccompPodAnnotationKey represents the key of a seccomp profile applied
// to all containers of a pod.
// Deprecated: set a pod security context `seccompProfile` field.
SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod"
// SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied
// to one container of a pod.
// Deprecated: set a container security context `seccompProfile` field.
SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/"
// SeccompProfileRuntimeDefault represents the default seccomp profile used by container runtime.
// Deprecated: set a pod or container security context `seccompProfile` of type "RuntimeDefault" instead.
SeccompProfileRuntimeDefault string = "runtime/default"
// DeprecatedSeccompProfileDockerDefault represents the default seccomp profile used by docker.
// Deprecated: set a pod or container security context `seccompProfile` of type "RuntimeDefault" instead.
DeprecatedSeccompProfileDockerDefault string = "docker/default"
// DeprecatedAppArmorAnnotationKeyPrefix is the prefix to an annotation key specifying a container's apparmor profile.
// Deprecated: use a pod or container security context `appArmorProfile` field instead.
DeprecatedAppArmorAnnotationKeyPrefix = "container.apparmor.security.beta.kubernetes.io/"
// DeprecatedAppArmorAnnotationValueRuntimeDefault is the profile specifying the runtime default.
DeprecatedAppArmorAnnotationValueRuntimeDefault = "runtime/default"
// DeprecatedAppArmorAnnotationValueLocalhostPrefix is the prefix for specifying profiles loaded on the node.
DeprecatedAppArmorAnnotationValueLocalhostPrefix = "localhost/"
// DeprecatedAppArmorAnnotationValueUnconfined is the Unconfined AppArmor profile
DeprecatedAppArmorAnnotationValueUnconfined = "unconfined"
// PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized)
// in the Annotations of a Node.
PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods"
// ObjectTTLAnnotationKey represents a suggestion for kubelet for how long it can cache
// an object (e.g. secret, config map) before fetching it again from apiserver.
// This annotation can be attached to node.
ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl"
// NonConvertibleAnnotationPrefix is the annotation key prefix used to identify non-convertible json paths.
NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io"
kubectlPrefix = "kubectl.kubernetes.io/"
// LastAppliedConfigAnnotation is the annotation used to store the previous
// configuration of a resource for use in a three way diff by UpdateApplyAnnotation.
LastAppliedConfigAnnotation = kubectlPrefix + "last-applied-configuration"
// AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers
//
// It should be a comma-separated list of CIDRs, e.g. `0.0.0.0/0` to
// allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow
// access only from the CIDRs currently allocated to MIT & the USPS.
//
// Not all cloud providers support this annotation, though AWS & GCE do.
AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges"
// EndpointsLastChangeTriggerTime is the annotation key, set for endpoints objects, that
// represents the timestamp (stored as RFC 3339 date-time string, e.g. '2018-10-22T19:32:52.1Z')
// of the last change, of some Pod or Service object, that triggered the endpoints object change.
// In other words, if a Pod / Service changed at time T0, that change was observed by endpoints
// controller at T1, and the Endpoints object was changed at T2, the
// EndpointsLastChangeTriggerTime would be set to T0.
//
// The "endpoints change trigger" here means any Pod or Service change that resulted in the
// Endpoints object change.
//
// Given the definition of the "endpoints change trigger", please note that this annotation will
// be set ONLY for endpoints object changes triggered by either Pod or Service change. If the
// Endpoints object changes due to other reasons, this annotation won't be set (or updated if it's
// already set).
//
// This annotation will be used to compute the in-cluster network programming latency SLI, see
// https://github.com/kubernetes/community/blob/master/sig-scalability/slos/network_programming_latency.md
EndpointsLastChangeTriggerTime = "endpoints.kubernetes.io/last-change-trigger-time"
// EndpointsOverCapacity will be set on an Endpoints resource when it
// exceeds the maximum capacity of 1000 addresses. Initially the Endpoints
// controller will set this annotation with a value of "warning". In a
// future release, the controller may set this annotation with a value of
// "truncated" to indicate that any addresses exceeding the limit of 1000
// have been truncated from the Endpoints resource.
EndpointsOverCapacity = "endpoints.kubernetes.io/over-capacity"
// MigratedPluginsAnnotationKey is the annotation key, set for CSINode objects, that is a comma-separated
// list of in-tree plugins that will be serviced by the CSI backend on the Node represented by CSINode.
// This annotation is used by the Attach Detach Controller to determine whether to use the in-tree or
// CSI Backend for a volume plugin on a specific node.
MigratedPluginsAnnotationKey = "storage.alpha.kubernetes.io/migrated-plugins"
// PodDeletionCost can be set to an int32 that represents the cost of deleting
// a pod compared to other pods belonging to the same ReplicaSet. Pods with lower
// deletion cost are preferred to be deleted before pods with higher deletion cost.
// Note that this is honored on a best-effort basis, and so it does not offer guarantees on
// pod deletion order.
// The implicit deletion cost for pods that don't set the annotation is 0; negative values are permitted.
//
// This annotation is beta-level and is only honored when PodDeletionCost feature is enabled.
PodDeletionCost = "controller.kubernetes.io/pod-deletion-cost"
// DeprecatedAnnotationTopologyAwareHints can be used to enable or disable
// Topology Aware Hints for a Service. This may be set to "Auto" or
// "Disabled". Any other value is treated as "Disabled". This annotation has
// been deprecated in favor of the `service.kubernetes.io/topology-mode`
// annotation which also allows "Auto" and "Disabled", but is not limited to
// those (it's open ended to provide room for experimentation while we
// pursue configuration for topology via specification). When both
// `service.kubernetes.io/topology-aware-hints` and
// `service.kubernetes.io/topology-mode` annotations are set, the value of
// `service.kubernetes.io/topology-aware-hints` has precedence.
DeprecatedAnnotationTopologyAwareHints = "service.kubernetes.io/topology-aware-hints"
// AnnotationTopologyMode can be used to enable or disable Topology Aware
// Routing for a Service. Well known values are "Auto" and "Disabled".
// Implementations may choose to develop new topology approaches, exposing
// them with domain-prefixed values. For example, "example.com/lowest-rtt"
// could be a valid implementation-specific value for this annotation. These
// heuristics will often populate topology hints on EndpointSlices, but that
// is not a requirement.
AnnotationTopologyMode = "service.kubernetes.io/topology-mode"
)
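
A minimal sketch, using only the standard library, of parsing the comma-separated CIDR list format described for AnnotationLoadBalancerSourceRangesKey above; the sample value is illustrative:

package main

import (
	"fmt"
	"net"
	"strings"
)

func main() {
	// Value format matches the load-balancer-source-ranges annotation above.
	ranges := "18.0.0.0/8, 56.0.0.0/8"
	var allowed []*net.IPNet
	for _, cidr := range strings.Split(ranges, ",") {
		_, ipnet, err := net.ParseCIDR(strings.TrimSpace(cidr))
		if err != nil {
			fmt.Println("invalid CIDR:", cidr, err)
			return
		}
		allowed = append(allowed, ipnet)
	}
	fmt.Println(allowed) // [18.0.0.0/8 56.0.0.0/8]
}
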

25
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go generated vendored Normal file
View File

@ -0,0 +1,25 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
// +groupName=
// Package core contains the latest (or "internal") version of the
// Kubernetes API objects. This is the API objects as represented in memory.
// The contract presented to clients is located in the versioned packages,
// which are sub-directories. The first one is "v1". Those packages
// describe how a particular version is serialized to storage/network.
package core // import "k8s.io/kubernetes/pkg/apis/core"

View File

@ -0,0 +1,502 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helper
import (
"encoding/json"
"fmt"
"strconv"
"strings"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/kubernetes/pkg/apis/core"
)
// IsHugePageResourceName returns true if the resource name has the huge page
// resource prefix.
func IsHugePageResourceName(name core.ResourceName) bool {
return strings.HasPrefix(string(name), core.ResourceHugePagesPrefix)
}
// IsHugePageResourceValueDivisible returns true if the resource value of storage is
// an integer multiple of the page size.
func IsHugePageResourceValueDivisible(name core.ResourceName, quantity resource.Quantity) bool {
pageSize, err := HugePageSizeFromResourceName(name)
if err != nil {
return false
}
if pageSize.Sign() <= 0 || pageSize.MilliValue()%int64(1000) != int64(0) {
return false
}
return quantity.Value()%pageSize.Value() == 0
}
// IsQuotaHugePageResourceName returns true if the resource name has the quota
// related huge page resource prefix.
func IsQuotaHugePageResourceName(name core.ResourceName) bool {
return strings.HasPrefix(string(name), core.ResourceHugePagesPrefix) || strings.HasPrefix(string(name), core.ResourceRequestsHugePagesPrefix)
}
// HugePageResourceName returns a ResourceName with the canonical hugepage
// prefix prepended for the specified page size. The page size is converted
// to its canonical representation.
func HugePageResourceName(pageSize resource.Quantity) core.ResourceName {
return core.ResourceName(fmt.Sprintf("%s%s", core.ResourceHugePagesPrefix, pageSize.String()))
}
// HugePageSizeFromResourceName returns the page size for the specified huge page
// resource name. If the specified input is not a valid huge page resource name,
// an error is returned.
func HugePageSizeFromResourceName(name core.ResourceName) (resource.Quantity, error) {
if !IsHugePageResourceName(name) {
return resource.Quantity{}, fmt.Errorf("resource name: %s is an invalid hugepage name", name)
}
pageSize := strings.TrimPrefix(string(name), core.ResourceHugePagesPrefix)
return resource.ParseQuantity(pageSize)
}
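
A minimal usage sketch for the hugepage helpers defined above, assuming the vendored import path k8s.io/kubernetes/pkg/apis/core/helper:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/apis/core/helper"
)

func main() {
	// Build the canonical resource name for 2Mi huge pages and parse it back.
	name := helper.HugePageResourceName(resource.MustParse("2Mi"))
	fmt.Println(name) // hugepages-2Mi

	size, err := helper.HugePageSizeFromResourceName(name)
	fmt.Println(size.String(), err) // 2Mi <nil>
}
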
// NonConvertibleFields iterates over the provided map and returns only the
// keys with the "non-convertible.kubernetes.io" prefix.
func NonConvertibleFields(annotations map[string]string) map[string]string {
nonConvertibleKeys := map[string]string{}
for key, value := range annotations {
if strings.HasPrefix(key, core.NonConvertibleAnnotationPrefix) {
nonConvertibleKeys[key] = value
}
}
return nonConvertibleKeys
}
// Semantic can do semantic deep equality checks for core objects.
// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true
var Semantic = conversion.EqualitiesOrDie(
func(a, b resource.Quantity) bool {
// Ignore formatting, only care that numeric value stayed the same.
// TODO: if we decide it's important, it should be safe to start comparing the format.
//
// Uninitialized quantities are equivalent to 0 quantities.
return a.Cmp(b) == 0
},
func(a, b metav1.MicroTime) bool {
return a.UTC() == b.UTC()
},
func(a, b metav1.Time) bool {
return a.UTC() == b.UTC()
},
func(a, b labels.Selector) bool {
return a.String() == b.String()
},
func(a, b fields.Selector) bool {
return a.String() == b.String()
},
)
var standardResourceQuotaScopes = sets.New(
core.ResourceQuotaScopeTerminating,
core.ResourceQuotaScopeNotTerminating,
core.ResourceQuotaScopeBestEffort,
core.ResourceQuotaScopeNotBestEffort,
core.ResourceQuotaScopePriorityClass,
)
// IsStandardResourceQuotaScope returns true if the scope is a standard value
func IsStandardResourceQuotaScope(scope core.ResourceQuotaScope) bool {
return standardResourceQuotaScopes.Has(scope) || scope == core.ResourceQuotaScopeCrossNamespacePodAffinity
}
var podObjectCountQuotaResources = sets.New(
core.ResourcePods,
)
var podComputeQuotaResources = sets.New(
core.ResourceCPU,
core.ResourceMemory,
core.ResourceLimitsCPU,
core.ResourceLimitsMemory,
core.ResourceRequestsCPU,
core.ResourceRequestsMemory,
)
// IsResourceQuotaScopeValidForResource returns true if the resource applies to the specified scope
func IsResourceQuotaScopeValidForResource(scope core.ResourceQuotaScope, resource core.ResourceName) bool {
switch scope {
case core.ResourceQuotaScopeTerminating, core.ResourceQuotaScopeNotTerminating, core.ResourceQuotaScopeNotBestEffort,
core.ResourceQuotaScopePriorityClass, core.ResourceQuotaScopeCrossNamespacePodAffinity:
return podObjectCountQuotaResources.Has(resource) || podComputeQuotaResources.Has(resource)
case core.ResourceQuotaScopeBestEffort:
return podObjectCountQuotaResources.Has(resource)
default:
return true
}
}
var standardContainerResources = sets.New(
core.ResourceCPU,
core.ResourceMemory,
core.ResourceEphemeralStorage,
)
// IsStandardContainerResourceName returns true if the container can make a resource request
// for the specified resource
func IsStandardContainerResourceName(name core.ResourceName) bool {
return standardContainerResources.Has(name) || IsHugePageResourceName(name)
}
// IsExtendedResourceName returns true if:
// 1. the resource name is not in the default namespace;
// 2. the resource name does not have the "requests." prefix,
// to avoid confusion with the convention in quota;
// 3. it satisfies the rules in IsQualifiedName() after being converted into a quota resource name.
func IsExtendedResourceName(name core.ResourceName) bool {
if IsNativeResource(name) || strings.HasPrefix(string(name), core.DefaultResourceRequestsPrefix) {
return false
}
// Ensure it satisfies the rules in IsQualifiedName() after converted into quota resource name
nameForQuota := fmt.Sprintf("%s%s", core.DefaultResourceRequestsPrefix, string(name))
if errs := validation.IsQualifiedName(nameForQuota); len(errs) != 0 {
return false
}
return true
}
// IsNativeResource returns true if the resource name is in the
// *kubernetes.io/ namespace. Partially-qualified (unprefixed) names are
// implicitly in the kubernetes.io/ namespace.
func IsNativeResource(name core.ResourceName) bool {
return !strings.Contains(string(name), "/") ||
strings.Contains(string(name), core.ResourceDefaultNamespacePrefix)
}
// IsOvercommitAllowed returns true if the resource is in the default
// namespace and is not hugepages.
func IsOvercommitAllowed(name core.ResourceName) bool {
return IsNativeResource(name) &&
!IsHugePageResourceName(name)
}
var standardLimitRangeTypes = sets.New(
core.LimitTypePod,
core.LimitTypeContainer,
core.LimitTypePersistentVolumeClaim,
)
// IsStandardLimitRangeType returns true if the type is Pod, Container, or PersistentVolumeClaim
func IsStandardLimitRangeType(value core.LimitType) bool {
return standardLimitRangeTypes.Has(value)
}
var standardQuotaResources = sets.New(
core.ResourceCPU,
core.ResourceMemory,
core.ResourceEphemeralStorage,
core.ResourceRequestsCPU,
core.ResourceRequestsMemory,
core.ResourceRequestsStorage,
core.ResourceRequestsEphemeralStorage,
core.ResourceLimitsCPU,
core.ResourceLimitsMemory,
core.ResourceLimitsEphemeralStorage,
core.ResourcePods,
core.ResourceQuotas,
core.ResourceServices,
core.ResourceReplicationControllers,
core.ResourceSecrets,
core.ResourcePersistentVolumeClaims,
core.ResourceConfigMaps,
core.ResourceServicesNodePorts,
core.ResourceServicesLoadBalancers,
)
// IsStandardQuotaResourceName returns true if the resource is known to
// the quota tracking system
func IsStandardQuotaResourceName(name core.ResourceName) bool {
return standardQuotaResources.Has(name) || IsQuotaHugePageResourceName(name)
}
var standardResources = sets.New(
core.ResourceCPU,
core.ResourceMemory,
core.ResourceEphemeralStorage,
core.ResourceRequestsCPU,
core.ResourceRequestsMemory,
core.ResourceRequestsEphemeralStorage,
core.ResourceLimitsCPU,
core.ResourceLimitsMemory,
core.ResourceLimitsEphemeralStorage,
core.ResourcePods,
core.ResourceQuotas,
core.ResourceServices,
core.ResourceReplicationControllers,
core.ResourceSecrets,
core.ResourceConfigMaps,
core.ResourcePersistentVolumeClaims,
core.ResourceStorage,
core.ResourceRequestsStorage,
core.ResourceServicesNodePorts,
core.ResourceServicesLoadBalancers,
)
// IsStandardResourceName returns true if the resource is known to the system
func IsStandardResourceName(name core.ResourceName) bool {
return standardResources.Has(name) || IsQuotaHugePageResourceName(name)
}
var integerResources = sets.New(
core.ResourcePods,
core.ResourceQuotas,
core.ResourceServices,
core.ResourceReplicationControllers,
core.ResourceSecrets,
core.ResourceConfigMaps,
core.ResourcePersistentVolumeClaims,
core.ResourceServicesNodePorts,
core.ResourceServicesLoadBalancers,
)
// IsIntegerResourceName returns true if the resource is measured in integer values
func IsIntegerResourceName(name core.ResourceName) bool {
return integerResources.Has(name) || IsExtendedResourceName(name)
}
// IsServiceIPSet checks whether the service's ClusterIP is set;
// the objective is not to perform validation here.
func IsServiceIPSet(service *core.Service) bool {
// This function assumes that the service is semantically validated;
// it does not test whether the IP is valid, it just makes sure that it is set.
return len(service.Spec.ClusterIP) > 0 &&
service.Spec.ClusterIP != core.ClusterIPNone
}
var standardFinalizers = sets.New(
string(core.FinalizerKubernetes),
metav1.FinalizerOrphanDependents,
metav1.FinalizerDeleteDependents,
)
// IsStandardFinalizerName checks if the input string is a standard finalizer name
func IsStandardFinalizerName(str string) bool {
return standardFinalizers.Has(str)
}
// GetAccessModesAsString returns a string representation of an array of access modes.
// modes, when present, are always in the same order: RWO,ROX,RWX,RWOP.
func GetAccessModesAsString(modes []core.PersistentVolumeAccessMode) string {
modes = removeDuplicateAccessModes(modes)
modesStr := []string{}
if ContainsAccessMode(modes, core.ReadWriteOnce) {
modesStr = append(modesStr, "RWO")
}
if ContainsAccessMode(modes, core.ReadOnlyMany) {
modesStr = append(modesStr, "ROX")
}
if ContainsAccessMode(modes, core.ReadWriteMany) {
modesStr = append(modesStr, "RWX")
}
if ContainsAccessMode(modes, core.ReadWriteOncePod) {
modesStr = append(modesStr, "RWOP")
}
return strings.Join(modesStr, ",")
}
// GetAccessModesFromString returns an array of AccessModes from a string created by GetAccessModesAsString
func GetAccessModesFromString(modes string) []core.PersistentVolumeAccessMode {
strmodes := strings.Split(modes, ",")
accessModes := []core.PersistentVolumeAccessMode{}
for _, s := range strmodes {
s = strings.Trim(s, " ")
switch {
case s == "RWO":
accessModes = append(accessModes, core.ReadWriteOnce)
case s == "ROX":
accessModes = append(accessModes, core.ReadOnlyMany)
case s == "RWX":
accessModes = append(accessModes, core.ReadWriteMany)
case s == "RWOP":
accessModes = append(accessModes, core.ReadWriteOncePod)
}
}
return accessModes
}
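
A minimal round-trip sketch for the access-mode helpers above, assuming the vendored import paths k8s.io/kubernetes/pkg/apis/core and k8s.io/kubernetes/pkg/apis/core/helper:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/core/helper"
)

func main() {
	modes := []core.PersistentVolumeAccessMode{core.ReadWriteMany, core.ReadWriteOnce, core.ReadWriteOnce}
	s := helper.GetAccessModesAsString(modes)
	fmt.Println(s) // RWO,RWX (duplicates removed, fixed ordering)
	fmt.Println(helper.GetAccessModesFromString(s)) // [ReadWriteOnce ReadWriteMany]
}
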
// removeDuplicateAccessModes returns an array of access modes without any duplicates
func removeDuplicateAccessModes(modes []core.PersistentVolumeAccessMode) []core.PersistentVolumeAccessMode {
accessModes := []core.PersistentVolumeAccessMode{}
for _, m := range modes {
if !ContainsAccessMode(accessModes, m) {
accessModes = append(accessModes, m)
}
}
return accessModes
}
func ContainsAccessMode(modes []core.PersistentVolumeAccessMode, mode core.PersistentVolumeAccessMode) bool {
for _, m := range modes {
if m == mode {
return true
}
}
return false
}
func ClaimContainsAllocatedResources(pvc *core.PersistentVolumeClaim) bool {
if pvc == nil {
return false
}
if pvc.Status.AllocatedResources != nil {
return true
}
return false
}
func ClaimContainsAllocatedResourceStatus(pvc *core.PersistentVolumeClaim) bool {
if pvc == nil {
return false
}
if pvc.Status.AllocatedResourceStatuses != nil {
return true
}
return false
}
// GetTolerationsFromPodAnnotations gets the json serialized tolerations data from Pod.Annotations
// and converts it to the []Toleration type in core.
func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]core.Toleration, error) {
var tolerations []core.Toleration
if len(annotations) > 0 && annotations[core.TolerationsAnnotationKey] != "" {
err := json.Unmarshal([]byte(annotations[core.TolerationsAnnotationKey]), &tolerations)
if err != nil {
return tolerations, err
}
}
return tolerations, nil
}
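
A minimal usage sketch for GetTolerationsFromPodAnnotations, assuming the vendored import paths; the sample annotation value is illustrative:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/core/helper"
)

func main() {
	annotations := map[string]string{
		core.TolerationsAnnotationKey: `[{"key":"dedicated","operator":"Equal","value":"gpu","effect":"NoSchedule"}]`,
	}
	tolerations, err := helper.GetTolerationsFromPodAnnotations(annotations)
	if err != nil {
		panic(err)
	}
	// Roughly: {Key:dedicated Operator:Equal Value:gpu Effect:NoSchedule TolerationSeconds:<nil>}
	fmt.Printf("%+v\n", tolerations[0])
}
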
// AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list.
// Returns true if something was updated, false otherwise.
func AddOrUpdateTolerationInPod(pod *core.Pod, toleration *core.Toleration) bool {
podTolerations := pod.Spec.Tolerations
var newTolerations []core.Toleration
updated := false
for i := range podTolerations {
if toleration.MatchToleration(&podTolerations[i]) {
if Semantic.DeepEqual(toleration, podTolerations[i]) {
return false
}
newTolerations = append(newTolerations, *toleration)
updated = true
continue
}
newTolerations = append(newTolerations, podTolerations[i])
}
if !updated {
newTolerations = append(newTolerations, *toleration)
}
pod.Spec.Tolerations = newTolerations
return true
}
// GetTaintsFromNodeAnnotations gets the json serialized taints data from Node.Annotations
// and converts it to the []Taint type in core.
func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]core.Taint, error) {
var taints []core.Taint
if len(annotations) > 0 && annotations[core.TaintsAnnotationKey] != "" {
err := json.Unmarshal([]byte(annotations[core.TaintsAnnotationKey]), &taints)
if err != nil {
return []core.Taint{}, err
}
}
return taints, nil
}
// GetPersistentVolumeClass returns StorageClassName.
func GetPersistentVolumeClass(volume *core.PersistentVolume) string {
// Use beta annotation first
if class, found := volume.Annotations[core.BetaStorageClassAnnotation]; found {
return class
}
return volume.Spec.StorageClassName
}
// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was
// requested, it returns "".
func GetPersistentVolumeClaimClass(claim *core.PersistentVolumeClaim) string {
// Use beta annotation first
if class, found := claim.Annotations[core.BetaStorageClassAnnotation]; found {
return class
}
if claim.Spec.StorageClassName != nil {
return *claim.Spec.StorageClassName
}
return ""
}
// PersistentVolumeClaimHasClass returns true if given claim has set StorageClassName field.
func PersistentVolumeClaimHasClass(claim *core.PersistentVolumeClaim) bool {
// Use beta annotation first
if _, found := claim.Annotations[core.BetaStorageClassAnnotation]; found {
return true
}
if claim.Spec.StorageClassName != nil {
return true
}
return false
}
// GetDeletionCostFromPodAnnotations returns the integer value of pod-deletion-cost. Returns 0
// if not set or the value is invalid.
func GetDeletionCostFromPodAnnotations(annotations map[string]string) (int32, error) {
if value, exist := annotations[core.PodDeletionCost]; exist {
// values that start with a plus sign (e.g., "+10") or leading zeros (e.g., "008") are not valid.
if !validFirstDigit(value) {
return 0, fmt.Errorf("invalid value %q", value)
}
i, err := strconv.ParseInt(value, 10, 32)
if err != nil {
// make sure we default to 0 on error.
return 0, err
}
return int32(i), nil
}
return 0, nil
}
func validFirstDigit(str string) bool {
if len(str) == 0 {
return false
}
return str[0] == '-' || (str[0] == '0' && str == "0") || (str[0] >= '1' && str[0] <= '9')
}
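
A minimal usage sketch for GetDeletionCostFromPodAnnotations, assuming the vendored import paths:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/core/helper"
)

func main() {
	fmt.Println(helper.GetDeletionCostFromPodAnnotations(map[string]string{core.PodDeletionCost: "-5"}))  // -5 <nil>
	fmt.Println(helper.GetDeletionCostFromPodAnnotations(map[string]string{core.PodDeletionCost: "+10"})) // 0 invalid value "+10"
	fmt.Println(helper.GetDeletionCostFromPodAnnotations(nil))                                            // 0 <nil> (annotation not set)
}
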

View File

@ -0,0 +1,171 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// NOTE: DO NOT use those helper functions through client-go, the
// package path will be changed in the future.
package qos
import (
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/features"
)
var supportedQoSComputeResources = sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory))
func isSupportedQoSComputeResource(name core.ResourceName) bool {
return supportedQoSComputeResources.Has(string(name))
}
// GetPodQOS returns the QoS class of a pod persisted in the PodStatus.QOSClass field.
// If PodStatus.QOSClass is empty, it returns the value of ComputePodQOS(), which evaluates the pod's QoS class.
func GetPodQOS(pod *core.Pod) core.PodQOSClass {
if pod.Status.QOSClass != "" {
return pod.Status.QOSClass
}
return ComputePodQOS(pod)
}
// zeroQuantity represents a resource.Quantity with value "0", used as a baseline
// for resource comparisons.
var zeroQuantity = resource.MustParse("0")
// processResourceList adds the non-zero quantities of supported QoS compute
// resources from newList to list.
func processResourceList(list, newList core.ResourceList) {
for name, quantity := range newList {
if !isSupportedQoSComputeResource(name) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
delta := quantity.DeepCopy()
if _, exists := list[name]; !exists {
list[name] = delta
} else {
delta.Add(list[name])
list[name] = delta
}
}
}
}
// getQOSResources returns a set of resource names from the provided resource list that:
// 1. Are supported QoS compute resources
// 2. Have quantities greater than zero
func getQOSResources(list core.ResourceList) sets.Set[string] {
qosResources := sets.New[string]()
for name, quantity := range list {
if !isSupportedQoSComputeResource(name) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
qosResources.Insert(string(name))
}
}
return qosResources
}
// ComputePodQOS evaluates the list of containers to determine a pod's QoS class. This function is more
// expensive than GetPodQOS, which should be used for pods having a non-empty .Status.QOSClass.
// A pod is besteffort if none of its containers have specified any requests or limits.
// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
// A pod is burstable if limits and requests do not match across all containers.
// When this function is updated please also update staging/src/k8s.io/kubectl/pkg/util/qos/qos.go
func ComputePodQOS(pod *core.Pod) core.PodQOSClass {
requests := core.ResourceList{}
limits := core.ResourceList{}
isGuaranteed := true
// When pod-level resources are specified, we use them to determine QoS class.
if utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) &&
pod.Spec.Resources != nil {
if len(pod.Spec.Resources.Requests) > 0 {
// process requests
processResourceList(requests, pod.Spec.Resources.Requests)
}
if len(pod.Spec.Resources.Limits) > 0 {
// process limits
processResourceList(limits, pod.Spec.Resources.Limits)
qosLimitResources := getQOSResources(pod.Spec.Resources.Limits)
if !qosLimitResources.HasAll(string(core.ResourceMemory), string(core.ResourceCPU)) {
isGuaranteed = false
}
}
} else {
// note, ephemeral containers are not considered for QoS as they cannot define resources
allContainers := []core.Container{}
allContainers = append(allContainers, pod.Spec.Containers...)
allContainers = append(allContainers, pod.Spec.InitContainers...)
for _, container := range allContainers {
// process requests
for name, quantity := range container.Resources.Requests {
if !isSupportedQoSComputeResource(name) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
delta := quantity.DeepCopy()
if _, exists := requests[name]; !exists {
requests[name] = delta
} else {
delta.Add(requests[name])
requests[name] = delta
}
}
}
// process limits
qosLimitsFound := sets.NewString()
for name, quantity := range container.Resources.Limits {
if !isSupportedQoSComputeResource(name) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
qosLimitsFound.Insert(string(name))
delta := quantity.DeepCopy()
if _, exists := limits[name]; !exists {
limits[name] = delta
} else {
delta.Add(limits[name])
limits[name] = delta
}
}
}
if !qosLimitsFound.HasAll(string(core.ResourceMemory), string(core.ResourceCPU)) {
isGuaranteed = false
}
}
}
if len(requests) == 0 && len(limits) == 0 {
return core.PodQOSBestEffort
}
// Check if requests match limits for all resources.
if isGuaranteed {
for name, req := range requests {
if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 {
isGuaranteed = false
break
}
}
}
if isGuaranteed &&
len(requests) == len(limits) {
return core.PodQOSGuaranteed
}
return core.PodQOSBurstable
}
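
A minimal sketch showing ComputePodQOS classifying a pod whose single container has equal CPU/memory requests and limits as Guaranteed, assuming the vendored import path k8s.io/kubernetes/pkg/apis/core/helper/qos:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/core/helper/qos"
)

func main() {
	rl := core.ResourceList{
		core.ResourceCPU:    resource.MustParse("500m"),
		core.ResourceMemory: resource.MustParse("256Mi"),
	}
	pod := &core.Pod{
		Spec: core.PodSpec{
			Containers: []core.Container{{
				Name:      "app",
				Resources: core.ResourceRequirements{Requests: rl, Limits: rl},
			}},
		},
	}
	fmt.Println(qos.ComputePodQOS(pod)) // Guaranteed
}
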

View File

@ -0,0 +1,8 @@
# See the OWNERS docs at https://go.k8s.io/owners
reviewers:
- smarterclayton
- deads2k
- caesarxuchao
- liggitt
- dims

View File

@ -0,0 +1,38 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package install installs the v1 monolithic api, making it available as an
// option to all of the API encoding/decoding machinery.
package install
import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/v1"
)
func init() {
Install(legacyscheme.Scheme)
}
// Install registers the API group and adds types to a scheme
func Install(scheme *runtime.Scheme) {
utilruntime.Must(core.AddToScheme(scheme))
utilruntime.Must(v1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion))
}

31
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/json.go generated vendored Normal file
View File

@ -0,0 +1,31 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package core
import "encoding/json"
// This file implements json marshaling/unmarshaling interfaces on objects that are currently marshaled into annotations
// to prevent anyone from marshaling these internal structs.
var _ = json.Marshaler(&AvoidPods{})
var _ = json.Unmarshaler(&AvoidPods{})
// MarshalJSON panics to prevent marshalling of internal structs
func (AvoidPods) MarshalJSON() ([]byte, error) { panic("do not marshal internal struct") }
// UnmarshalJSON panics to prevent unmarshalling of internal structs
func (*AvoidPods) UnmarshalJSON([]byte) error { panic("do not unmarshal to internal struct") }

View File

@ -0,0 +1,37 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//TODO: consider making these methods functions, because we don't want helper
//functions in the k8s.io/api repo.
package core
import (
"k8s.io/apimachinery/pkg/runtime/schema"
)
// SetGroupVersionKind sets the API version and kind of the object reference
func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) {
obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
}
// GroupVersionKind returns the API version and kind of the object reference
func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind {
return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
}
// GetObjectKind returns the kind of object reference
func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj }
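
Illustrative usage, not part of the vendored file: a short sketch of the round trip between an ObjectReference and its schema.GroupVersionKind.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/runtime/schema"
    core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
    ref := &core.ObjectReference{}
    ref.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"})

    fmt.Println(ref.APIVersion, ref.Kind) // "apps/v1 Deployment"
    fmt.Println(ref.GroupVersionKind())   // "apps/v1, Kind=Deployment"
}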

View File

@ -0,0 +1,97 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pods
import (
"fmt"
"k8s.io/apimachinery/pkg/util/validation/field"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/fieldpath"
)
// ContainerVisitorWithPath is called with each container and the field.Path to that container,
// and returns true if visiting should continue.
type ContainerVisitorWithPath func(container *api.Container, path *field.Path) bool
// VisitContainersWithPath invokes the visitor function with a pointer to the spec
// of every container in the given pod spec and the field.Path to that container.
// If visitor returns false, visiting is short-circuited. VisitContainersWithPath returns true if visiting completes,
// false if visiting was short-circuited.
func VisitContainersWithPath(podSpec *api.PodSpec, specPath *field.Path, visitor ContainerVisitorWithPath) bool {
fldPath := specPath.Child("initContainers")
for i := range podSpec.InitContainers {
if !visitor(&podSpec.InitContainers[i], fldPath.Index(i)) {
return false
}
}
fldPath = specPath.Child("containers")
for i := range podSpec.Containers {
if !visitor(&podSpec.Containers[i], fldPath.Index(i)) {
return false
}
}
fldPath = specPath.Child("ephemeralContainers")
for i := range podSpec.EphemeralContainers {
if !visitor((*api.Container)(&podSpec.EphemeralContainers[i].EphemeralContainerCommon), fldPath.Index(i)) {
return false
}
}
return true
}
// ConvertDownwardAPIFieldLabel converts the specified downward API field label
// and its value in the pod of the specified version to the internal version,
// and returns the converted label and value. This function returns an error if
// the conversion fails.
func ConvertDownwardAPIFieldLabel(version, label, value string) (string, string, error) {
if version != "v1" {
return "", "", fmt.Errorf("unsupported pod version: %s", version)
}
if path, _, ok := fieldpath.SplitMaybeSubscriptedPath(label); ok {
switch path {
case "metadata.annotations", "metadata.labels":
return label, value, nil
default:
return "", "", fmt.Errorf("field label does not support subscript: %s", label)
}
}
switch label {
case "metadata.annotations",
"metadata.labels",
"metadata.name",
"metadata.namespace",
"metadata.uid",
"spec.nodeName",
"spec.restartPolicy",
"spec.serviceAccountName",
"spec.schedulerName",
"status.phase",
"status.hostIP",
"status.hostIPs",
"status.podIP",
"status.podIPs":
return label, value, nil
// This is for backwards compatibility with old v1 clients which send spec.host
case "spec.host":
return "spec.nodeName", value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
}
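
Illustrative usage, not part of the vendored file: a sketch that walks every container of a pod spec and prints the field path pointing at it. The import path for this pods package is an assumption (k8s.io/kubernetes/pkg/apis/core/pods).

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/validation/field"
    api "k8s.io/kubernetes/pkg/apis/core"
    "k8s.io/kubernetes/pkg/apis/core/pods" // assumed vendored path
)

func main() {
    spec := &api.PodSpec{
        InitContainers: []api.Container{{Name: "init"}},
        Containers:     []api.Container{{Name: "app"}, {Name: "sidecar"}},
    }

    // Visit init containers and containers in order; returning true keeps the walk going.
    pods.VisitContainersWithPath(spec, field.NewPath("spec"), func(c *api.Container, p *field.Path) bool {
        fmt.Printf("%s -> %s\n", p.String(), c.Name) // e.g. "spec.initContainers[0] -> init"
        return true
    })
}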

102
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/register.go generated vendored Normal file
View File

@ -0,0 +1,102 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package core
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name use in this package
const GroupName = ""
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder object to register various known types
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme represents a func that can be used to apply all the registered
// funcs in a scheme
AddToScheme = SchemeBuilder.AddToScheme
)
func addKnownTypes(scheme *runtime.Scheme) error {
if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil {
return err
}
scheme.AddKnownTypes(SchemeGroupVersion,
&Pod{},
&PodList{},
&PodStatusResult{},
&PodTemplate{},
&PodTemplateList{},
&ReplicationControllerList{},
&ReplicationController{},
&ServiceList{},
&Service{},
&ServiceProxyOptions{},
&NodeList{},
&Node{},
&NodeProxyOptions{},
&Endpoints{},
&EndpointsList{},
&Binding{},
&Event{},
&EventList{},
&List{},
&LimitRange{},
&LimitRangeList{},
&ResourceQuota{},
&ResourceQuotaList{},
&Namespace{},
&NamespaceList{},
&ServiceAccount{},
&ServiceAccountList{},
&Secret{},
&SecretList{},
&PersistentVolume{},
&PersistentVolumeList{},
&PersistentVolumeClaim{},
&PersistentVolumeClaimList{},
&PodAttachOptions{},
&PodLogOptions{},
&PodExecOptions{},
&PodPortForwardOptions{},
&PodProxyOptions{},
&ComponentStatus{},
&ComponentStatusList{},
&SerializedReference{},
&RangeAllocation{},
&ConfigMap{},
&ConfigMapList{},
)
return nil
}
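
Illustrative usage, not part of the vendored file: a sketch of adding the internal core types to a scheme and querying the registration helpers defined above.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/runtime"
    core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
    scheme := runtime.NewScheme()
    if err := core.AddToScheme(scheme); err != nil {
        panic(err)
    }

    // The legacy core group has the empty group name; the internal version is "__internal".
    fmt.Println(core.SchemeGroupVersion.String())                           // "__internal"
    fmt.Println(scheme.Recognizes(core.SchemeGroupVersion.WithKind("Pod"))) // true
    fmt.Println(core.Resource("pods").String())                             // "pods"
}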

58
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go generated vendored Normal file
View File

@ -0,0 +1,58 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package core
import (
"k8s.io/apimachinery/pkg/api/resource"
)
func (rn ResourceName) String() string {
return string(rn)
}
// CPU returns the CPU limit if specified.
func (rl *ResourceList) CPU() *resource.Quantity {
return rl.Name(ResourceCPU, resource.DecimalSI)
}
// Memory returns the Memory limit if specified.
func (rl *ResourceList) Memory() *resource.Quantity {
return rl.Name(ResourceMemory, resource.BinarySI)
}
// Storage returns the Storage limit if specified.
func (rl *ResourceList) Storage() *resource.Quantity {
return rl.Name(ResourceStorage, resource.BinarySI)
}
// Pods returns the pods limit if specified.
func (rl *ResourceList) Pods() *resource.Quantity {
return rl.Name(ResourcePods, resource.DecimalSI)
}
// StorageEphemeral returns the ephemeral storage limit if specified.
func (rl *ResourceList) StorageEphemeral() *resource.Quantity {
return rl.Name(ResourceEphemeralStorage, resource.BinarySI)
}
// Name returns the quantity for the named resource if specified, otherwise it returns a zero quantity in the default format.
func (rl *ResourceList) Name(name ResourceName, defaultFormat resource.Format) *resource.Quantity {
if val, ok := (*rl)[name]; ok {
return &val
}
return &resource.Quantity{Format: defaultFormat}
}
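
Illustrative usage, not part of the vendored file: a sketch of the ResourceList accessors; missing entries come back as a zero quantity in the default format rather than nil.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
    core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
    rl := core.ResourceList{
        core.ResourceCPU:    resource.MustParse("500m"),
        core.ResourceMemory: resource.MustParse("128Mi"),
    }

    fmt.Println(rl.CPU().String())     // "500m"
    fmt.Println(rl.Memory().String())  // "128Mi"
    fmt.Println(rl.Storage().String()) // "0" -- not present, so a zero quantity is returned
}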

42
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go generated vendored Normal file
View File

@ -0,0 +1,42 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//TODO: consider making these methods functions, because we don't want helper
//functions in the k8s.io/api repo.
package core
import "fmt"
// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect;
// if the two taints have the same key:effect, they are considered to match.
func (t *Taint) MatchTaint(taintToMatch Taint) bool {
return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect
}
// ToString converts a taint struct to a string in the format '<key>=<value>:<effect>', '<key>=<value>:', '<key>:<effect>', or '<key>'.
func (t *Taint) ToString() string {
if len(t.Effect) == 0 {
if len(t.Value) == 0 {
return fmt.Sprintf("%v", t.Key)
}
return fmt.Sprintf("%v=%v:", t.Key, t.Value)
}
if len(t.Value) == 0 {
return fmt.Sprintf("%v:%v", t.Key, t.Effect)
}
return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect)
}
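
Illustrative usage, not part of the vendored file: a sketch showing that MatchTaint compares only key and effect, and how ToString renders a taint.

package main

import (
    "fmt"

    core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
    t1 := core.Taint{Key: "dedicated", Value: "gpu", Effect: core.TaintEffectNoSchedule}
    t2 := core.Taint{Key: "dedicated", Value: "other", Effect: core.TaintEffectNoSchedule}

    fmt.Println(t1.ToString())     // "dedicated=gpu:NoSchedule"
    fmt.Println(t1.MatchTaint(t2)) // true -- values differ, but key and effect match
}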

View File

@ -0,0 +1,30 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//TODO: consider making these methods functions, because we don't want helper
//functions in the k8s.io/api repo.
package core
// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by <key,effect,operator,value>;
// if the two tolerations have the same <key,effect,operator,value> combination, they are considered to match.
// TODO: uniqueness check for tolerations in api validations.
func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool {
return t.Key == tolerationToMatch.Key &&
t.Effect == tolerationToMatch.Effect &&
t.Operator == tolerationToMatch.Operator &&
t.Value == tolerationToMatch.Value
}
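
Illustrative usage, not part of the vendored file: a sketch showing that MatchToleration compares the full <key,effect,operator,value> tuple.

package main

import (
    "fmt"

    core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
    a := core.Toleration{Key: "dedicated", Operator: core.TolerationOpEqual, Value: "gpu", Effect: core.TaintEffectNoSchedule}
    b := a // identical tuple

    fmt.Println(a.MatchToleration(&b)) // true

    b.Value = "other"
    fmt.Println(a.MatchToleration(&b)) // false -- the value differs
}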

6772
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/types.go generated vendored Normal file

File diff suppressed because it is too large

24
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/v1/OWNERS generated vendored Normal file
View File

@ -0,0 +1,24 @@
# See the OWNERS docs at https://go.k8s.io/owners
reviewers:
- thockin
- smarterclayton
- wojtek-t
- deads2k
- yujuhong
- derekwaynecarr
- caesarxuchao
- mikedanese
- liggitt
- sttts
- dchen1107
- saad-ali
- luxas
- janetkuo
- justinsb
- tallclair
- jsafrane
- dims
- jayunit100
emeritus_reviewers:
- ncdc

View File

@ -0,0 +1,566 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"fmt"
"reflect"
"k8s.io/utils/ptr"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/apis/core"
)
func addConversionFuncs(scheme *runtime.Scheme) error {
// Add field conversion funcs.
err := scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Pod"),
func(label, value string) (string, string, error) {
switch label {
case "metadata.name",
"metadata.namespace",
"spec.nodeName",
"spec.restartPolicy",
"spec.schedulerName",
"spec.serviceAccountName",
"spec.hostNetwork",
"status.phase",
"status.podIP",
"status.podIPs",
"status.nominatedNodeName":
return label, value, nil
// This is for backwards compatibility with old v1 clients which send spec.host
case "spec.host":
return "spec.nodeName", value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
},
)
if err != nil {
return err
}
err = scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Node"),
func(label, value string) (string, string, error) {
switch label {
case "metadata.name":
return label, value, nil
case "spec.unschedulable":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
},
)
if err != nil {
return err
}
err = scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("ReplicationController"),
func(label, value string) (string, string, error) {
switch label {
case "metadata.name",
"metadata.namespace",
"status.replicas":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
if err != nil {
return err
}
if err := AddFieldLabelConversionsForEvent(scheme); err != nil {
return err
}
if err := AddFieldLabelConversionsForNamespace(scheme); err != nil {
return err
}
if err := AddFieldLabelConversionsForSecret(scheme); err != nil {
return err
}
if err := AddFieldLabelConversionsForService(scheme); err != nil {
return err
}
return nil
}
func Convert_v1_ReplicationController_To_apps_ReplicaSet(in *v1.ReplicationController, out *apps.ReplicaSet, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_ReplicationControllerSpec_To_apps_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_ReplicationControllerStatus_To_apps_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
func Convert_v1_ReplicationControllerSpec_To_apps_ReplicaSetSpec(in *v1.ReplicationControllerSpec, out *apps.ReplicaSetSpec, s conversion.Scope) error {
out.Replicas = *in.Replicas
out.MinReadySeconds = in.MinReadySeconds
if in.Selector != nil {
out.Selector = new(metav1.LabelSelector)
metav1.Convert_Map_string_To_string_To_v1_LabelSelector(&in.Selector, out.Selector, s)
}
if in.Template != nil {
if err := Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in.Template, &out.Template, s); err != nil {
return err
}
}
return nil
}
func Convert_v1_ReplicationControllerStatus_To_apps_ReplicaSetStatus(in *v1.ReplicationControllerStatus, out *apps.ReplicaSetStatus, s conversion.Scope) error {
out.Replicas = in.Replicas
out.FullyLabeledReplicas = in.FullyLabeledReplicas
out.ReadyReplicas = in.ReadyReplicas
out.AvailableReplicas = in.AvailableReplicas
out.ObservedGeneration = in.ObservedGeneration
for _, cond := range in.Conditions {
out.Conditions = append(out.Conditions, apps.ReplicaSetCondition{
Type: apps.ReplicaSetConditionType(cond.Type),
Status: core.ConditionStatus(cond.Status),
LastTransitionTime: cond.LastTransitionTime,
Reason: cond.Reason,
Message: cond.Message,
})
}
return nil
}
func Convert_apps_ReplicaSet_To_v1_ReplicationController(in *apps.ReplicaSet, out *v1.ReplicationController, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_apps_ReplicaSetSpec_To_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil {
fieldErr, ok := err.(*field.Error)
if !ok {
return err
}
if out.Annotations == nil {
out.Annotations = make(map[string]string)
}
out.Annotations[v1.NonConvertibleAnnotationPrefix+"/"+fieldErr.Field] = reflect.ValueOf(fieldErr.BadValue).String()
}
if err := Convert_apps_ReplicaSetStatus_To_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
func Convert_apps_ReplicaSetSpec_To_v1_ReplicationControllerSpec(in *apps.ReplicaSetSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error {
out.Replicas = new(int32)
*out.Replicas = in.Replicas
out.MinReadySeconds = in.MinReadySeconds
var invalidErr error
if in.Selector != nil {
invalidErr = metav1.Convert_v1_LabelSelector_To_Map_string_To_string(in.Selector, &out.Selector, s)
}
out.Template = new(v1.PodTemplateSpec)
if err := Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, out.Template, s); err != nil {
return err
}
return invalidErr
}
func Convert_apps_ReplicaSetStatus_To_v1_ReplicationControllerStatus(in *apps.ReplicaSetStatus, out *v1.ReplicationControllerStatus, s conversion.Scope) error {
out.Replicas = in.Replicas
out.FullyLabeledReplicas = in.FullyLabeledReplicas
out.ReadyReplicas = in.ReadyReplicas
out.AvailableReplicas = in.AvailableReplicas
out.ObservedGeneration = in.ObservedGeneration
for _, cond := range in.Conditions {
out.Conditions = append(out.Conditions, v1.ReplicationControllerCondition{
Type: v1.ReplicationControllerConditionType(cond.Type),
Status: v1.ConditionStatus(cond.Status),
LastTransitionTime: cond.LastTransitionTime,
Reason: cond.Reason,
Message: cond.Message,
})
}
return nil
}
func Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *core.ReplicationControllerSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error {
out.Replicas = &in.Replicas
out.MinReadySeconds = in.MinReadySeconds
out.Selector = in.Selector
if in.Template != nil {
out.Template = new(v1.PodTemplateSpec)
if err := Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in.Template, out.Template, s); err != nil {
return err
}
} else {
out.Template = nil
}
return nil
}
func Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(in *v1.ReplicationControllerSpec, out *core.ReplicationControllerSpec, s conversion.Scope) error {
if in.Replicas != nil {
out.Replicas = *in.Replicas
}
out.MinReadySeconds = in.MinReadySeconds
out.Selector = in.Selector
if in.Template != nil {
out.Template = new(core.PodTemplateSpec)
if err := Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in.Template, out.Template, s); err != nil {
return err
}
} else {
out.Template = nil
}
return nil
}
func Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in *core.PodTemplateSpec, out *v1.PodTemplateSpec, s conversion.Scope) error {
if err := autoConvert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in, out, s); err != nil {
return err
}
// drop init container annotations so they don't take effect on legacy kubelets.
// remove this once the oldest supported kubelet no longer honors the annotations over the field.
out.Annotations = dropInitContainerAnnotations(out.Annotations)
return nil
}
func Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in *v1.PodTemplateSpec, out *core.PodTemplateSpec, s conversion.Scope) error {
if err := autoConvert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in, out, s); err != nil {
return err
}
// drop init container annotations so they don't show up as differences when receiving requests from old clients
out.Annotations = dropInitContainerAnnotations(out.Annotations)
return nil
}
func Convert_v1_PodStatus_To_core_PodStatus(in *v1.PodStatus, out *core.PodStatus, s conversion.Scope) error {
if err := autoConvert_v1_PodStatus_To_core_PodStatus(in, out, s); err != nil {
return err
}
// If both fields (v1.PodIPs and v1.PodIP) are provided and differ, then PodIP is authoritative for compatibility with older kubelets
if (len(in.PodIP) > 0 && len(in.PodIPs) > 0) && (in.PodIP != in.PodIPs[0].IP) {
out.PodIPs = []core.PodIP{
{
IP: in.PodIP,
},
}
}
// at this point, autoConvert copied v1.PodIPs -> core.PodIPs
// if v1.PodIPs was empty but v1.PodIP is not, then set core.PodIPs[0] with v1.PodIP
if len(in.PodIP) > 0 && len(in.PodIPs) == 0 {
out.PodIPs = []core.PodIP{
{
IP: in.PodIP,
},
}
}
return nil
}
func Convert_core_PodStatus_To_v1_PodStatus(in *core.PodStatus, out *v1.PodStatus, s conversion.Scope) error {
if err := autoConvert_core_PodStatus_To_v1_PodStatus(in, out, s); err != nil {
return err
}
// at this point autoConvert copied core.PodIPs -> v1.PodIPs
// v1.PodIP (singular value field, which does not exist in core) needs to
// be set with core.PodIPs[0]
if len(in.PodIPs) > 0 {
out.PodIP = in.PodIPs[0].IP
}
return nil
}
// The following two v1.PodSpec conversions are done here to support v1.ServiceAccount
// as an alias for ServiceAccountName.
func Convert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *v1.PodSpec, s conversion.Scope) error {
if err := autoConvert_core_PodSpec_To_v1_PodSpec(in, out, s); err != nil {
return err
}
// DeprecatedServiceAccount is an alias for ServiceAccountName.
out.DeprecatedServiceAccount = in.ServiceAccountName
if in.SecurityContext != nil {
// the host namespace fields have to be handled here for backward compatibility
// with v1.0.0
out.HostPID = in.SecurityContext.HostPID
out.HostNetwork = in.SecurityContext.HostNetwork
out.HostIPC = in.SecurityContext.HostIPC
out.ShareProcessNamespace = in.SecurityContext.ShareProcessNamespace
out.HostUsers = in.SecurityContext.HostUsers
}
return nil
}
func Convert_core_NodeSpec_To_v1_NodeSpec(in *core.NodeSpec, out *v1.NodeSpec, s conversion.Scope) error {
if err := autoConvert_core_NodeSpec_To_v1_NodeSpec(in, out, s); err != nil {
return err
}
// at this point autoConvert copied core.PodCIDRs -> v1.PodCIDRs
// v1.PodCIDR (singular value field, which does not exist in core) needs to
// be set with core.PodCIDRs[0]
if len(in.PodCIDRs) > 0 {
out.PodCIDR = in.PodCIDRs[0]
}
return nil
}
func Convert_v1_NodeSpec_To_core_NodeSpec(in *v1.NodeSpec, out *core.NodeSpec, s conversion.Scope) error {
if err := autoConvert_v1_NodeSpec_To_core_NodeSpec(in, out, s); err != nil {
return err
}
// If both fields (v1.PodCIDRs and v1.PodCIDR) are provided and differ, then PodCIDR is authoritative for compatibility with older clients
if (len(in.PodCIDR) > 0 && len(in.PodCIDRs) > 0) && (in.PodCIDR != in.PodCIDRs[0]) {
out.PodCIDRs = []string{in.PodCIDR}
}
// at this point, autoConvert copied v1.PodCIDRs -> core.PodCIDRs
// if v1.PodCIDRs was empty but v1.PodCIDR is not, then set core.PodCIDRs[0] with v1.PodCIDR
if len(in.PodCIDR) > 0 && len(in.PodCIDRs) == 0 {
out.PodCIDRs = []string{in.PodCIDR}
}
return nil
}
func Convert_v1_PodSpec_To_core_PodSpec(in *v1.PodSpec, out *core.PodSpec, s conversion.Scope) error {
if err := autoConvert_v1_PodSpec_To_core_PodSpec(in, out, s); err != nil {
return err
}
// We support DeprecatedServiceAccount as an alias for ServiceAccountName.
// If both are specified, ServiceAccountName (the new field) wins.
if in.ServiceAccountName == "" {
out.ServiceAccountName = in.DeprecatedServiceAccount
}
// the host namespace fields have to be handled specially for backward compatibility
// with v1.0.0
if out.SecurityContext == nil {
out.SecurityContext = new(core.PodSecurityContext)
}
out.SecurityContext.HostNetwork = in.HostNetwork
out.SecurityContext.HostPID = in.HostPID
out.SecurityContext.HostIPC = in.HostIPC
out.SecurityContext.ShareProcessNamespace = in.ShareProcessNamespace
out.SecurityContext.HostUsers = in.HostUsers
return nil
}
func Convert_v1_Pod_To_core_Pod(in *v1.Pod, out *core.Pod, s conversion.Scope) error {
if err := autoConvert_v1_Pod_To_core_Pod(in, out, s); err != nil {
return err
}
// drop init container annotations so they don't show up as differences when receiving requests from old clients
out.Annotations = dropInitContainerAnnotations(out.Annotations)
// Forcing the value of TerminationGracePeriodSeconds to 1 if it is negative.
// Just for Pod, not for PodSpec, because we don't want to change the behavior of the PodTemplate.
if in.Spec.TerminationGracePeriodSeconds != nil && *in.Spec.TerminationGracePeriodSeconds < 0 {
out.Spec.TerminationGracePeriodSeconds = ptr.To[int64](1)
}
return nil
}
func Convert_core_Pod_To_v1_Pod(in *core.Pod, out *v1.Pod, s conversion.Scope) error {
if err := autoConvert_core_Pod_To_v1_Pod(in, out, s); err != nil {
return err
}
// drop init container annotations so they don't take effect on legacy kubelets.
// remove this once the oldest supported kubelet no longer honors the annotations over the field.
out.Annotations = dropInitContainerAnnotations(out.Annotations)
// Forcing the value of TerminationGracePeriodSeconds to 1 if it is negative.
// Just for Pod, not for PodSpec, because we don't want to change the behavior of the PodTemplate.
if in.Spec.TerminationGracePeriodSeconds != nil && *in.Spec.TerminationGracePeriodSeconds < 0 {
out.Spec.TerminationGracePeriodSeconds = ptr.To[int64](1)
}
return nil
}
func Convert_v1_Secret_To_core_Secret(in *v1.Secret, out *core.Secret, s conversion.Scope) error {
if err := autoConvert_v1_Secret_To_core_Secret(in, out, s); err != nil {
return err
}
// StringData overwrites Data
if len(in.StringData) > 0 {
if out.Data == nil {
out.Data = map[string][]byte{}
}
for k, v := range in.StringData {
out.Data[k] = []byte(v)
}
}
return nil
}
// +k8s:conversion-fn=copy-only
func Convert_v1_ResourceList_To_core_ResourceList(in *v1.ResourceList, out *core.ResourceList, s conversion.Scope) error {
if *in == nil {
return nil
}
if *out == nil {
*out = make(core.ResourceList, len(*in))
}
for key, val := range *in {
// Moved to defaults
// TODO(#18538): We round up resource values to milli scale to maintain API compatibility.
// In the future, we should instead reject values that need rounding.
// const milliScale = -3
// val.RoundUp(milliScale)
(*out)[core.ResourceName(key)] = val
}
return nil
}
func AddFieldLabelConversionsForEvent(scheme *runtime.Scheme) error {
return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Event"),
func(label, value string) (string, string, error) {
switch label {
case "involvedObject.kind",
"involvedObject.namespace",
"involvedObject.name",
"involvedObject.uid",
"involvedObject.apiVersion",
"involvedObject.resourceVersion",
"involvedObject.fieldPath",
"reason",
"reportingComponent",
"source",
"type",
"metadata.namespace",
"metadata.name":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
}
func AddFieldLabelConversionsForNamespace(scheme *runtime.Scheme) error {
return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Namespace"),
func(label, value string) (string, string, error) {
switch label {
case "status.phase",
"metadata.name":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
}
func AddFieldLabelConversionsForSecret(scheme *runtime.Scheme) error {
return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Secret"),
func(label, value string) (string, string, error) {
switch label {
case "type",
"metadata.namespace",
"metadata.name":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
}
func AddFieldLabelConversionsForService(scheme *runtime.Scheme) error {
return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Service"),
func(label, value string) (string, string, error) {
switch label {
case "metadata.namespace",
"metadata.name",
"spec.clusterIP",
"spec.type":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
}
var initContainerAnnotations = map[string]bool{
"pod.beta.kubernetes.io/init-containers": true,
"pod.alpha.kubernetes.io/init-containers": true,
"pod.beta.kubernetes.io/init-container-statuses": true,
"pod.alpha.kubernetes.io/init-container-statuses": true,
}
// dropInitContainerAnnotations returns a copy of the annotations with init container annotations removed,
// or the original annotations if no init container annotations were present.
//
// this can be removed once no clients prior to 1.8 are supported, and no kubelets prior to 1.8 can be run
// (we don't support kubelets older than 2 versions skewed from the apiserver, but we don't prevent them, either)
func dropInitContainerAnnotations(oldAnnotations map[string]string) map[string]string {
if len(oldAnnotations) == 0 {
return oldAnnotations
}
found := false
for k := range initContainerAnnotations {
if _, ok := oldAnnotations[k]; ok {
found = true
break
}
}
if !found {
return oldAnnotations
}
newAnnotations := make(map[string]string, len(oldAnnotations))
for k, v := range oldAnnotations {
if !initContainerAnnotations[k] {
newAnnotations[k] = v
}
}
return newAnnotations
}
// Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec is defined outside the autogenerated file for use by other API packages
// This is needed because it is referenced from other APIs, but is invisible at code-generation time because of the build tags.
func Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *core.PersistentVolumeSpec, out *v1.PersistentVolumeSpec, s conversion.Scope) error {
return autoConvert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in, out, s)
}
// Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec is defined outside the autogenerated file for use by other API packages
// This is needed because it is referenced from other APIs, but is invisible at code-generation time because of the build tags.
func Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(in *v1.PersistentVolumeSpec, out *core.PersistentVolumeSpec, s conversion.Scope) error {
return autoConvert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(in, out, s)
}
// Convert_Slice_string_To_Pointer_string is needed because decoding URL parameters requires manual assistance.
func Convert_Slice_string_To_Pointer_string(in *[]string, out **string, s conversion.Scope) error {
if len(*in) == 0 {
return nil
}
temp := (*in)[0]
*out = &temp
return nil
}
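
Illustrative usage, not part of the vendored file: a sketch of converting a v1.Pod to the internal core.Pod through a scheme built from the AddToScheme functions of the two packages shown earlier in this commit, demonstrating the negative-grace-period clamp and the PodIP/PodIPs mirroring implemented by the conversion functions above.

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/runtime"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    core "k8s.io/kubernetes/pkg/apis/core"
    corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
    "k8s.io/utils/ptr"
)

func main() {
    scheme := runtime.NewScheme()
    utilruntime.Must(core.AddToScheme(scheme))
    utilruntime.Must(corev1.AddToScheme(scheme))

    in := &v1.Pod{}
    in.Spec.TerminationGracePeriodSeconds = ptr.To[int64](-5)
    in.Status.PodIP = "10.0.0.1"

    out := &core.Pod{}
    if err := scheme.Convert(in, out, nil); err != nil {
        panic(err)
    }

    fmt.Println(*out.Spec.TerminationGracePeriodSeconds) // 1 -- negative values are clamped
    fmt.Println(out.Status.PodIPs)                       // [{10.0.0.1}] -- the singular PodIP is mirrored into PodIPs
}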

View File

@ -0,0 +1,501 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"time"
"k8s.io/utils/ptr"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
utilfeature "k8s.io/apiserver/pkg/util/feature"
resourcehelper "k8s.io/component-helpers/resource"
"k8s.io/kubernetes/pkg/api/v1/service"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/parsers"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_ResourceList(obj *v1.ResourceList) {
for key, val := range *obj {
// TODO(#18538): We round up resource values to milli scale to maintain API compatibility.
// In the future, we should instead reject values that need rounding.
const milliScale = -3
val.RoundUp(milliScale)
(*obj)[v1.ResourceName(key)] = val
}
}
func SetDefaults_ReplicationController(obj *v1.ReplicationController) {
var labels map[string]string
if obj.Spec.Template != nil {
labels = obj.Spec.Template.Labels
}
// TODO: support templates defined elsewhere when we support them in the API
if labels != nil {
if len(obj.Spec.Selector) == 0 {
obj.Spec.Selector = labels
}
if len(obj.Labels) == 0 {
obj.Labels = labels
}
}
if obj.Spec.Replicas == nil {
obj.Spec.Replicas = new(int32)
*obj.Spec.Replicas = 1
}
}
func SetDefaults_Volume(obj *v1.Volume) {
if ptr.AllPtrFieldsNil(&obj.VolumeSource) {
obj.VolumeSource = v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.ImageVolume) && obj.Image != nil && obj.Image.PullPolicy == "" {
// PullPolicy defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
_, tag, _, _ := parsers.ParseImageName(obj.Image.Reference)
if tag == "latest" {
obj.Image.PullPolicy = v1.PullAlways
} else {
obj.Image.PullPolicy = v1.PullIfNotPresent
}
}
}
func SetDefaults_Container(obj *v1.Container) {
if obj.ImagePullPolicy == "" {
// Ignore error and assume it has been validated elsewhere
_, tag, _, _ := parsers.ParseImageName(obj.Image)
// Check image tag
if tag == "latest" {
obj.ImagePullPolicy = v1.PullAlways
} else {
obj.ImagePullPolicy = v1.PullIfNotPresent
}
}
if obj.TerminationMessagePath == "" {
obj.TerminationMessagePath = v1.TerminationMessagePathDefault
}
if obj.TerminationMessagePolicy == "" {
obj.TerminationMessagePolicy = v1.TerminationMessageReadFile
}
}
func SetDefaults_EphemeralContainer(obj *v1.EphemeralContainer) {
SetDefaults_Container((*v1.Container)(&obj.EphemeralContainerCommon))
}
func SetDefaults_Service(obj *v1.Service) {
if obj.Spec.SessionAffinity == "" {
obj.Spec.SessionAffinity = v1.ServiceAffinityNone
}
if obj.Spec.SessionAffinity == v1.ServiceAffinityNone {
obj.Spec.SessionAffinityConfig = nil
}
if obj.Spec.SessionAffinity == v1.ServiceAffinityClientIP {
if obj.Spec.SessionAffinityConfig == nil || obj.Spec.SessionAffinityConfig.ClientIP == nil || obj.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds == nil {
timeoutSeconds := v1.DefaultClientIPServiceAffinitySeconds
obj.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{
ClientIP: &v1.ClientIPConfig{
TimeoutSeconds: &timeoutSeconds,
},
}
}
}
if obj.Spec.Type == "" {
obj.Spec.Type = v1.ServiceTypeClusterIP
}
for i := range obj.Spec.Ports {
sp := &obj.Spec.Ports[i]
if sp.Protocol == "" {
sp.Protocol = v1.ProtocolTCP
}
if sp.TargetPort == intstr.FromInt32(0) || sp.TargetPort == intstr.FromString("") {
sp.TargetPort = intstr.FromInt32(sp.Port)
}
}
// Defaults ExternalTrafficPolicy field for externally-accessible service
// to Global for consistency.
if service.ExternallyAccessible(obj) && obj.Spec.ExternalTrafficPolicy == "" {
obj.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyCluster
}
if obj.Spec.InternalTrafficPolicy == nil {
if obj.Spec.Type == v1.ServiceTypeNodePort || obj.Spec.Type == v1.ServiceTypeLoadBalancer || obj.Spec.Type == v1.ServiceTypeClusterIP {
serviceInternalTrafficPolicyCluster := v1.ServiceInternalTrafficPolicyCluster
obj.Spec.InternalTrafficPolicy = &serviceInternalTrafficPolicyCluster
}
}
if obj.Spec.Type == v1.ServiceTypeLoadBalancer {
if obj.Spec.AllocateLoadBalancerNodePorts == nil {
obj.Spec.AllocateLoadBalancerNodePorts = ptr.To(true)
}
}
if obj.Spec.Type == v1.ServiceTypeLoadBalancer {
if utilfeature.DefaultFeatureGate.Enabled(features.LoadBalancerIPMode) {
ipMode := v1.LoadBalancerIPModeVIP
for i, ing := range obj.Status.LoadBalancer.Ingress {
if ing.IP != "" && ing.IPMode == nil {
obj.Status.LoadBalancer.Ingress[i].IPMode = &ipMode
}
}
}
}
}
func SetDefaults_Pod(obj *v1.Pod) {
// If limits are specified, but requests are not, default requests to limits
// This is done here rather than a more specific defaulting pass on v1.ResourceRequirements
// because we only want this defaulting semantic to take place on a v1.Pod and not a v1.PodTemplate
for i := range obj.Spec.Containers {
// set requests to limits if requests are not specified, but limits are
if obj.Spec.Containers[i].Resources.Limits != nil {
if obj.Spec.Containers[i].Resources.Requests == nil {
obj.Spec.Containers[i].Resources.Requests = make(v1.ResourceList)
}
for key, value := range obj.Spec.Containers[i].Resources.Limits {
if _, exists := obj.Spec.Containers[i].Resources.Requests[key]; !exists {
obj.Spec.Containers[i].Resources.Requests[key] = value.DeepCopy()
}
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) &&
obj.Spec.Containers[i].Resources.Requests != nil {
// For normal containers, set resize restart policy to default value (NotRequired), if not specified.
resizePolicySpecified := make(map[v1.ResourceName]bool)
for _, p := range obj.Spec.Containers[i].ResizePolicy {
resizePolicySpecified[p.ResourceName] = true
}
setDefaultResizePolicy := func(resourceName v1.ResourceName) {
if _, found := resizePolicySpecified[resourceName]; !found {
obj.Spec.Containers[i].ResizePolicy = append(obj.Spec.Containers[i].ResizePolicy,
v1.ContainerResizePolicy{
ResourceName: resourceName,
RestartPolicy: v1.NotRequired,
})
}
}
if _, exists := obj.Spec.Containers[i].Resources.Requests[v1.ResourceCPU]; exists {
setDefaultResizePolicy(v1.ResourceCPU)
}
if _, exists := obj.Spec.Containers[i].Resources.Requests[v1.ResourceMemory]; exists {
setDefaultResizePolicy(v1.ResourceMemory)
}
}
}
for i := range obj.Spec.InitContainers {
if obj.Spec.InitContainers[i].Resources.Limits != nil {
if obj.Spec.InitContainers[i].Resources.Requests == nil {
obj.Spec.InitContainers[i].Resources.Requests = make(v1.ResourceList)
}
for key, value := range obj.Spec.InitContainers[i].Resources.Limits {
if _, exists := obj.Spec.InitContainers[i].Resources.Requests[key]; !exists {
obj.Spec.InitContainers[i].Resources.Requests[key] = value.DeepCopy()
}
}
}
}
// Pod Requests default values must be applied after container-level default values
// have been populated.
if utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) {
defaultPodRequests(obj)
}
if obj.Spec.EnableServiceLinks == nil {
enableServiceLinks := v1.DefaultEnableServiceLinks
obj.Spec.EnableServiceLinks = &enableServiceLinks
}
if obj.Spec.HostNetwork {
defaultHostNetworkPorts(&obj.Spec.Containers)
defaultHostNetworkPorts(&obj.Spec.InitContainers)
}
}
func SetDefaults_PodSpec(obj *v1.PodSpec) {
// New fields added here will break upgrade tests:
// https://github.com/kubernetes/kubernetes/issues/69445
// In most cases the new defaulted field can be added to SetDefaults_Pod instead of here, so
// that it only materializes in the Pod object and not all objects with a PodSpec field.
if obj.DNSPolicy == "" {
obj.DNSPolicy = v1.DNSClusterFirst
}
if obj.RestartPolicy == "" {
obj.RestartPolicy = v1.RestartPolicyAlways
}
if obj.SecurityContext == nil {
obj.SecurityContext = &v1.PodSecurityContext{}
}
if obj.TerminationGracePeriodSeconds == nil {
period := int64(v1.DefaultTerminationGracePeriodSeconds)
obj.TerminationGracePeriodSeconds = &period
}
if obj.SchedulerName == "" {
obj.SchedulerName = v1.DefaultSchedulerName
}
}
func SetDefaults_Probe(obj *v1.Probe) {
if obj.TimeoutSeconds == 0 {
obj.TimeoutSeconds = 1
}
if obj.PeriodSeconds == 0 {
obj.PeriodSeconds = 10
}
if obj.SuccessThreshold == 0 {
obj.SuccessThreshold = 1
}
if obj.FailureThreshold == 0 {
obj.FailureThreshold = 3
}
}
func SetDefaults_SecretVolumeSource(obj *v1.SecretVolumeSource) {
if obj.DefaultMode == nil {
perm := int32(v1.SecretVolumeSourceDefaultMode)
obj.DefaultMode = &perm
}
}
func SetDefaults_ConfigMapVolumeSource(obj *v1.ConfigMapVolumeSource) {
if obj.DefaultMode == nil {
perm := int32(v1.ConfigMapVolumeSourceDefaultMode)
obj.DefaultMode = &perm
}
}
func SetDefaults_DownwardAPIVolumeSource(obj *v1.DownwardAPIVolumeSource) {
if obj.DefaultMode == nil {
perm := int32(v1.DownwardAPIVolumeSourceDefaultMode)
obj.DefaultMode = &perm
}
}
func SetDefaults_Secret(obj *v1.Secret) {
if obj.Type == "" {
obj.Type = v1.SecretTypeOpaque
}
}
func SetDefaults_ProjectedVolumeSource(obj *v1.ProjectedVolumeSource) {
if obj.DefaultMode == nil {
perm := int32(v1.ProjectedVolumeSourceDefaultMode)
obj.DefaultMode = &perm
}
}
func SetDefaults_ServiceAccountTokenProjection(obj *v1.ServiceAccountTokenProjection) {
hour := int64(time.Hour.Seconds())
if obj.ExpirationSeconds == nil {
obj.ExpirationSeconds = &hour
}
}
func SetDefaults_PersistentVolume(obj *v1.PersistentVolume) {
if obj.Status.Phase == "" {
obj.Status.Phase = v1.VolumePending
}
if obj.Spec.PersistentVolumeReclaimPolicy == "" {
obj.Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimRetain
}
if obj.Spec.VolumeMode == nil {
obj.Spec.VolumeMode = new(v1.PersistentVolumeMode)
*obj.Spec.VolumeMode = v1.PersistentVolumeFilesystem
}
}
func SetDefaults_PersistentVolumeClaim(obj *v1.PersistentVolumeClaim) {
if obj.Status.Phase == "" {
obj.Status.Phase = v1.ClaimPending
}
}
func SetDefaults_PersistentVolumeClaimSpec(obj *v1.PersistentVolumeClaimSpec) {
if obj.VolumeMode == nil {
obj.VolumeMode = new(v1.PersistentVolumeMode)
*obj.VolumeMode = v1.PersistentVolumeFilesystem
}
}
func SetDefaults_Endpoints(obj *v1.Endpoints) {
for i := range obj.Subsets {
ss := &obj.Subsets[i]
for i := range ss.Ports {
ep := &ss.Ports[i]
if ep.Protocol == "" {
ep.Protocol = v1.ProtocolTCP
}
}
}
}
func SetDefaults_HTTPGetAction(obj *v1.HTTPGetAction) {
if obj.Path == "" {
obj.Path = "/"
}
if obj.Scheme == "" {
obj.Scheme = v1.URISchemeHTTP
}
}
// SetDefaults_Namespace adds a default label for all namespaces
func SetDefaults_Namespace(obj *v1.Namespace) {
// we can't SetDefaults for nameless namespaces (generateName).
// This code needs to be kept in sync with the implementation that exists
// in Namespace Canonicalize strategy (pkg/registry/core/namespace)
// note that this can result in many calls to feature enablement in some cases, but
// we assume that there's no real cost there.
if len(obj.Name) > 0 {
if obj.Labels == nil {
obj.Labels = map[string]string{}
}
obj.Labels[v1.LabelMetadataName] = obj.Name
}
}
func SetDefaults_NamespaceStatus(obj *v1.NamespaceStatus) {
if obj.Phase == "" {
obj.Phase = v1.NamespaceActive
}
}
func SetDefaults_NodeStatus(obj *v1.NodeStatus) {
if obj.Allocatable == nil && obj.Capacity != nil {
obj.Allocatable = make(v1.ResourceList, len(obj.Capacity))
for key, value := range obj.Capacity {
obj.Allocatable[key] = value.DeepCopy()
}
obj.Allocatable = obj.Capacity
}
}
func SetDefaults_ObjectFieldSelector(obj *v1.ObjectFieldSelector) {
if obj.APIVersion == "" {
obj.APIVersion = "v1"
}
}
func SetDefaults_LimitRangeItem(obj *v1.LimitRangeItem) {
// for container limits, we apply default values
if obj.Type == v1.LimitTypeContainer {
if obj.Default == nil {
obj.Default = make(v1.ResourceList)
}
if obj.DefaultRequest == nil {
obj.DefaultRequest = make(v1.ResourceList)
}
// If a default limit is unspecified, but the max is specified, default the limit to the max
for key, value := range obj.Max {
if _, exists := obj.Default[key]; !exists {
obj.Default[key] = value.DeepCopy()
}
}
// If a default limit is specified, but the default request is not, default request to limit
for key, value := range obj.Default {
if _, exists := obj.DefaultRequest[key]; !exists {
obj.DefaultRequest[key] = value.DeepCopy()
}
}
// If a default request is not specified, but the min is provided, default request to the min
for key, value := range obj.Min {
if _, exists := obj.DefaultRequest[key]; !exists {
obj.DefaultRequest[key] = value.DeepCopy()
}
}
}
}
func SetDefaults_ConfigMap(obj *v1.ConfigMap) {
if obj.Data == nil {
obj.Data = make(map[string]string)
}
}
// With host networking, default all container ports to host ports.
func defaultHostNetworkPorts(containers *[]v1.Container) {
for i := range *containers {
for j := range (*containers)[i].Ports {
if (*containers)[i].Ports[j].HostPort == 0 {
(*containers)[i].Ports[j].HostPort = (*containers)[i].Ports[j].ContainerPort
}
}
}
}
func SetDefaults_HostPathVolumeSource(obj *v1.HostPathVolumeSource) {
typeVol := v1.HostPathUnset
if obj.Type == nil {
obj.Type = &typeVol
}
}
func SetDefaults_PodLogOptions(obj *v1.PodLogOptions) {
if utilfeature.DefaultFeatureGate.Enabled(features.PodLogsQuerySplitStreams) {
if obj.Stream == nil {
obj.Stream = ptr.To(v1.LogStreamAll)
}
}
}
// defaultPodRequests applies default values for pod-level requests, only when
// pod-level limits are set, in following scenarios:
// 1. When at least one container (regular, init or sidecar) has requests set:
// The pod-level requests become equal to the effective requests of all containers
// in the pod.
// 2. When no containers have requests set: The pod-level requests become equal to
// pod-level limits.
// This defaulting behavior ensures consistent resource accounting at the pod-level
// while maintaining compatibility with the container-level specifications, as detailed
// in KEP-2837: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/2837-pod-level-resource-spec/README.md#proposed-validation--defaulting-rules
func defaultPodRequests(obj *v1.Pod) {
// We only populate defaults when the pod-level resources are partly specified already.
if obj.Spec.Resources == nil {
return
}
if len(obj.Spec.Resources.Limits) == 0 {
return
}
var podReqs v1.ResourceList
podReqs = obj.Spec.Resources.Requests
if podReqs == nil {
podReqs = make(v1.ResourceList)
}
aggrCtrReqs := resourcehelper.AggregateContainerRequests(obj, resourcehelper.PodResourcesOptions{})
// When containers specify requests for a resource (supported by
// PodLevelResources feature) and pod-level requests are not set, the pod-level requests
// default to the effective requests of all the containers for that resource.
for key, aggrCtrLim := range aggrCtrReqs {
if _, exists := podReqs[key]; !exists && resourcehelper.IsSupportedPodLevelResource(key) {
podReqs[key] = aggrCtrLim.DeepCopy()
}
}
// When no containers specify requests for a resource, the pod-level requests
// will default to match the pod-level limits, if pod-level
// limits exist for that resource.
for key, podLim := range obj.Spec.Resources.Limits {
if _, exists := podReqs[key]; !exists && resourcehelper.IsSupportedPodLevelResource(key) {
podReqs[key] = podLim.DeepCopy()
}
}
// Only set pod-level resource requests in the PodSpec if the requirements map
// contains entries after collecting container-level requests and pod-level limits.
if len(podReqs) > 0 {
obj.Spec.Resources.Requests = podReqs
}
}
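
Illustrative usage, not part of the vendored file: a sketch applying two of the defaulting functions above to a hand-built pod; the image reference example.com/app:latest is hypothetical. Container requests are defaulted from limits, and a :latest tag yields PullAlways.

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
)

func main() {
    pod := &v1.Pod{
        Spec: v1.PodSpec{
            Containers: []v1.Container{{
                Name:  "app",
                Image: "example.com/app:latest", // hypothetical image reference
                Resources: v1.ResourceRequirements{
                    Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
                },
            }},
        },
    }

    corev1.SetDefaults_Pod(pod)
    corev1.SetDefaults_Container(&pod.Spec.Containers[0])

    fmt.Println(pod.Spec.Containers[0].Resources.Requests.Cpu().String()) // "1" -- requests default to limits
    fmt.Println(pod.Spec.Containers[0].ImagePullPolicy)                   // "Always" -- :latest tag
}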

23
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/v1/doc.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/core
// +k8s:conversion-gen-external-types=k8s.io/api/core/v1
// +k8s:defaulter-gen=TypeMeta
// +k8s:defaulter-gen-input=k8s.io/api/core/v1
// Package v1 is the v1 version of the API.
package v1 // import "k8s.io/kubernetes/pkg/apis/core/v1"

View File

@ -0,0 +1,337 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helper
import (
"fmt"
"strings"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/kubernetes/pkg/apis/core/helper"
)
// IsExtendedResourceName returns true if:
// 1. the resource name is not in the default namespace;
// 2. the resource name does not have the "requests." prefix,
// to avoid confusion with the convention in quota; and
// 3. it satisfies the rules in IsQualifiedName() after being converted into a quota resource name.
func IsExtendedResourceName(name v1.ResourceName) bool {
if IsNativeResource(name) || strings.HasPrefix(string(name), v1.DefaultResourceRequestsPrefix) {
return false
}
// Ensure it satisfies the rules in IsQualifiedName() after converted into quota resource name
nameForQuota := fmt.Sprintf("%s%s", v1.DefaultResourceRequestsPrefix, string(name))
if errs := validation.IsQualifiedName(nameForQuota); len(errs) != 0 {
return false
}
return true
}
// IsPrefixedNativeResource returns true if the resource name is in the
// *kubernetes.io/ namespace.
func IsPrefixedNativeResource(name v1.ResourceName) bool {
return strings.Contains(string(name), v1.ResourceDefaultNamespacePrefix)
}
// IsNativeResource returns true if the resource name is in the
// *kubernetes.io/ namespace. Partially-qualified (unprefixed) names are
// implicitly in the kubernetes.io/ namespace.
func IsNativeResource(name v1.ResourceName) bool {
return !strings.Contains(string(name), "/") ||
IsPrefixedNativeResource(name)
}
// IsHugePageResourceName returns true if the resource name has the huge page
// resource prefix.
func IsHugePageResourceName(name v1.ResourceName) bool {
return strings.HasPrefix(string(name), v1.ResourceHugePagesPrefix)
}
// HugePageResourceName returns a ResourceName with the canonical hugepage
// prefix prepended for the specified page size. The page size is converted
// to its canonical representation.
func HugePageResourceName(pageSize resource.Quantity) v1.ResourceName {
return v1.ResourceName(fmt.Sprintf("%s%s", v1.ResourceHugePagesPrefix, pageSize.String()))
}
// HugePageSizeFromResourceName returns the page size for the specified huge page
// resource name. If the specified input is not a valid huge page resource name
// an error is returned.
func HugePageSizeFromResourceName(name v1.ResourceName) (resource.Quantity, error) {
if !IsHugePageResourceName(name) {
return resource.Quantity{}, fmt.Errorf("resource name: %s is an invalid hugepage name", name)
}
pageSize := strings.TrimPrefix(string(name), v1.ResourceHugePagesPrefix)
return resource.ParseQuantity(pageSize)
}
// HugePageUnitSizeFromByteSize returns the hugepage size in the format <size><unit-prefix>B
// (1024 = "1KB", 1048576 = "1MB", etc). `size` must be evenly divisible into the largest
// unit that can be expressed.
func HugePageUnitSizeFromByteSize(size int64) (string, error) {
// hugePageSizeUnitList is borrowed from opencontainers/runc/libcontainer/cgroups/utils.go
var hugePageSizeUnitList = []string{"B", "KB", "MB", "GB", "TB", "PB"}
idx := 0
len := len(hugePageSizeUnitList) - 1
for size%1024 == 0 && idx < len {
size /= 1024
idx++
}
if size > 1024 && idx < len {
return "", fmt.Errorf("size: %d%s must be guaranteed to divisible into the largest units", size, hugePageSizeUnitList[idx])
}
return fmt.Sprintf("%d%s", size, hugePageSizeUnitList[idx]), nil
}
// IsHugePageMedium returns true if the volume medium is in 'HugePages[-size]' format
func IsHugePageMedium(medium v1.StorageMedium) bool {
if medium == v1.StorageMediumHugePages {
return true
}
return strings.HasPrefix(string(medium), string(v1.StorageMediumHugePagesPrefix))
}
// HugePageSizeFromMedium returns the page size for the specified huge page medium.
// If the specified input is not a valid huge page medium an error is returned.
func HugePageSizeFromMedium(medium v1.StorageMedium) (resource.Quantity, error) {
if !IsHugePageMedium(medium) {
return resource.Quantity{}, fmt.Errorf("medium: %s is not a hugepage medium", medium)
}
if medium == v1.StorageMediumHugePages {
return resource.Quantity{}, fmt.Errorf("medium: %s doesn't have size information", medium)
}
pageSize := strings.TrimPrefix(string(medium), string(v1.StorageMediumHugePagesPrefix))
return resource.ParseQuantity(pageSize)
}
// IsOvercommitAllowed returns true if the resource is in the default
// namespace and is not hugepages.
func IsOvercommitAllowed(name v1.ResourceName) bool {
return IsNativeResource(name) &&
!IsHugePageResourceName(name)
}
// IsAttachableVolumeResourceName returns true when the resource name is prefixed in attachable volume
func IsAttachableVolumeResourceName(name v1.ResourceName) bool {
return strings.HasPrefix(string(name), v1.ResourceAttachableVolumesPrefix)
}
// IsServiceIPSet checks whether the service's ClusterIP is set;
// it does not aim to perform any validation here.
func IsServiceIPSet(service *v1.Service) bool {
return service.Spec.ClusterIP != v1.ClusterIPNone && service.Spec.ClusterIP != ""
}
// GetAccessModesAsString returns a string representation of an array of access modes.
// modes, when present, are always in the same order: RWO,ROX,RWX,RWOP.
func GetAccessModesAsString(modes []v1.PersistentVolumeAccessMode) string {
modes = removeDuplicateAccessModes(modes)
modesStr := []string{}
if ContainsAccessMode(modes, v1.ReadWriteOnce) {
modesStr = append(modesStr, "RWO")
}
if ContainsAccessMode(modes, v1.ReadOnlyMany) {
modesStr = append(modesStr, "ROX")
}
if ContainsAccessMode(modes, v1.ReadWriteMany) {
modesStr = append(modesStr, "RWX")
}
if ContainsAccessMode(modes, v1.ReadWriteOncePod) {
modesStr = append(modesStr, "RWOP")
}
return strings.Join(modesStr, ",")
}
// GetAccessModesFromString returns an array of AccessModes from a string created by GetAccessModesAsString
func GetAccessModesFromString(modes string) []v1.PersistentVolumeAccessMode {
strmodes := strings.Split(modes, ",")
accessModes := []v1.PersistentVolumeAccessMode{}
for _, s := range strmodes {
s = strings.Trim(s, " ")
switch {
case s == "RWO":
accessModes = append(accessModes, v1.ReadWriteOnce)
case s == "ROX":
accessModes = append(accessModes, v1.ReadOnlyMany)
case s == "RWX":
accessModes = append(accessModes, v1.ReadWriteMany)
case s == "RWOP":
accessModes = append(accessModes, v1.ReadWriteOncePod)
}
}
return accessModes
}
// removeDuplicateAccessModes returns an array of access modes without any duplicates
func removeDuplicateAccessModes(modes []v1.PersistentVolumeAccessMode) []v1.PersistentVolumeAccessMode {
accessModes := []v1.PersistentVolumeAccessMode{}
for _, m := range modes {
if !ContainsAccessMode(accessModes, m) {
accessModes = append(accessModes, m)
}
}
return accessModes
}
func ContainsAccessMode(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
for _, m := range modes {
if m == mode {
return true
}
}
return false
}
// NodeSelectorRequirementKeysExistInNodeSelectorTerms checks if a NodeSelectorTerm with key is already specified in terms
func NodeSelectorRequirementKeysExistInNodeSelectorTerms(reqs []v1.NodeSelectorRequirement, terms []v1.NodeSelectorTerm) bool {
for _, req := range reqs {
for _, term := range terms {
for _, r := range term.MatchExpressions {
if r.Key == req.Key {
return true
}
}
}
}
return false
}
// TopologySelectorRequirementsAsSelector converts the []TopologySelectorLabelRequirement api type into a struct
// that implements labels.Selector.
func TopologySelectorRequirementsAsSelector(tsm []v1.TopologySelectorLabelRequirement) (labels.Selector, error) {
if len(tsm) == 0 {
return labels.Nothing(), nil
}
selector := labels.NewSelector()
for _, expr := range tsm {
r, err := labels.NewRequirement(expr.Key, selection.In, expr.Values)
if err != nil {
return nil, err
}
selector = selector.Add(*r)
}
return selector, nil
}
// MatchTopologySelectorTerms checks whether the given labels match any of the topology selector terms (ORed together);
// a nil or empty term matches no objects, while an empty term list matches all objects.
func MatchTopologySelectorTerms(topologySelectorTerms []v1.TopologySelectorTerm, lbls labels.Set) bool {
if len(topologySelectorTerms) == 0 {
// empty term list matches all objects
return true
}
for _, req := range topologySelectorTerms {
// nil or empty term selects no objects
if len(req.MatchLabelExpressions) == 0 {
continue
}
labelSelector, err := TopologySelectorRequirementsAsSelector(req.MatchLabelExpressions)
if err != nil || !labelSelector.Matches(lbls) {
continue
}
return true
}
return false
}
// AddOrUpdateTolerationInPodSpec tries to add a toleration to the toleration list in PodSpec.
// Returns true if something was updated, false otherwise.
func AddOrUpdateTolerationInPodSpec(spec *v1.PodSpec, toleration *v1.Toleration) bool {
podTolerations := spec.Tolerations
var newTolerations []v1.Toleration
updated := false
for i := range podTolerations {
if toleration.MatchToleration(&podTolerations[i]) {
if helper.Semantic.DeepEqual(toleration, podTolerations[i]) {
return false
}
newTolerations = append(newTolerations, *toleration)
updated = true
continue
}
newTolerations = append(newTolerations, podTolerations[i])
}
if !updated {
newTolerations = append(newTolerations, *toleration)
}
spec.Tolerations = newTolerations
return true
}
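A small sketch of adding a toleration to a PodSpec through the helper above (import path assumed).
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" // assumed vendored path
)

func main() {
	spec := &v1.PodSpec{}
	tol := v1.Toleration{
		Key:      "node.kubernetes.io/unreachable",
		Operator: v1.TolerationOpExists,
		Effect:   v1.TaintEffectNoExecute,
	}
	updated := helper.AddOrUpdateTolerationInPodSpec(spec, &tol)
	fmt.Println(updated, len(spec.Tolerations)) // true 1: the toleration was appended
}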
// GetMatchingTolerations returns true and list of Tolerations matching all Taints if all are tolerated, or false otherwise.
func GetMatchingTolerations(taints []v1.Taint, tolerations []v1.Toleration) (bool, []v1.Toleration) {
if len(taints) == 0 {
return true, []v1.Toleration{}
}
if len(tolerations) == 0 && len(taints) > 0 {
return false, []v1.Toleration{}
}
result := []v1.Toleration{}
for i := range taints {
tolerated := false
for j := range tolerations {
if tolerations[j].ToleratesTaint(&taints[i]) {
result = append(result, tolerations[j])
tolerated = true
break
}
}
if !tolerated {
return false, []v1.Toleration{}
}
}
return true, result
}
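And a companion sketch of checking whether a set of tolerations covers a node's taints (import path assumed):
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" // assumed vendored path
)

func main() {
	taints := []v1.Taint{{Key: "dedicated", Value: "storage", Effect: v1.TaintEffectNoSchedule}}
	tolerations := []v1.Toleration{{
		Key:      "dedicated",
		Operator: v1.TolerationOpEqual,
		Value:    "storage",
		Effect:   v1.TaintEffectNoSchedule,
	}}
	ok, matched := helper.GetMatchingTolerations(taints, tolerations)
	fmt.Println(ok, len(matched)) // true 1: the single taint is tolerated
}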
// ScopedResourceSelectorRequirementsAsSelector converts the ScopedResourceSelectorRequirement api type into a struct that implements
// labels.Selector.
func ScopedResourceSelectorRequirementsAsSelector(ssr v1.ScopedResourceSelectorRequirement) (labels.Selector, error) {
selector := labels.NewSelector()
var op selection.Operator
switch ssr.Operator {
case v1.ScopeSelectorOpIn:
op = selection.In
case v1.ScopeSelectorOpNotIn:
op = selection.NotIn
case v1.ScopeSelectorOpExists:
op = selection.Exists
case v1.ScopeSelectorOpDoesNotExist:
op = selection.DoesNotExist
default:
return nil, fmt.Errorf("%q is not a valid scope selector operator", ssr.Operator)
}
r, err := labels.NewRequirement(string(ssr.ScopeName), op, ssr.Values)
if err != nil {
return nil, err
}
selector = selector.Add(*r)
return selector, nil
}
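For illustration, turning a resource-quota scope selector requirement into a labels.Selector (import path assumed):
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" // assumed vendored path
)

func main() {
	ssr := v1.ScopedResourceSelectorRequirement{
		ScopeName: v1.ResourceQuotaScopePriorityClass,
		Operator:  v1.ScopeSelectorOpIn,
		Values:    []string{"high"},
	}
	sel, err := helper.ScopedResourceSelectorRequirementsAsSelector(ssr)
	if err != nil {
		panic(err)
	}
	fmt.Println(sel.String()) // PriorityClass in (high)
}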

View File

@ -0,0 +1,174 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package qos
import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/features"
)
var supportedQoSComputeResources = sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory))
// QOSList is a set of (resource name, QoS class) pairs.
type QOSList map[v1.ResourceName]v1.PodQOSClass
func isSupportedQoSComputeResource(name v1.ResourceName) bool {
return supportedQoSComputeResources.Has(string(name))
}
// GetPodQOS returns the QoS class of a pod persisted in the PodStatus.QOSClass field.
// If PodStatus.QOSClass is empty, it returns value of ComputePodQOS() which evaluates pod's QoS class.
func GetPodQOS(pod *v1.Pod) v1.PodQOSClass {
if pod.Status.QOSClass != "" {
return pod.Status.QOSClass
}
return ComputePodQOS(pod)
}
// zeroQuantity represents a resource.Quantity with value "0", used as a baseline
// for resource comparisons.
var zeroQuantity = resource.MustParse("0")
// processResourceList adds the non-zero quantities of supported QoS compute resources
// from newList to list.
func processResourceList(list, newList v1.ResourceList) {
for name, quantity := range newList {
if !isSupportedQoSComputeResource(name) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
delta := quantity.DeepCopy()
if _, exists := list[name]; !exists {
list[name] = delta
} else {
delta.Add(list[name])
list[name] = delta
}
}
}
}
// getQOSResources returns a set of resource names from the provided resource list that:
// 1. Are supported QoS compute resources
// 2. Have quantities greater than zero
func getQOSResources(list v1.ResourceList) sets.Set[string] {
qosResources := sets.New[string]()
for name, quantity := range list {
if !isSupportedQoSComputeResource(name) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
qosResources.Insert(string(name))
}
}
return qosResources
}
// ComputePodQOS evaluates the list of containers to determine a pod's QoS class. This function is more
// expensive than GetPodQOS which should be used for pods having a non-empty .Status.QOSClass.
// A pod is besteffort if none of its containers have specified any requests or limits.
// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
// A pod is burstable if limits and requests do not match across all containers.
// TODO(ndixita): Refactor ComputePodQOS into smaller functions to make it more
// readable and maintainable.
func ComputePodQOS(pod *v1.Pod) v1.PodQOSClass {
requests := v1.ResourceList{}
limits := v1.ResourceList{}
isGuaranteed := true
// When pod-level resources are specified, we use them to determine QoS class.
if utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) &&
pod.Spec.Resources != nil {
if len(pod.Spec.Resources.Requests) > 0 {
// process requests
processResourceList(requests, pod.Spec.Resources.Requests)
}
if len(pod.Spec.Resources.Limits) > 0 {
// process limits
processResourceList(limits, pod.Spec.Resources.Limits)
qosLimitResources := getQOSResources(pod.Spec.Resources.Limits)
if !qosLimitResources.HasAll(string(v1.ResourceMemory), string(v1.ResourceCPU)) {
isGuaranteed = false
}
}
} else {
// note, ephemeral containers are not considered for QoS as they cannot define resources
allContainers := []v1.Container{}
allContainers = append(allContainers, pod.Spec.Containers...)
allContainers = append(allContainers, pod.Spec.InitContainers...)
for _, container := range allContainers {
// process requests
for name, quantity := range container.Resources.Requests {
if !isSupportedQoSComputeResource(name) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
delta := quantity.DeepCopy()
if _, exists := requests[name]; !exists {
requests[name] = delta
} else {
delta.Add(requests[name])
requests[name] = delta
}
}
}
// process limits
qosLimitsFound := sets.NewString()
for name, quantity := range container.Resources.Limits {
if !isSupportedQoSComputeResource(name) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
qosLimitsFound.Insert(string(name))
delta := quantity.DeepCopy()
if _, exists := limits[name]; !exists {
limits[name] = delta
} else {
delta.Add(limits[name])
limits[name] = delta
}
}
}
if !qosLimitsFound.HasAll(string(v1.ResourceMemory), string(v1.ResourceCPU)) {
isGuaranteed = false
}
}
}
if len(requests) == 0 && len(limits) == 0 {
return v1.PodQOSBestEffort
}
// Check if requests match limits for all resources.
if isGuaranteed {
for name, req := range requests {
if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 {
isGuaranteed = false
break
}
}
}
if isGuaranteed &&
len(requests) == len(limits) {
return v1.PodQOSGuaranteed
}
return v1.PodQOSBurstable
}
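A standalone sketch of the classification: a container whose requests equal its limits for both cpu and memory is classified as Guaranteed. The qos import path follows the upstream layout and is an assumption.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" // assumed vendored path
)

func main() {
	res := v1.ResourceRequirements{
		Requests: v1.ResourceList{
			v1.ResourceCPU:    resource.MustParse("100m"),
			v1.ResourceMemory: resource.MustParse("128Mi"),
		},
		Limits: v1.ResourceList{
			v1.ResourceCPU:    resource.MustParse("100m"),
			v1.ResourceMemory: resource.MustParse("128Mi"),
		},
	}
	pod := &v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{{Name: "c", Resources: res}}}}
	// Requests and limits are set and equal for cpu and memory, so the pod is Guaranteed.
	fmt.Println(qos.ComputePodQOS(pod)) // Guaranteed
}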

View File

@ -0,0 +1,46 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
localSchemeBuilder = &v1.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs)
}
// TODO: remove these global variables
// GroupName is the group name used in this package
const GroupName = ""
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,21 @@
# See the OWNERS docs at https://go.k8s.io/owners
reviewers:
- thockin
- smarterclayton
- wojtek-t
- deads2k
- yujuhong
- derekwaynecarr
- caesarxuchao
- mikedanese
- liggitt
- sttts
- dchen1107
- janetkuo
- justinsb
- pwittrock
- tallclair
- soltysh
- jsafrane
- dims

View File

@ -0,0 +1,19 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package validation has functions for validating the correctness of api
// objects and explaining what is wrong with them when they aren't valid.
package validation // import "k8s.io/kubernetes/pkg/apis/core/validation"

View File

@ -0,0 +1,193 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
"reflect"
"time"
v1 "k8s.io/api/core/v1"
eventsv1beta1 "k8s.io/api/events/v1beta1"
apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/apis/core"
)
const (
ReportingInstanceLengthLimit = 128
ActionLengthLimit = 128
ReasonLengthLimit = 128
NoteLengthLimit = 1024
)
func ValidateEventCreate(event *core.Event, requestVersion schema.GroupVersion) field.ErrorList {
// Make sure events always pass legacy validation.
allErrs := legacyValidateEvent(event, requestVersion)
if requestVersion == v1.SchemeGroupVersion || requestVersion == eventsv1beta1.SchemeGroupVersion {
// No further validation for backwards compatibility.
return allErrs
}
// Strict validation applies to creation via events.k8s.io/v1 API and newer.
allErrs = append(allErrs, ValidateObjectMeta(&event.ObjectMeta, true, apimachineryvalidation.NameIsDNSSubdomain, field.NewPath("metadata"))...)
allErrs = append(allErrs, validateV1EventSeries(event)...)
zeroTime := time.Time{}
if event.EventTime.Time == zeroTime {
allErrs = append(allErrs, field.Required(field.NewPath("eventTime"), ""))
}
if event.Type != v1.EventTypeNormal && event.Type != v1.EventTypeWarning {
allErrs = append(allErrs, field.Invalid(field.NewPath("type"), "", fmt.Sprintf("has invalid value: %v", event.Type)))
}
if event.FirstTimestamp.Time != zeroTime {
allErrs = append(allErrs, field.Invalid(field.NewPath("firstTimestamp"), "", "needs to be unset"))
}
if event.LastTimestamp.Time != zeroTime {
allErrs = append(allErrs, field.Invalid(field.NewPath("lastTimestamp"), "", "needs to be unset"))
}
if event.Count != 0 {
allErrs = append(allErrs, field.Invalid(field.NewPath("count"), "", "needs to be unset"))
}
if event.Source.Component != "" || event.Source.Host != "" {
allErrs = append(allErrs, field.Invalid(field.NewPath("source"), "", "needs to be unset"))
}
return allErrs
}
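A sketch (under assumed vendored import paths) of a minimal event that satisfies the strict events.k8s.io/v1 creation rules above; all field values are illustrative only.
package main

import (
	"fmt"
	"time"

	eventsv1 "k8s.io/api/events/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	core "k8s.io/kubernetes/pkg/apis/core"                      // assumed vendored path
	corevalidation "k8s.io/kubernetes/pkg/apis/core/validation" // assumed vendored path
)

func main() {
	ev := &core.Event{
		ObjectMeta:          metav1.ObjectMeta{Name: "demo-pod.1", Namespace: "default"},
		InvolvedObject:      core.ObjectReference{Namespace: "default", Name: "demo-pod"},
		EventTime:           metav1.NewMicroTime(time.Now()),
		Type:                "Normal",
		Reason:              "Created",
		Action:              "Create",
		ReportingController: "example.com/controller",
		ReportingInstance:   "controller-0",
	}
	// Legacy fields (firstTimestamp, lastTimestamp, count, source) stay unset, as required.
	errs := corevalidation.ValidateEventCreate(ev, eventsv1.SchemeGroupVersion)
	fmt.Println(len(errs)) // 0
}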
func ValidateEventUpdate(newEvent, oldEvent *core.Event, requestVersion schema.GroupVersion) field.ErrorList {
// Make sure the new event always passes legacy validation.
allErrs := legacyValidateEvent(newEvent, requestVersion)
if requestVersion == v1.SchemeGroupVersion || requestVersion == eventsv1beta1.SchemeGroupVersion {
// No further validation for backwards compatibility.
return allErrs
}
// Strict validation applies to update via events.k8s.io/v1 API and newer.
allErrs = append(allErrs, ValidateObjectMetaUpdate(&newEvent.ObjectMeta, &oldEvent.ObjectMeta, field.NewPath("metadata"))...)
// if the series was modified, validate the new data
if !reflect.DeepEqual(newEvent.Series, oldEvent.Series) {
allErrs = append(allErrs, validateV1EventSeries(newEvent)...)
}
allErrs = append(allErrs, ValidateImmutableField(newEvent.InvolvedObject, oldEvent.InvolvedObject, field.NewPath("involvedObject"))...)
allErrs = append(allErrs, ValidateImmutableField(newEvent.Reason, oldEvent.Reason, field.NewPath("reason"))...)
allErrs = append(allErrs, ValidateImmutableField(newEvent.Message, oldEvent.Message, field.NewPath("message"))...)
allErrs = append(allErrs, ValidateImmutableField(newEvent.Source, oldEvent.Source, field.NewPath("source"))...)
allErrs = append(allErrs, ValidateImmutableField(newEvent.FirstTimestamp, oldEvent.FirstTimestamp, field.NewPath("firstTimestamp"))...)
allErrs = append(allErrs, ValidateImmutableField(newEvent.LastTimestamp, oldEvent.LastTimestamp, field.NewPath("lastTimestamp"))...)
allErrs = append(allErrs, ValidateImmutableField(newEvent.Count, oldEvent.Count, field.NewPath("count"))...)
allErrs = append(allErrs, ValidateImmutableField(newEvent.Reason, oldEvent.Reason, field.NewPath("reason"))...)
allErrs = append(allErrs, ValidateImmutableField(newEvent.Type, oldEvent.Type, field.NewPath("type"))...)
// Disallow changes to eventTime greater than microsecond-level precision.
// Tolerating sub-microsecond changes is required to tolerate updates
// from clients that correctly truncate to microsecond-precision when serializing,
// or from clients built with incorrect nanosecond-precision protobuf serialization.
// See https://github.com/kubernetes/kubernetes/issues/111928
newTruncated := newEvent.EventTime.Truncate(time.Microsecond).UTC()
oldTruncated := oldEvent.EventTime.Truncate(time.Microsecond).UTC()
if newTruncated != oldTruncated {
allErrs = append(allErrs, ValidateImmutableField(newEvent.EventTime, oldEvent.EventTime, field.NewPath("eventTime"))...)
}
allErrs = append(allErrs, ValidateImmutableField(newEvent.Action, oldEvent.Action, field.NewPath("action"))...)
allErrs = append(allErrs, ValidateImmutableField(newEvent.Related, oldEvent.Related, field.NewPath("related"))...)
allErrs = append(allErrs, ValidateImmutableField(newEvent.ReportingController, oldEvent.ReportingController, field.NewPath("reportingController"))...)
allErrs = append(allErrs, ValidateImmutableField(newEvent.ReportingInstance, oldEvent.ReportingInstance, field.NewPath("reportingInstance"))...)
return allErrs
}
func validateV1EventSeries(event *core.Event) field.ErrorList {
allErrs := field.ErrorList{}
zeroTime := time.Time{}
if event.Series != nil {
if event.Series.Count < 2 {
allErrs = append(allErrs, field.Invalid(field.NewPath("series.count"), "", "should be at least 2"))
}
if event.Series.LastObservedTime.Time == zeroTime {
allErrs = append(allErrs, field.Required(field.NewPath("series.lastObservedTime"), ""))
}
}
return allErrs
}
// legacyValidateEvent makes sure that the event makes sense.
func legacyValidateEvent(event *core.Event, requestVersion schema.GroupVersion) field.ErrorList {
allErrs := field.ErrorList{}
// Because Go has no named constant for the zero time, compare against an explicit zero value.
zeroTime := time.Time{}
reportingControllerFieldName := "reportingController"
if requestVersion == v1.SchemeGroupVersion {
reportingControllerFieldName = "reportingComponent"
}
// "New" Events need to have EventTime set, so it's validating old object.
if event.EventTime.Time == zeroTime {
// Make sure event.Namespace and event.InvolvedObject.Namespace agree
if len(event.InvolvedObject.Namespace) == 0 {
// event.Namespace must also be empty (or "default", for compatibility with old clients)
if event.Namespace != metav1.NamespaceNone && event.Namespace != metav1.NamespaceDefault {
allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace"))
}
} else {
// event namespace must match
if event.Namespace != event.InvolvedObject.Namespace {
allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace"))
}
}
} else {
if len(event.InvolvedObject.Namespace) == 0 && event.Namespace != metav1.NamespaceDefault && event.Namespace != metav1.NamespaceSystem {
allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace"))
}
if len(event.ReportingController) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath(reportingControllerFieldName), ""))
}
allErrs = append(allErrs, ValidateQualifiedName(event.ReportingController, field.NewPath(reportingControllerFieldName))...)
if len(event.ReportingInstance) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("reportingInstance"), ""))
}
if len(event.ReportingInstance) > ReportingInstanceLengthLimit {
allErrs = append(allErrs, field.Invalid(field.NewPath("reportingInstance"), "", fmt.Sprintf("can have at most %v characters", ReportingInstanceLengthLimit)))
}
if len(event.Action) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("action"), ""))
}
if len(event.Action) > ActionLengthLimit {
allErrs = append(allErrs, field.Invalid(field.NewPath("action"), "", fmt.Sprintf("can have at most %v characters", ActionLengthLimit)))
}
if len(event.Reason) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("reason"), ""))
}
if len(event.Reason) > ReasonLengthLimit {
allErrs = append(allErrs, field.Invalid(field.NewPath("reason"), "", fmt.Sprintf("can have at most %v characters", ReasonLengthLimit)))
}
if len(event.Message) > NoteLengthLimit {
allErrs = append(allErrs, field.Invalid(field.NewPath("message"), "", fmt.Sprintf("can have at most %v characters", NoteLengthLimit)))
}
}
for _, msg := range validation.IsDNS1123Subdomain(event.Namespace) {
allErrs = append(allErrs, field.Invalid(field.NewPath("namespace"), event.Namespace, msg))
}
return allErrs
}

View File

@ -0,0 +1,132 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
"strings"
apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
)
// ValidateSignerName checks that signerName is syntactically valid.
//
// ensure signerName is of the form domain.com/something and up to 571 characters.
// This length and format are specified to accommodate signerNames like:
// <fqdn>/<resource-namespace>.<resource-name>.
// The max length of a FQDN is 253 characters (DNS1123Subdomain max length)
// The max length of a namespace name is 63 characters (DNS1123Label max length)
// The max length of a resource name is 253 characters (DNS1123Subdomain max length)
// We then add an additional 2 characters to account for the one '.' and one '/'.
func ValidateSignerName(fldPath *field.Path, signerName string) field.ErrorList {
var el field.ErrorList
if len(signerName) == 0 {
el = append(el, field.Required(fldPath, ""))
return el
}
segments := strings.Split(signerName, "/")
// validate that there is one '/' in the signerName.
// we do this after validating the domain segment to provide more info to the user.
if len(segments) != 2 {
el = append(el, field.Invalid(fldPath, signerName, "must be a fully qualified domain and path of the form 'example.com/signer-name'"))
// return early here as we should not continue attempting to validate a missing or malformed path segment
// (i.e. one containing multiple or zero `/`)
return el
}
// validate that segments[0] is less than 253 characters altogether
maxDomainSegmentLength := validation.DNS1123SubdomainMaxLength
if len(segments[0]) > maxDomainSegmentLength {
el = append(el, field.TooLong(fldPath, "" /*unused*/, maxDomainSegmentLength))
}
// validate that segments[0] consists of valid DNS1123 labels separated by '.'
domainLabels := strings.Split(segments[0], ".")
for _, lbl := range domainLabels {
// use IsDNS1123Label as we want to ensure the max length of any single label in the domain
// is 63 characters
if errs := validation.IsDNS1123Label(lbl); len(errs) > 0 {
for _, err := range errs {
el = append(el, field.Invalid(fldPath, segments[0], fmt.Sprintf("validating label %q: %s", lbl, err)))
}
// if we encounter any errors whilst parsing the domain segment, break from
// validation as any further error messages will be duplicates, and non-distinguishable
// from each other, confusing users.
break
}
}
// validate that there is at least one '.' in segments[0]
if len(domainLabels) < 2 {
el = append(el, field.Invalid(fldPath, segments[0], "should be a domain with at least two segments separated by dots"))
}
// validate that segments[1] consists of valid DNS1123 subdomains separated by '.'.
pathLabels := strings.Split(segments[1], ".")
for _, lbl := range pathLabels {
// use IsDNS1123Subdomain because it enforces a length restriction of 253 characters
// which is required in order to fit a full resource name into a single 'label'
if errs := validation.IsDNS1123Subdomain(lbl); len(errs) > 0 {
for _, err := range errs {
el = append(el, field.Invalid(fldPath, segments[1], fmt.Sprintf("validating label %q: %s", lbl, err)))
}
// if we encounter any errors whilst parsing the path segment, break from
// validation as any further error messages will be duplicates, and non-distinguishable
// from each other, confusing users.
break
}
}
// ensure that segments[1] can accommodate a dns label + dns subdomain + '.'
maxPathSegmentLength := validation.DNS1123SubdomainMaxLength + validation.DNS1123LabelMaxLength + 1
maxSignerNameLength := maxDomainSegmentLength + maxPathSegmentLength + 1
if len(signerName) > maxSignerNameLength {
el = append(el, field.TooLong(fldPath, "" /*unused*/, maxSignerNameLength))
}
return el
}
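A quick sketch of the syntactic check (the vendored import path is an assumption):
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
	corevalidation "k8s.io/kubernetes/pkg/apis/core/validation" // assumed vendored path
)

func main() {
	fld := field.NewPath("spec", "signerName")
	fmt.Println(len(corevalidation.ValidateSignerName(fld, "example.com/my-signer"))) // 0
	fmt.Println(len(corevalidation.ValidateSignerName(fld, "not-fully-qualified")))   // 1: missing the '/'-separated path segment
}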
// ValidateClusterTrustBundleName checks that a ClusterTrustBundle name conforms
// to the rules documented on the type.
func ValidateClusterTrustBundleName(signerName string) func(name string, prefix bool) []string {
return func(name string, isPrefix bool) []string {
if signerName == "" {
if strings.Contains(name, ":") {
return []string{"ClusterTrustBundle without signer name must not have \":\" in its name"}
}
return apimachineryvalidation.NameIsDNSSubdomain(name, isPrefix)
}
requiredPrefix := strings.ReplaceAll(signerName, "/", ":") + ":"
if !strings.HasPrefix(name, requiredPrefix) {
return []string{fmt.Sprintf("ClusterTrustBundle for signerName %s must be named with prefix %s", signerName, requiredPrefix)}
}
return apimachineryvalidation.NameIsDNSSubdomain(strings.TrimPrefix(name, requiredPrefix), isPrefix)
}
}
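And the corresponding name check for a ClusterTrustBundle bound to that signer (same assumed import path):
package main

import (
	"fmt"

	corevalidation "k8s.io/kubernetes/pkg/apis/core/validation" // assumed vendored path
)

func main() {
	nameFn := corevalidation.ValidateClusterTrustBundleName("example.com/my-signer")
	// The '/' in the signer name becomes ':' in the required object-name prefix.
	fmt.Println(nameFn("example.com:my-signer:ca-bundle", false)) // []
	fmt.Println(nameFn("unprefixed-name", false))                 // one error about the required prefix
}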
func extractSignerNameFromClusterTrustBundleName(name string) (string, bool) {
if splitPoint := strings.LastIndex(name, ":"); splitPoint != -1 {
// This looks like it refers to a signerName trustbundle.
return strings.ReplaceAll(name[:splitPoint], ":", "/"), true
} else {
return "", false
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,24 @@
# See the OWNERS docs at https://go.k8s.io/owners
reviewers:
- thockin
- smarterclayton
- wojtek-t
- deads2k
- derekwaynecarr
- caesarxuchao
- mikedanese
- liggitt
- sttts
- saad-ali
- janetkuo
- justinsb
- tallclair
- mwielgus
- soltysh
- dims
- jpbetz
emeritus_reviewers:
- ncdc
labels:
- sig/apps

View File

@ -0,0 +1,19 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
package extensions // import "k8s.io/kubernetes/pkg/apis/extensions"

View File

@ -0,0 +1,67 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package extensions
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/apis/autoscaling"
"k8s.io/kubernetes/pkg/apis/networking"
)
// GroupName is the group name used in this package
const GroupName = "extensions"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
// Builds new Scheme of known types
var (
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
AddToScheme = SchemeBuilder.AddToScheme
)
// Adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
// TODO this gets cleaned up when the types are fixed
scheme.AddKnownTypes(SchemeGroupVersion,
&apps.Deployment{},
&apps.DeploymentList{},
&apps.DeploymentRollback{},
&apps.DaemonSetList{},
&apps.DaemonSet{},
&networking.Ingress{},
&networking.IngressList{},
&apps.ReplicaSet{},
&apps.ReplicaSetList{},
&autoscaling.Scale{},
&networking.NetworkPolicy{},
&networking.NetworkPolicyList{},
)
return nil
}

View File

@ -0,0 +1,29 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
This file (together with pkg/apis/extensions/v1beta1/types.go) contains the experimental
types in kubernetes. These API objects are experimental, meaning that the
APIs may be broken at any time by the kubernetes team.
DISCLAIMER: The implementation of the experimental API group itself is
a temporary one meant as a stopgap solution until kubernetes has proper
support for multiple API groups. The transition may require changes
beyond registration differences. In other words, experimental API group
support is experimental.
*/
package extensions

View File

@ -0,0 +1,22 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package extensions

View File

@ -0,0 +1,8 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- sig-network-api-approvers
reviewers:
- sig-network-api-reviewers
labels:
- sig/network

View File

@ -0,0 +1,20 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
// +groupName=networking.k8s.io
package networking // import "k8s.io/kubernetes/pkg/apis/networking"

View File

@ -0,0 +1,61 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package networking
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "networking.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder points to a list of functions added to Scheme.
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme applies all the stored functions to the scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&NetworkPolicy{},
&NetworkPolicyList{},
&Ingress{},
&IngressList{},
&IngressClass{},
&IngressClassList{},
&IPAddress{},
&IPAddressList{},
&ServiceCIDR{},
&ServiceCIDRList{},
)
return nil
}

View File

@ -0,0 +1,694 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package networking
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
api "k8s.io/kubernetes/pkg/apis/core"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NetworkPolicy describes what network traffic is allowed for a set of pods
type NetworkPolicy struct {
metav1.TypeMeta
// +optional
metav1.ObjectMeta
// spec represents the specification of the desired behavior for this NetworkPolicy.
// +optional
Spec NetworkPolicySpec
}
// PolicyType describes the NetworkPolicy type
// This type is beta-level in 1.8
type PolicyType string
const (
// PolicyTypeIngress is a NetworkPolicy that affects ingress traffic on selected pods
PolicyTypeIngress PolicyType = "Ingress"
// PolicyTypeEgress is a NetworkPolicy that affects egress traffic on selected pods
PolicyTypeEgress PolicyType = "Egress"
)
// NetworkPolicySpec provides the specification of a NetworkPolicy
type NetworkPolicySpec struct {
// podSelector selects the pods to which this NetworkPolicy object applies.
// The array of ingress rules is applied to any pods selected by this field.
// Multiple network policies can select the same set of pods. In this case,
// the ingress rules for each are combined additively.
// This field is NOT optional and follows standard label selector semantics.
// An empty podSelector matches all pods in this namespace.
PodSelector metav1.LabelSelector
// ingress is a list of ingress rules to be applied to the selected pods.
// Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod
// (and cluster policy otherwise allows the traffic), OR if the traffic source is
// the pod's local node, OR if the traffic matches at least one ingress rule
// across all of the NetworkPolicy objects whose podSelector matches the pod. If
// this field is empty then this NetworkPolicy does not allow any traffic (and serves
// solely to ensure that the pods it selects are isolated by default)
// +optional
Ingress []NetworkPolicyIngressRule
// egress is a list of egress rules to be applied to the selected pods. Outgoing traffic
// is allowed if there are no NetworkPolicies selecting the pod (and cluster policy
// otherwise allows the traffic), OR if the traffic matches at least one egress rule
// across all of the NetworkPolicy objects whose podSelector matches the pod. If
// this field is empty then this NetworkPolicy limits all outgoing traffic (and serves
// solely to ensure that the pods it selects are isolated by default).
// This field is beta-level in 1.8
// +optional
Egress []NetworkPolicyEgressRule
// policyTypes is a list of rule types that the NetworkPolicy relates to.
// Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"].
// If this field is not specified, it will default based on the existence of ingress or egress rules;
// policies that contain an egress section are assumed to affect egress, and all policies
// (whether or not they contain an ingress section) are assumed to affect ingress.
// If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ].
// Likewise, if you want to write a policy that specifies that no egress is allowed,
// you must specify a policyTypes value that includes "Egress" (since such a policy would not include
// an egress section and would otherwise default to just [ "Ingress" ]).
// This field is beta-level in 1.8
// +optional
PolicyTypes []PolicyType
}
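As a sketch of the policyTypes behavior described above, an egress-only "deny all" policy built with the published k8s.io/api/networking/v1 types (which mirror these internal types):
package main

import (
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	np := networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "deny-all-egress", Namespace: "default"},
		Spec: networkingv1.NetworkPolicySpec{
			PodSelector: metav1.LabelSelector{}, // empty selector: every pod in the namespace
			// "Egress" must be listed explicitly; with no egress rules, all outgoing traffic is denied.
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
		},
	}
	fmt.Println(np.Name, np.Spec.PolicyTypes) // deny-all-egress [Egress]
}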
// NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods
// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.
type NetworkPolicyIngressRule struct {
// ports is a list of ports which should be made accessible on the pods selected for
// this rule. Each item in this list is combined using a logical OR. If this field is
// empty or missing, this rule matches all ports (traffic not restricted by port).
// If this field is present and contains at least one item, then this rule allows
// traffic only if the traffic matches at least one port in the list.
// +optional
Ports []NetworkPolicyPort
// from is a list of sources which should be able to access the pods selected for this rule.
// Items in this list are combined using a logical OR operation. If this field is
// empty or missing, this rule matches all sources (traffic not restricted by
// source). If this field is present and contains at least one item, this rule
// allows traffic only if the traffic matches at least one item in the from list.
// +optional
From []NetworkPolicyPeer
}
// NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods
// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to.
// This type is beta-level in 1.8
type NetworkPolicyEgressRule struct {
// ports is a list of destination ports for outgoing traffic.
// Each item in this list is combined using a logical OR. If this field is
// empty or missing, this rule matches all ports (traffic not restricted by port).
// If this field is present and contains at least one item, then this rule allows
// traffic only if the traffic matches at least one port in the list.
// +optional
Ports []NetworkPolicyPort
// to is a list of destinations for outgoing traffic of pods selected for this rule.
// Items in this list are combined using a logical OR operation. If this field is
// empty or missing, this rule matches all destinations (traffic not restricted by
// destination). If this field is present and contains at least one item, this rule
// allows traffic only if the traffic matches at least one item in the to list.
// +optional
To []NetworkPolicyPeer
}
// NetworkPolicyPort describes a port to allow traffic on
type NetworkPolicyPort struct {
// protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match.
// If not specified, this field defaults to TCP.
// +optional
Protocol *api.Protocol
// port represents the port on the given protocol. This can either be a numerical or named
// port on a pod. If this field is not provided, this matches all port names and
// numbers.
// If present, only traffic on the specified protocol AND port will be matched.
// +optional
Port *intstr.IntOrString
// endPort indicates that, if set, the range of ports from port to endPort (inclusive)
// should be allowed by the policy. This field cannot be defined if the port field
// is not defined or if the port field is defined as a named (string) port.
// The endPort must be equal to or greater than port.
// +optional
EndPort *int32
}
// IPBlock describes a particular CIDR (Ex. "192.168.1.0/24","2001:db8::/64") that is allowed
// to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs
// that should not be included within this rule.
type IPBlock struct {
// cidr is a string representing the IPBlock
// Valid examples are "192.168.1.0/24" or "2001:db8::/64"
CIDR string
// except is a list of CIDRs that should not be included within the IPBlock
// Valid examples are "192.168.1.0/24" or "2001:db8::/64"
// Except values will be rejected if they are outside the cidr range
// +optional
Except []string
}
// NetworkPolicyPeer describes a peer to allow traffic to/from.
type NetworkPolicyPeer struct {
// podSelector is a label selector which selects pods. This field follows standard label
// selector semantics; if present but empty, it selects all pods.
//
// If namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects
// the pods matching podSelector in the Namespaces selected by namespaceSelector.
// Otherwise it selects the pods matching podSelector in the policy's own namespace.
// +optional
PodSelector *metav1.LabelSelector
// namespaceSelector selects namespaces using cluster-scoped labels. This field follows
// standard label selector semantics; if present but empty, it selects all namespaces.
//
// If podSelector is also set, then the NetworkPolicyPeer as a whole selects
// the pods matching podSelector in the namespaces selected by namespaceSelector.
// Otherwise it selects all pods in the namespaces selected by namespaceSelector.
// +optional
NamespaceSelector *metav1.LabelSelector
// ipBlock defines policy on a particular IPBlock. If this field is set then
// neither of the other fields can be.
// +optional
IPBlock *IPBlock
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NetworkPolicyList is a list of NetworkPolicy objects.
type NetworkPolicyList struct {
metav1.TypeMeta
// +optional
metav1.ListMeta
Items []NetworkPolicy
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Ingress is a collection of rules that allow inbound connections to reach the
// endpoints defined by a backend. An Ingress can be configured to give services
// externally-reachable urls, load balance traffic, terminate SSL, offer name
// based virtual hosting etc.
type Ingress struct {
metav1.TypeMeta
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta
// spec is the desired state of the Ingress.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Spec IngressSpec
// status is the current state of the Ingress.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Status IngressStatus
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// IngressList is a collection of Ingress.
type IngressList struct {
metav1.TypeMeta
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ListMeta
// items is the list of Ingress.
Items []Ingress
}
// IngressSpec describes the Ingress the user wishes to exist.
type IngressSpec struct {
// ingressClassName is the name of the IngressClass cluster resource. The
// associated IngressClass defines which controller will implement the
// resource. This replaces the deprecated `kubernetes.io/ingress.class`
// annotation. For backwards compatibility, when that annotation is set, it
// must be given precedence over this field. The controller may emit a
// warning if the field and annotation have different values.
// Implementations of this API should ignore Ingresses without a class
// specified. An IngressClass resource may be marked as default, which can
// be used to set a default value for this field. For more information,
// refer to the IngressClass documentation.
// +optional
IngressClassName *string
// defaultBackend is the backend that should handle requests that don't
// match any rule. If Rules are not specified, DefaultBackend must be specified.
// If DefaultBackend is not set, the handling of requests that do not match any
// of the rules will be up to the Ingress controller.
// +optional
DefaultBackend *IngressBackend
// tls represents the TLS configuration. Currently the ingress only supports a
// single TLS port, 443. If multiple members of this list specify different hosts,
// they will be multiplexed on the same port according to the hostname specified
// through the SNI TLS extension, if the ingress controller fulfilling the
// ingress supports SNI.
// +listType=atomic
// +optional
TLS []IngressTLS
// rules is a list of host rules used to configure the Ingress. If unspecified, or
// no rule matches, all traffic is sent to the default backend.
// +listType=atomic
// +optional
Rules []IngressRule
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// IngressClass represents the class of the Ingress, referenced by the Ingress
// Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be
// used to indicate that an IngressClass should be considered default. When a
// single IngressClass resource has this annotation set to true, new Ingress
// resources without a class specified will be assigned this default class.
type IngressClass struct {
metav1.TypeMeta
metav1.ObjectMeta
// spec is the desired state of the IngressClass.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Spec IngressClassSpec
}
// IngressClassSpec provides information about the class of an Ingress.
type IngressClassSpec struct {
// controller refers to the name of the controller that should handle this
// class. This allows for different "flavors" that are controlled by the
// same controller. For example, you may have different parameters for the
// same implementing controller. This should be specified as a
// domain-prefixed path no more than 250 characters in length, e.g.
// "acme.io/ingress-controller". This field is immutable.
Controller string
// parameters is a link to a custom resource containing additional
// configuration for the controller. This is optional if the controller does
// not require extra parameters.
// +optional
Parameters *IngressClassParametersReference
}
const (
// IngressClassParametersReferenceScopeNamespace indicates that the
// referenced Parameters resource is namespace-scoped.
IngressClassParametersReferenceScopeNamespace = "Namespace"
// IngressClassParametersReferenceScopeCluster indicates that the
// referenced Parameters resource is cluster-scoped.
IngressClassParametersReferenceScopeCluster = "Cluster"
)
// IngressClassParametersReference identifies an API object. This can be used
// to specify a cluster or namespace-scoped resource.
type IngressClassParametersReference struct {
// apiGroup is the group for the resource being referenced. If apiGroup is
// not specified, the specified kind must be in the core API group. For any
// other third-party types, apiGroup is required.
// +optional
APIGroup *string
// kind is the type of resource being referenced.
Kind string
// name is the name of resource being referenced.
Name string
// scope represents if this refers to a cluster or namespace scoped resource.
// This may be set to "Cluster" (default) or "Namespace".
// +optional
Scope *string
// namespace is the namespace of the resource being referenced. This field is
// required when scope is set to "Namespace" and must be unset when scope is set to
// "Cluster".
// +optional
Namespace *string
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// IngressClassList is a collection of IngressClasses.
type IngressClassList struct {
metav1.TypeMeta
// Standard object's metadata.
// +optional
metav1.ListMeta
// items is the list of IngressClasses.
Items []IngressClass
}
// IngressTLS describes the transport layer security associated with an ingress.
type IngressTLS struct {
// hosts is a list of hosts included in the TLS certificate. The values in
// this list must match the name/s used in the tlsSecret. Defaults to the
// wildcard host setting for the loadbalancer controller fulfilling this
// Ingress, if left unspecified.
// +listType=atomic
// +optional
Hosts []string
// secretName is the name of the secret used to terminate TLS traffic on
// port 443. Field is left optional to allow TLS routing based on SNI
// hostname alone. If the SNI host in a listener conflicts with the "Host"
// header field used by an IngressRule, the SNI host is used for termination
// and value of the "Host" header is used for routing.
// +optional
SecretName string
// TODO: Consider specifying different modes of termination, protocols etc.
}
// IngressStatus describes the current state of the Ingress.
type IngressStatus struct {
// loadBalancer contains the current status of the load-balancer.
// +optional
LoadBalancer IngressLoadBalancerStatus
}
// IngressLoadBalancerStatus represents the status of a load-balancer
type IngressLoadBalancerStatus struct {
// ingress is a list containing ingress points for the load-balancer.
// +optional
Ingress []IngressLoadBalancerIngress
}
// IngressLoadBalancerIngress represents the status of a load-balancer ingress point.
type IngressLoadBalancerIngress struct {
// ip is set for load-balancer ingress points that are IP based.
// +optional
IP string
// hostname is set for load-balancer ingress points that are DNS based.
// +optional
Hostname string
// ports provides information about the ports exposed by this LoadBalancer.
// +optional
Ports []IngressPortStatus
}
// IngressPortStatus represents the error condition of an ingress port
type IngressPortStatus struct {
// port is the port number of the ingress port.
Port int32
// protocol is the protocol of the ingress port.
Protocol api.Protocol
// error indicates a problem on this port.
// The format of the error must comply with the following rules:
// - Kubernetes-defined error values use CamelCase names
// - Provider-specific error values must follow label-name style (e.g.
// example.com/name).
Error *string
}
// IngressRule represents the rules mapping the paths under a specified host to
// the related backend services. Incoming requests are first evaluated for a
// host match, then routed to the backend associated with the matching
// IngressRuleValue.
type IngressRule struct {
// host is the fully qualified domain name of a network host, as defined by RFC 3986.
// Note the following deviations from the "host" part of the
// URI as defined in RFC 3986:
// 1. IPs are not allowed. Currently an IngressRuleValue can only apply to
// the IP in the Spec of the parent Ingress.
// 2. The `:` delimiter is not respected because ports are not allowed.
// Currently the port of an Ingress is implicitly :80 for http and
// :443 for https.
// Both these may change in the future.
// Incoming requests are matched against the host before the
// IngressRuleValue. If the host is unspecified, the Ingress routes all
// traffic based on the specified IngressRuleValue.
//
// host can be "precise" which is a domain name without the terminating dot of
// a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name
// prefixed with a single wildcard label (e.g. "*.foo.com").
// The wildcard character '*' must appear by itself as the first DNS label and
// matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*").
// Requests will be matched against the host field in the following way:
// 1. If host is precise, the request matches this rule if the http host header is equal to Host.
// 2. If host is a wildcard, then the request matches this rule if the http host header
// is equal to the suffix (removing the first label) of the wildcard rule.
// +optional
Host string
// IngressRuleValue represents a rule to route requests for this
// IngressRule. If unspecified, the rule defaults to a http catch-all.
// Whether that sends just traffic matching the host to the default backend
// or all traffic to the default backend, is left to the controller
// fulfilling the Ingress. Http is currently the only supported
// IngressRuleValue.
// +optional
IngressRuleValue
}
// IngressRuleValue represents a rule to apply against incoming requests. If the
// rule is satisfied, the request is routed to the specified backend. Currently
// mixing different types of rules in a single Ingress is disallowed, so exactly
// one of the following must be set.
type IngressRuleValue struct {
//TODO:
// 1. Consider renaming this resource and the associated rules so they
// aren't tied to Ingress. They can be used to route intra-cluster traffic.
// 2. Consider adding fields for ingress-type specific global options
// usable by a loadbalancer, like http keep-alive.
// +optional
HTTP *HTTPIngressRuleValue
}
// HTTPIngressRuleValue is a list of http selectors pointing to backends.
// In the example http://<host>/<path>?<searchpart> -> backend, where parts
// of the url correspond to RFC 3986, this resource will be used
// to match against everything after the last '/' and before the first '?'
// or '#'.
type HTTPIngressRuleValue struct {
// paths is a collection of paths that map requests to backends.
// +listType=atomic
Paths []HTTPIngressPath
// TODO: Consider adding fields for ingress-type specific global
// options usable by a loadbalancer, like http keep-alive.
}
// PathType represents the type of path referred to by a HTTPIngressPath.
type PathType string
const (
// PathTypeExact matches the URL path exactly and with case sensitivity.
PathTypeExact = PathType("Exact")
// PathTypePrefix matches based on a URL path prefix split by '/'. Matching
// is case sensitive and done on a path element by element basis. A path
// element refers to the list of labels in the path split by the '/'
// separator. A request is a match for path p if p is an element-wise
// prefix of the request path. Note that if the last element of the
// path is a substring of the last element in request path, it is not a
// match (e.g. /foo/bar matches /foo/bar/baz, but does not match
// /foo/barbaz). If multiple matching paths exist in an Ingress spec, the
// longest matching path is given priority.
// Examples:
// - /foo/bar does not match requests to /foo/barbaz
// - /foo/bar matches request to /foo/bar and /foo/bar/baz
// - /foo and /foo/ both match requests to /foo and /foo/. If both paths are
// present in an Ingress spec, the longest matching path (/foo/) is given
// priority.
PathTypePrefix = PathType("Prefix")
// PathTypeImplementationSpecific matching is up to the IngressClass.
// Implementations can treat this as a separate PathType or treat it
// identically to Prefix or Exact path types.
PathTypeImplementationSpecific = PathType("ImplementationSpecific")
)
// HTTPIngressPath associates a path with a backend. Incoming urls matching the
// path are forwarded to the backend.
type HTTPIngressPath struct {
// path is matched against the path of an incoming request. Currently it can
// contain characters disallowed from the conventional "path" part of a URL
// as defined by RFC 3986. Paths must begin with a '/' and must be present
// when using PathType with value "Exact" or "Prefix".
// +optional
Path string
// pathType determines the interpretation of the path matching. PathType can
// be one of Exact, Prefix, or ImplementationSpecific. Implementations are
// required to support all path types.
// +optional
PathType *PathType
// backend defines the referenced service endpoint to which the traffic
// will be forwarded to.
Backend IngressBackend
}
// IngressBackend describes all endpoints for a given service and port.
type IngressBackend struct {
// service references a service as a backend.
// This is a mutually exclusive setting with "Resource".
// +optional
Service *IngressServiceBackend
// resource is an ObjectRef to another Kubernetes resource in the namespace
// of the Ingress object. If resource is specified, a service.Name and
// service.Port must not be specified.
// This is a mutually exclusive setting with "Service".
// +optional
Resource *api.TypedLocalObjectReference
}
// IngressServiceBackend references a Kubernetes Service as a Backend.
type IngressServiceBackend struct {
// name is the referenced service. The service must exist in
// the same namespace as the Ingress object.
Name string
// port of the referenced service. A port name or port number
// is required for a IngressServiceBackend.
Port ServiceBackendPort
}
// ServiceBackendPort is the service port being referenced.
type ServiceBackendPort struct {
// name is the name of the port on the Service.
// This must be an IANA_SVC_NAME (following RFC6335).
// This is a mutually exclusive setting with "Number".
// +optional
Name string
// number is the numerical port number (e.g. 80) on the Service.
// This is a mutually exclusive setting with "Name".
// +optional
Number int32
}
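Pulling the rule, path, and backend pieces together, a sketch using the published k8s.io/api/networking/v1 types (mirrors of the internal types above):
package main

import (
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
)

func main() {
	pathType := networkingv1.PathTypePrefix
	rule := networkingv1.IngressRule{
		Host: "*.example.com", // wildcard: matches a.example.com, but not example.com or a.b.example.com
		IngressRuleValue: networkingv1.IngressRuleValue{
			HTTP: &networkingv1.HTTPIngressRuleValue{
				Paths: []networkingv1.HTTPIngressPath{{
					Path:     "/api",
					PathType: &pathType, // Prefix: "/api" and "/api/v1" match, "/apiserver" does not
					Backend: networkingv1.IngressBackend{
						Service: &networkingv1.IngressServiceBackend{
							Name: "api",
							Port: networkingv1.ServiceBackendPort{Number: 8080},
						},
					},
				}},
			},
		},
	}
	fmt.Println(rule.Host, rule.HTTP.Paths[0].Path) // *.example.com /api
}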
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs
// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses.
// An IP address can be represented in different formats; to guarantee the uniqueness of the IP,
// the name of the object is the IP address in canonical format: four decimal octets separated
// by dots with no leading zeros for IPv4, and the representation defined by RFC 5952 for IPv6.
// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1
// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1
type IPAddress struct {
metav1.TypeMeta
// +optional
metav1.ObjectMeta
// +optional
Spec IPAddressSpec
}
// IPAddressSpec describes the attributes of an IPAddress.
type IPAddressSpec struct {
// ParentRef references the resource that an IPAddress is attached to.
// An IPAddress must reference a parent object.
// +required
ParentRef *ParentReference
}
type ParentReference struct {
// Group is the group of the object being referenced.
Group string
// Resource is the resource of the object being referenced.
Resource string
// Namespace is the namespace of the object being referenced.
Namespace string
// Name is the name of the object being referenced.
Name string
}
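// exampleIPAddress is an illustrative sketch, not part of the upstream API:
// it shows an IPAddress whose name is the IP in the canonical format
// described above, attached to an assumed Service parent. The namespace and
// service name are hypothetical.
func exampleIPAddress() IPAddress {
	return IPAddress{
		ObjectMeta: metav1.ObjectMeta{Name: "192.168.1.5"},
		Spec: IPAddressSpec{
			ParentRef: &ParentReference{
				Group:     "",
				Resource:  "services",
				Namespace: "default",
				Name:      "example-service",
			},
		},
	}
}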
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// IPAddressList contains a list of IPAddress.
type IPAddressList struct {
metav1.TypeMeta
// +optional
metav1.ListMeta
// Items is the list of IPAddress
Items []IPAddress
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db8::/64).
// This range is used to allocate ClusterIPs to Service objects.
type ServiceCIDR struct {
metav1.TypeMeta
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta
// spec is the desired state of the ServiceCIDR.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Spec ServiceCIDRSpec
// status represents the current state of the ServiceCIDR.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Status ServiceCIDRStatus
}
type ServiceCIDRSpec struct {
// CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64")
// from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family.
// This field is immutable.
// +optional
CIDRs []string
}
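// exampleServiceCIDR is an illustrative sketch, not part of the upstream API:
// it shows a dual-stack ServiceCIDR with one CIDR per IP family, as allowed
// by the spec above. The object name and CIDR values are assumptions.
func exampleServiceCIDR() ServiceCIDR {
	return ServiceCIDR{
		ObjectMeta: metav1.ObjectMeta{Name: "example-service-cidr"},
		Spec: ServiceCIDRSpec{
			CIDRs: []string{"10.96.0.0/16", "2001:db8::/64"},
		},
	}
}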
// ServiceCIDRStatus describes the current state of the ServiceCIDR.
type ServiceCIDRStatus struct {
// conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR.
Conditions []metav1.Condition
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.27
// ServiceCIDRList contains a list of ServiceCIDR objects.
type ServiceCIDRList struct {
metav1.TypeMeta
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ListMeta
// items is the list of ServiceCIDRs.
Items []ServiceCIDR
}

View File

@ -0,0 +1,930 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package networking
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
intstr "k8s.io/apimachinery/pkg/util/intstr"
core "k8s.io/kubernetes/pkg/apis/core"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) {
*out = *in
if in.PathType != nil {
in, out := &in.PathType, &out.PathType
*out = new(PathType)
**out = **in
}
in.Backend.DeepCopyInto(&out.Backend)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIngressPath.
func (in *HTTPIngressPath) DeepCopy() *HTTPIngressPath {
if in == nil {
return nil
}
out := new(HTTPIngressPath)
in.DeepCopyInto(out)
return out
}
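// exampleDeepCopyUsage is an illustrative sketch, not part of the generated
// code: DeepCopy returns an independent copy, so mutating the copy (including
// the pointer field PathType) leaves the original untouched.
func exampleDeepCopyUsage(original *HTTPIngressPath) *HTTPIngressPath {
	copied := original.DeepCopy()
	copied.Path = "/other" // original.Path is unchanged
	return copied
}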
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPIngressRuleValue) DeepCopyInto(out *HTTPIngressRuleValue) {
*out = *in
if in.Paths != nil {
in, out := &in.Paths, &out.Paths
*out = make([]HTTPIngressPath, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIngressRuleValue.
func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue {
if in == nil {
return nil
}
out := new(HTTPIngressRuleValue)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAddress) DeepCopyInto(out *IPAddress) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress.
func (in *IPAddress) DeepCopy() *IPAddress {
if in == nil {
return nil
}
out := new(IPAddress)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IPAddress) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAddressList) DeepCopyInto(out *IPAddressList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]IPAddress, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList.
func (in *IPAddressList) DeepCopy() *IPAddressList {
if in == nil {
return nil
}
out := new(IPAddressList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IPAddressList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) {
*out = *in
if in.ParentRef != nil {
in, out := &in.ParentRef, &out.ParentRef
*out = new(ParentReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec.
func (in *IPAddressSpec) DeepCopy() *IPAddressSpec {
if in == nil {
return nil
}
out := new(IPAddressSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPBlock) DeepCopyInto(out *IPBlock) {
*out = *in
if in.Except != nil {
in, out := &in.Except, &out.Except
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPBlock.
func (in *IPBlock) DeepCopy() *IPBlock {
if in == nil {
return nil
}
out := new(IPBlock)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Ingress) DeepCopyInto(out *Ingress) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress.
func (in *Ingress) DeepCopy() *Ingress {
if in == nil {
return nil
}
out := new(Ingress)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Ingress) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressBackend) DeepCopyInto(out *IngressBackend) {
*out = *in
if in.Service != nil {
in, out := &in.Service, &out.Service
*out = new(IngressServiceBackend)
**out = **in
}
if in.Resource != nil {
in, out := &in.Resource, &out.Resource
*out = new(core.TypedLocalObjectReference)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressBackend.
func (in *IngressBackend) DeepCopy() *IngressBackend {
if in == nil {
return nil
}
out := new(IngressBackend)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressClass) DeepCopyInto(out *IngressClass) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressClass.
func (in *IngressClass) DeepCopy() *IngressClass {
if in == nil {
return nil
}
out := new(IngressClass)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IngressClass) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressClassList) DeepCopyInto(out *IngressClassList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]IngressClass, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressClassList.
func (in *IngressClassList) DeepCopy() *IngressClassList {
if in == nil {
return nil
}
out := new(IngressClassList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IngressClassList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressClassParametersReference) DeepCopyInto(out *IngressClassParametersReference) {
*out = *in
if in.APIGroup != nil {
in, out := &in.APIGroup, &out.APIGroup
*out = new(string)
**out = **in
}
if in.Scope != nil {
in, out := &in.Scope, &out.Scope
*out = new(string)
**out = **in
}
if in.Namespace != nil {
in, out := &in.Namespace, &out.Namespace
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressClassParametersReference.
func (in *IngressClassParametersReference) DeepCopy() *IngressClassParametersReference {
if in == nil {
return nil
}
out := new(IngressClassParametersReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressClassSpec) DeepCopyInto(out *IngressClassSpec) {
*out = *in
if in.Parameters != nil {
in, out := &in.Parameters, &out.Parameters
*out = new(IngressClassParametersReference)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressClassSpec.
func (in *IngressClassSpec) DeepCopy() *IngressClassSpec {
if in == nil {
return nil
}
out := new(IngressClassSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressList) DeepCopyInto(out *IngressList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Ingress, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressList.
func (in *IngressList) DeepCopy() *IngressList {
if in == nil {
return nil
}
out := new(IngressList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IngressList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressLoadBalancerIngress) DeepCopyInto(out *IngressLoadBalancerIngress) {
*out = *in
if in.Ports != nil {
in, out := &in.Ports, &out.Ports
*out = make([]IngressPortStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressLoadBalancerIngress.
func (in *IngressLoadBalancerIngress) DeepCopy() *IngressLoadBalancerIngress {
if in == nil {
return nil
}
out := new(IngressLoadBalancerIngress)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressLoadBalancerStatus) DeepCopyInto(out *IngressLoadBalancerStatus) {
*out = *in
if in.Ingress != nil {
in, out := &in.Ingress, &out.Ingress
*out = make([]IngressLoadBalancerIngress, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressLoadBalancerStatus.
func (in *IngressLoadBalancerStatus) DeepCopy() *IngressLoadBalancerStatus {
if in == nil {
return nil
}
out := new(IngressLoadBalancerStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressPortStatus) DeepCopyInto(out *IngressPortStatus) {
*out = *in
if in.Error != nil {
in, out := &in.Error, &out.Error
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressPortStatus.
func (in *IngressPortStatus) DeepCopy() *IngressPortStatus {
if in == nil {
return nil
}
out := new(IngressPortStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressRule) DeepCopyInto(out *IngressRule) {
*out = *in
in.IngressRuleValue.DeepCopyInto(&out.IngressRuleValue)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRule.
func (in *IngressRule) DeepCopy() *IngressRule {
if in == nil {
return nil
}
out := new(IngressRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressRuleValue) DeepCopyInto(out *IngressRuleValue) {
*out = *in
if in.HTTP != nil {
in, out := &in.HTTP, &out.HTTP
*out = new(HTTPIngressRuleValue)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRuleValue.
func (in *IngressRuleValue) DeepCopy() *IngressRuleValue {
if in == nil {
return nil
}
out := new(IngressRuleValue)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressServiceBackend) DeepCopyInto(out *IngressServiceBackend) {
*out = *in
out.Port = in.Port
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressServiceBackend.
func (in *IngressServiceBackend) DeepCopy() *IngressServiceBackend {
if in == nil {
return nil
}
out := new(IngressServiceBackend)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressSpec) DeepCopyInto(out *IngressSpec) {
*out = *in
if in.IngressClassName != nil {
in, out := &in.IngressClassName, &out.IngressClassName
*out = new(string)
**out = **in
}
if in.DefaultBackend != nil {
in, out := &in.DefaultBackend, &out.DefaultBackend
*out = new(IngressBackend)
(*in).DeepCopyInto(*out)
}
if in.TLS != nil {
in, out := &in.TLS, &out.TLS
*out = make([]IngressTLS, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]IngressRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSpec.
func (in *IngressSpec) DeepCopy() *IngressSpec {
if in == nil {
return nil
}
out := new(IngressSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressStatus) DeepCopyInto(out *IngressStatus) {
*out = *in
in.LoadBalancer.DeepCopyInto(&out.LoadBalancer)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressStatus.
func (in *IngressStatus) DeepCopy() *IngressStatus {
if in == nil {
return nil
}
out := new(IngressStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressTLS) DeepCopyInto(out *IngressTLS) {
*out = *in
if in.Hosts != nil {
in, out := &in.Hosts, &out.Hosts
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressTLS.
func (in *IngressTLS) DeepCopy() *IngressTLS {
if in == nil {
return nil
}
out := new(IngressTLS)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkPolicy) DeepCopyInto(out *NetworkPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicy.
func (in *NetworkPolicy) DeepCopy() *NetworkPolicy {
if in == nil {
return nil
}
out := new(NetworkPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NetworkPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkPolicyEgressRule) DeepCopyInto(out *NetworkPolicyEgressRule) {
*out = *in
if in.Ports != nil {
in, out := &in.Ports, &out.Ports
*out = make([]NetworkPolicyPort, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.To != nil {
in, out := &in.To, &out.To
*out = make([]NetworkPolicyPeer, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyEgressRule.
func (in *NetworkPolicyEgressRule) DeepCopy() *NetworkPolicyEgressRule {
if in == nil {
return nil
}
out := new(NetworkPolicyEgressRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkPolicyIngressRule) DeepCopyInto(out *NetworkPolicyIngressRule) {
*out = *in
if in.Ports != nil {
in, out := &in.Ports, &out.Ports
*out = make([]NetworkPolicyPort, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.From != nil {
in, out := &in.From, &out.From
*out = make([]NetworkPolicyPeer, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyIngressRule.
func (in *NetworkPolicyIngressRule) DeepCopy() *NetworkPolicyIngressRule {
if in == nil {
return nil
}
out := new(NetworkPolicyIngressRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkPolicyList) DeepCopyInto(out *NetworkPolicyList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]NetworkPolicy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyList.
func (in *NetworkPolicyList) DeepCopy() *NetworkPolicyList {
if in == nil {
return nil
}
out := new(NetworkPolicyList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NetworkPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkPolicyPeer) DeepCopyInto(out *NetworkPolicyPeer) {
*out = *in
if in.PodSelector != nil {
in, out := &in.PodSelector, &out.PodSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.NamespaceSelector != nil {
in, out := &in.NamespaceSelector, &out.NamespaceSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.IPBlock != nil {
in, out := &in.IPBlock, &out.IPBlock
*out = new(IPBlock)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyPeer.
func (in *NetworkPolicyPeer) DeepCopy() *NetworkPolicyPeer {
if in == nil {
return nil
}
out := new(NetworkPolicyPeer)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkPolicyPort) DeepCopyInto(out *NetworkPolicyPort) {
*out = *in
if in.Protocol != nil {
in, out := &in.Protocol, &out.Protocol
*out = new(core.Protocol)
**out = **in
}
if in.Port != nil {
in, out := &in.Port, &out.Port
*out = new(intstr.IntOrString)
**out = **in
}
if in.EndPort != nil {
in, out := &in.EndPort, &out.EndPort
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyPort.
func (in *NetworkPolicyPort) DeepCopy() *NetworkPolicyPort {
if in == nil {
return nil
}
out := new(NetworkPolicyPort)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkPolicySpec) DeepCopyInto(out *NetworkPolicySpec) {
*out = *in
in.PodSelector.DeepCopyInto(&out.PodSelector)
if in.Ingress != nil {
in, out := &in.Ingress, &out.Ingress
*out = make([]NetworkPolicyIngressRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Egress != nil {
in, out := &in.Egress, &out.Egress
*out = make([]NetworkPolicyEgressRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.PolicyTypes != nil {
in, out := &in.PolicyTypes, &out.PolicyTypes
*out = make([]PolicyType, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicySpec.
func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec {
if in == nil {
return nil
}
out := new(NetworkPolicySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ParentReference) DeepCopyInto(out *ParentReference) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference.
func (in *ParentReference) DeepCopy() *ParentReference {
if in == nil {
return nil
}
out := new(ParentReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceBackendPort) DeepCopyInto(out *ServiceBackendPort) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceBackendPort.
func (in *ServiceBackendPort) DeepCopy() *ServiceBackendPort {
if in == nil {
return nil
}
out := new(ServiceBackendPort)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceCIDR) DeepCopyInto(out *ServiceCIDR) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDR.
func (in *ServiceCIDR) DeepCopy() *ServiceCIDR {
if in == nil {
return nil
}
out := new(ServiceCIDR)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ServiceCIDR) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceCIDRList) DeepCopyInto(out *ServiceCIDRList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ServiceCIDR, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRList.
func (in *ServiceCIDRList) DeepCopy() *ServiceCIDRList {
if in == nil {
return nil
}
out := new(ServiceCIDRList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ServiceCIDRList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceCIDRSpec) DeepCopyInto(out *ServiceCIDRSpec) {
*out = *in
if in.CIDRs != nil {
in, out := &in.CIDRs, &out.CIDRs
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRSpec.
func (in *ServiceCIDRSpec) DeepCopy() *ServiceCIDRSpec {
if in == nil {
return nil
}
out := new(ServiceCIDRSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceCIDRStatus) DeepCopyInto(out *ServiceCIDRStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]v1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRStatus.
func (in *ServiceCIDRStatus) DeepCopy() *ServiceCIDRStatus {
if in == nil {
return nil
}
out := new(ServiceCIDRStatus)
in.DeepCopyInto(out)
return out
}

View File

@ -0,0 +1,20 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
// +groupName=scheduling.k8s.io
package scheduling // import "k8s.io/kubernetes/pkg/apis/scheduling"

View File

@ -0,0 +1,53 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name use in this package
const GroupName = "scheduling.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder points to a list of functions added to Scheme.
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme applies all the stored functions to the scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&PriorityClass{},
&PriorityClassList{},
)
return nil
}
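// exampleRegister is an illustrative sketch, not part of the upstream file:
// a consumer would typically register the scheduling types into a scheme via
// the exported AddToScheme before encoding or decoding them.
func exampleRegister() *runtime.Scheme {
	scheme := runtime.NewScheme()
	if err := AddToScheme(scheme); err != nil {
		panic(err) // registration only fails on programmer error
	}
	return scheme
}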

View File

@ -0,0 +1,90 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/apis/core"
)
const (
// DefaultPriorityWhenNoDefaultClassExists is used to set priority of pods
// that do not specify any priority class and there is no priority class
// marked as default.
DefaultPriorityWhenNoDefaultClassExists = 0
// HighestUserDefinablePriority is the highest priority for user defined priority classes. Priority values larger than 1 billion are reserved for Kubernetes system use.
HighestUserDefinablePriority = int32(1000000000)
// SystemCriticalPriority is the beginning of the range of priority values for critical system components.
SystemCriticalPriority = 2 * HighestUserDefinablePriority
// SystemPriorityClassPrefix is the prefix reserved for system priority class names. Other priority
// classes are not allowed to start with this prefix.
// NOTE: In order to avoid conflict of names with user-defined priority classes, all the names must
// start with SystemPriorityClassPrefix.
SystemPriorityClassPrefix = "system-"
// SystemClusterCritical is the system priority class name that represents cluster-critical.
SystemClusterCritical = SystemPriorityClassPrefix + "cluster-critical"
// SystemNodeCritical is the system priority class name that represents node-critical.
SystemNodeCritical = SystemPriorityClassPrefix + "node-critical"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PriorityClass defines the mapping from a priority class name to the priority
// integer value. The value can be any valid integer.
type PriorityClass struct {
metav1.TypeMeta
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
// +optional
metav1.ObjectMeta
// value represents the integer value of this priority class. This is the actual priority that pods
// receive when they have the name of this class in their pod spec.
Value int32
// globalDefault specifies whether this PriorityClass should be considered as
// the default priority for pods that do not have any priority class.
// Only one PriorityClass can be marked as `globalDefault`. However, if more than
// one PriorityClass exists with its `globalDefault` field set to true,
// the smallest value of such global default PriorityClasses will be used as the default priority.
// +optional
GlobalDefault bool
// description is an arbitrary string that usually provides guidelines on
// when this priority class should be used.
// +optional
Description string
// preemptionPolicy is the policy for preempting pods with lower priority.
// One of Never, PreemptLowerPriority.
// Defaults to PreemptLowerPriority if unset.
// +optional
PreemptionPolicy *core.PreemptionPolicy
}
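// examplePriorityClass is an illustrative sketch, not part of the upstream
// API: a user-defined PriorityClass whose value stays at or below
// HighestUserDefinablePriority and whose name does not use the reserved
// "system-" prefix. The name, value and description are assumptions.
func examplePriorityClass() PriorityClass {
	return PriorityClass{
		ObjectMeta:    metav1.ObjectMeta{Name: "high-priority"},
		Value:         1000000,
		GlobalDefault: false,
		Description:   "Use for latency-critical workloads only.",
	}
}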
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PriorityClassList is a collection of priority classes.
type PriorityClassList struct {
metav1.TypeMeta
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
metav1.ListMeta
// items is the list of PriorityClasses.
Items []PriorityClass
}

View File

@ -0,0 +1,91 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package scheduling
import (
runtime "k8s.io/apimachinery/pkg/runtime"
core "k8s.io/kubernetes/pkg/apis/core"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PriorityClass) DeepCopyInto(out *PriorityClass) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.PreemptionPolicy != nil {
in, out := &in.PreemptionPolicy, &out.PreemptionPolicy
*out = new(core.PreemptionPolicy)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClass.
func (in *PriorityClass) DeepCopy() *PriorityClass {
if in == nil {
return nil
}
out := new(PriorityClass)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PriorityClass) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PriorityClassList) DeepCopyInto(out *PriorityClassList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PriorityClass, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClassList.
func (in *PriorityClassList) DeepCopy() *PriorityClassList {
if in == nil {
return nil
}
out := new(PriorityClassList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PriorityClassList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}

View File

@ -0,0 +1,96 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package capabilities
import (
"sync"
)
// Capabilities defines the set of capabilities available within the system.
// For now these are global. Eventually they may be per-user.
type Capabilities struct {
AllowPrivileged bool
// Pod sources from which to allow privileged capabilities like host networking, sharing the host
// IPC namespace, and sharing the host PID namespace.
PrivilegedSources PrivilegedSources
// PerConnectionBandwidthLimitBytesPerSec limits the throughput of each connection (currently only used for proxy, exec, attach)
PerConnectionBandwidthLimitBytesPerSec int64
}
// PrivilegedSources defines the pod sources allowed to make privileged requests for certain types
// of capabilities like host networking, sharing the host IPC namespace, and sharing the host PID namespace.
type PrivilegedSources struct {
// List of pod sources for which using host network is allowed.
HostNetworkSources []string
// List of pod sources for which using host pid namespace is allowed.
HostPIDSources []string
// List of pod sources for which using host ipc is allowed.
HostIPCSources []string
}
var capInstance struct {
once sync.Once
lock sync.Mutex
capabilities *Capabilities
}
// Initialize initializes the capability set. This can only be done once per binary; subsequent calls are ignored.
func Initialize(c Capabilities) {
// Only do this once
capInstance.once.Do(func() {
capInstance.capabilities = &c
})
}
// Setup initializes the capability set. It wraps Initialize to improve usability.
func Setup(allowPrivileged bool, perConnectionBytesPerSec int64) {
Initialize(Capabilities{
AllowPrivileged: allowPrivileged,
PerConnectionBandwidthLimitBytesPerSec: perConnectionBytesPerSec,
})
}
// ResetForTest resets the capabilities to a given state for testing purposes.
// This function should only be called from tests.
func ResetForTest() {
capInstance.lock.Lock()
defer capInstance.lock.Unlock()
capInstance.capabilities = nil
capInstance.once = sync.Once{}
}
// Get returns a read-only copy of the system capabilities.
func Get() Capabilities {
capInstance.lock.Lock()
defer capInstance.lock.Unlock()
// This check prevents clobbering of capabilities that might already have been set (for example by Initialize or from tests)
if capInstance.capabilities == nil {
Initialize(Capabilities{
AllowPrivileged: false,
PrivilegedSources: PrivilegedSources{
HostNetworkSources: []string{},
HostPIDSources: []string{},
HostIPCSources: []string{},
},
})
}
return *capInstance.capabilities
}
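// exampleSetup is an illustrative sketch, not part of the upstream file: a
// binary typically calls Setup (or Initialize) once at startup and later
// reads the effective capabilities via Get. The argument values are
// assumptions.
func exampleSetup() Capabilities {
	Setup(false, 0) // privileged containers disallowed, no per-connection limit
	return Get()
}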

18
e2e/vendor/k8s.io/kubernetes/pkg/capabilities/doc.go generated vendored Normal file
View File

@ -0,0 +1,18 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package capabilities manages system level capabilities
package capabilities // import "k8s.io/kubernetes/pkg/capabilities"

View File

@ -0,0 +1,66 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package conditions
import (
"fmt"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
)
// ErrPodCompleted is returned by PodRunning or PodContainerRunning to indicate that
// the pod has already reached completed state.
var ErrPodCompleted = fmt.Errorf("pod ran to completion")
// PodRunning returns true if the pod is running, false if the pod has not yet reached running state,
// returns ErrPodCompleted if the pod has run to completion, or an error in any other case.
func PodRunning(event watch.Event) (bool, error) {
switch event.Type {
case watch.Deleted:
return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "")
}
switch t := event.Object.(type) {
case *v1.Pod:
switch t.Status.Phase {
case v1.PodRunning:
return true, nil
case v1.PodFailed, v1.PodSucceeded:
return false, ErrPodCompleted
}
}
return false, nil
}
// PodCompleted returns true if the pod has run to completion, false if it has
// not yet completed, or an error in any other case.
func PodCompleted(event watch.Event) (bool, error) {
switch event.Type {
case watch.Deleted:
return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "")
}
switch t := event.Object.(type) {
case *v1.Pod:
switch t.Status.Phase {
case v1.PodFailed, v1.PodSucceeded:
return true, nil
}
}
return false, nil
}
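// Illustrative usage sketch (hypothetical; ctx and watcher are assumed to be
// provided by the caller, e.g. from a client-go Watch call): these functions
// have the watch condition signature and are meant to be passed to a watch
// "until" helper such as k8s.io/client-go/tools/watch.UntilWithoutRetry:
//
//	ev, err := watchtools.UntilWithoutRetry(ctx, watcher, PodRunning)
//	if err == ErrPodCompleted {
//		// the pod already ran to completion instead of reaching Running
//	}
//	_ = ev
//
// The helper returns once a condition reports true or returns an error.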

19
e2e/vendor/k8s.io/kubernetes/pkg/cluster/ports/doc.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package ports defines ports used by various pieces of the kubernetes
// infrastructure.
package ports // import "k8s.io/kubernetes/pkg/cluster/ports"

View File

@ -0,0 +1,54 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ports
import (
cpoptions "k8s.io/cloud-provider/options"
)
// This file lists the default ports used across the cluster.
// It also serves as important documentation, so do not remove entries lightly.
const (
// ProxyStatusPort is the default port for the proxy metrics server.
// May be overridden by a flag at startup.
ProxyStatusPort = 10249
// KubeletPort is the default port for the kubelet server on each host machine.
// May be overridden by a flag at startup.
KubeletPort = 10250
// KubeletReadOnlyPort exposes basic read-only services from the kubelet.
// May be overridden by a flag at startup.
// This is necessary for heapster to collect monitoring stats from the kubelet
// until heapster can transition to using the SSL endpoint.
// TODO(roberthbailey): Remove this once we have a better solution for heapster.
KubeletReadOnlyPort = 10255
// KubeletHealthzPort exposes a healthz endpoint from the kubelet.
// May be overridden by a flag at startup.
KubeletHealthzPort = 10248
// ProxyHealthzPort is the default port for the proxy healthz server.
// May be overridden by a flag at startup.
ProxyHealthzPort = 10256
// KubeControllerManagerPort is the default port for the controller manager status server.
// May be overridden by a flag at startup.
KubeControllerManagerPort = 10257
// CloudControllerManagerPort is the default port for the cloud controller manager server.
// This value may be overridden by a flag at startup.
CloudControllerManagerPort = 10258
// CloudControllerManagerWebhookPort is the default port for the cloud
// controller manager webhook server. May be overridden by a flag at
// startup.
CloudControllerManagerWebhookPort = cpoptions.CloudControllerManagerWebhookPort
)

17
e2e/vendor/k8s.io/kubernetes/pkg/controller/OWNERS generated vendored Normal file
View File

@ -0,0 +1,17 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- deads2k
- derekwaynecarr
- mikedanese
- janetkuo
- cheftako
- andrewsykim
- sig-apps-approvers
reviewers:
- andrewsykim
- deads2k
- cheftako
- sig-apps-reviewers
labels:
- sig/apps

View File

@ -0,0 +1,596 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"encoding/json"
"fmt"
"sync"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/klog/v2"
)
type BaseControllerRefManager struct {
Controller metav1.Object
Selector labels.Selector
canAdoptErr error
canAdoptOnce sync.Once
CanAdoptFunc func(ctx context.Context) error
}
func (m *BaseControllerRefManager) CanAdopt(ctx context.Context) error {
m.canAdoptOnce.Do(func() {
if m.CanAdoptFunc != nil {
m.canAdoptErr = m.CanAdoptFunc(ctx)
}
})
return m.canAdoptErr
}
// ClaimObject tries to take ownership of an object for this controller.
//
// It will reconcile the following:
// - Adopt orphans if the match function returns true.
// - Release owned objects if the match function returns false.
//
// A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation
// is still needed.
//
// If the error is nil, either the reconciliation succeeded, or no
// reconciliation was necessary. The returned boolean indicates whether you now
// own the object.
//
// No reconciliation will be attempted if the controller is being deleted.
func (m *BaseControllerRefManager) ClaimObject(ctx context.Context, obj metav1.Object, match func(metav1.Object) bool, adopt, release func(context.Context, metav1.Object) error) (bool, error) {
controllerRef := metav1.GetControllerOfNoCopy(obj)
if controllerRef != nil {
if controllerRef.UID != m.Controller.GetUID() {
// Owned by someone else. Ignore.
return false, nil
}
if match(obj) {
// We already own it and the selector matches.
// Return true (successfully claimed) before checking deletion timestamp.
// We're still allowed to claim things we already own while being deleted
// because doing so requires taking no actions.
return true, nil
}
// Owned by us but selector doesn't match.
// Try to release, unless we're being deleted.
if m.Controller.GetDeletionTimestamp() != nil {
return false, nil
}
if err := release(ctx, obj); err != nil {
// If the pod no longer exists, ignore the error.
if errors.IsNotFound(err) {
return false, nil
}
// Either someone else released it, or there was a transient error.
// The controller should requeue and try again if it's still stale.
return false, err
}
// Successfully released.
return false, nil
}
// It's an orphan.
if m.Controller.GetDeletionTimestamp() != nil || !match(obj) {
// Ignore if we're being deleted or selector doesn't match.
return false, nil
}
if obj.GetDeletionTimestamp() != nil {
// Ignore if the object is being deleted
return false, nil
}
if len(m.Controller.GetNamespace()) > 0 && m.Controller.GetNamespace() != obj.GetNamespace() {
// Ignore if the namespace does not match
return false, nil
}
// Selector matches. Try to adopt.
if err := adopt(ctx, obj); err != nil {
// If the pod no longer exists, ignore the error.
if errors.IsNotFound(err) {
return false, nil
}
// Either someone else claimed it first, or there was a transient error.
// The controller should requeue and try again if it's still orphaned.
return false, err
}
// Successfully adopted.
return true, nil
}
type PodControllerRefManager struct {
BaseControllerRefManager
controllerKind schema.GroupVersionKind
podControl PodControlInterface
finalizers []string
}
// NewPodControllerRefManager returns a PodControllerRefManager that exposes
// methods to manage the controllerRef of pods.
//
// The CanAdopt() function can be used to perform a potentially expensive check
// (such as a live GET from the API server) prior to the first adoption.
// It will only be called (at most once) if an adoption is actually attempted.
// If CanAdopt() returns a non-nil error, all adoptions will fail.
//
// NOTE: Once CanAdopt() is called, it will not be called again by the same
// PodControllerRefManager instance. Create a new instance if it makes
// sense to check CanAdopt() again (e.g. in a different sync pass).
func NewPodControllerRefManager(
podControl PodControlInterface,
controller metav1.Object,
selector labels.Selector,
controllerKind schema.GroupVersionKind,
canAdopt func(ctx context.Context) error,
finalizers ...string,
) *PodControllerRefManager {
return &PodControllerRefManager{
BaseControllerRefManager: BaseControllerRefManager{
Controller: controller,
Selector: selector,
CanAdoptFunc: canAdopt,
},
controllerKind: controllerKind,
podControl: podControl,
finalizers: finalizers,
}
}
// ClaimPods tries to take ownership of a list of Pods.
//
// It will reconcile the following:
// - Adopt orphans if the selector matches.
// - Release owned objects if the selector no longer matches.
//
// Optional: If one or more filters are specified, a Pod will only be claimed if
// all filters return true.
//
// A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation
// is still needed.
//
// If the error is nil, either the reconciliation succeeded, or no
// reconciliation was necessary. The list of Pods that you now own is returned.
func (m *PodControllerRefManager) ClaimPods(ctx context.Context, pods []*v1.Pod, filters ...func(*v1.Pod) bool) ([]*v1.Pod, error) {
var claimed []*v1.Pod
var errlist []error
match := func(obj metav1.Object) bool {
pod := obj.(*v1.Pod)
// Check selector first so filters only run on potentially matching Pods.
if !m.Selector.Matches(labels.Set(pod.Labels)) {
return false
}
for _, filter := range filters {
if !filter(pod) {
return false
}
}
return true
}
adopt := func(ctx context.Context, obj metav1.Object) error {
return m.AdoptPod(ctx, obj.(*v1.Pod))
}
release := func(ctx context.Context, obj metav1.Object) error {
return m.ReleasePod(ctx, obj.(*v1.Pod))
}
for _, pod := range pods {
ok, err := m.ClaimObject(ctx, pod, match, adopt, release)
if err != nil {
errlist = append(errlist, err)
continue
}
if ok {
claimed = append(claimed, pod)
}
}
return claimed, utilerrors.NewAggregate(errlist)
}
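// Illustrative usage sketch (hypothetical names; rsc, rs, selector,
// controllerKind, canAdoptFunc and allPods are assumed to come from the
// calling ReplicaSet-style controller) showing a typical sync-pass claim:
//
//	cm := NewPodControllerRefManager(rsc.podControl, rs, selector, controllerKind, canAdoptFunc)
//	claimedPods, err := cm.ClaimPods(ctx, allPods, func(pod *v1.Pod) bool {
//		return pod.DeletionTimestamp == nil // optional extra filter
//	})
//
// Orphaned pods matching the selector (and all filters) are adopted, and
// owned pods that no longer match are released.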
// AdoptPod sends a patch to take control of the pod. It returns the error if
// the patching fails.
func (m *PodControllerRefManager) AdoptPod(ctx context.Context, pod *v1.Pod) error {
if err := m.CanAdopt(ctx); err != nil {
return fmt.Errorf("can't adopt Pod %v/%v (%v): %v", pod.Namespace, pod.Name, pod.UID, err)
}
// Note that ValidateOwnerReferences() will reject this patch if another
// OwnerReference exists with controller=true.
patchBytes, err := ownerRefControllerPatch(m.Controller, m.controllerKind, pod.UID, m.finalizers...)
if err != nil {
return err
}
return m.podControl.PatchPod(ctx, pod.Namespace, pod.Name, patchBytes)
}
// ReleasePod sends a patch to free the pod from the control of the controller.
// It returns the error if the patching fails. 404 and 422 errors are ignored.
func (m *PodControllerRefManager) ReleasePod(ctx context.Context, pod *v1.Pod) error {
logger := klog.FromContext(ctx)
logger.V(2).Info("Patching pod to remove its controllerRef", "pod", klog.KObj(pod), "gvk", m.controllerKind, "controller", m.Controller.GetName())
patchBytes, err := GenerateDeleteOwnerRefStrategicMergeBytes(pod.UID, []types.UID{m.Controller.GetUID()}, m.finalizers...)
if err != nil {
return err
}
err = m.podControl.PatchPod(ctx, pod.Namespace, pod.Name, patchBytes)
if err != nil {
if errors.IsNotFound(err) {
// If the pod no longer exists, ignore it.
return nil
}
if errors.IsInvalid(err) {
// Invalid error will be returned in two cases: 1. the pod
// has no owner reference, 2. the uid of the pod doesn't
// match, which means the pod is deleted and then recreated.
// In both cases, the error can be ignored.
// TODO: If the pod has owner references, but none of them
// has the owner.UID, server will silently ignore the patch.
// Investigate why.
return nil
}
}
return err
}
// ReplicaSetControllerRefManager is used to manage controllerRef of ReplicaSets.
// Three methods are defined on this object: ClaimReplicaSets, AdoptReplicaSet and
// ReleaseReplicaSet, which are used to classify the ReplicaSets into appropriate
// categories and accordingly adopt or release them. See comments on these functions
// for more details.
type ReplicaSetControllerRefManager struct {
BaseControllerRefManager
controllerKind schema.GroupVersionKind
rsControl RSControlInterface
}
// NewReplicaSetControllerRefManager returns a ReplicaSetControllerRefManager that exposes
// methods to manage the controllerRef of ReplicaSets.
//
// The CanAdopt() function can be used to perform a potentially expensive check
// (such as a live GET from the API server) prior to the first adoption.
// It will only be called (at most once) if an adoption is actually attempted.
// If CanAdopt() returns a non-nil error, all adoptions will fail.
//
// NOTE: Once CanAdopt() is called, it will not be called again by the same
// ReplicaSetControllerRefManager instance. Create a new instance if it
// makes sense to check CanAdopt() again (e.g. in a different sync pass).
func NewReplicaSetControllerRefManager(
rsControl RSControlInterface,
controller metav1.Object,
selector labels.Selector,
controllerKind schema.GroupVersionKind,
canAdopt func(ctx context.Context) error,
) *ReplicaSetControllerRefManager {
return &ReplicaSetControllerRefManager{
BaseControllerRefManager: BaseControllerRefManager{
Controller: controller,
Selector: selector,
CanAdoptFunc: canAdopt,
},
controllerKind: controllerKind,
rsControl: rsControl,
}
}
// ClaimReplicaSets tries to take ownership of a list of ReplicaSets.
//
// It will reconcile the following:
// - Adopt orphans if the selector matches.
// - Release owned objects if the selector no longer matches.
//
// A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation
// is still needed.
//
// If the error is nil, either the reconciliation succeeded, or no
// reconciliation was necessary. The list of ReplicaSets that you now own is
// returned.
func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(ctx context.Context, sets []*apps.ReplicaSet) ([]*apps.ReplicaSet, error) {
var claimed []*apps.ReplicaSet
var errlist []error
match := func(obj metav1.Object) bool {
return m.Selector.Matches(labels.Set(obj.GetLabels()))
}
adopt := func(ctx context.Context, obj metav1.Object) error {
return m.AdoptReplicaSet(ctx, obj.(*apps.ReplicaSet))
}
release := func(ctx context.Context, obj metav1.Object) error {
return m.ReleaseReplicaSet(ctx, obj.(*apps.ReplicaSet))
}
for _, rs := range sets {
ok, err := m.ClaimObject(ctx, rs, match, adopt, release)
if err != nil {
errlist = append(errlist, err)
continue
}
if ok {
claimed = append(claimed, rs)
}
}
return claimed, utilerrors.NewAggregate(errlist)
}
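// claimReplicaSetsExample is an illustrative sketch (not an upstream helper) of one way a
// Deployment-style controller can wire RecheckDeletionTimestamp (defined below) into a
// ReplicaSetControllerRefManager before claiming ReplicaSets. The rsControl implementation,
// the Deployment, the candidate list and the freshDeployment getter are assumed to be
// supplied by the caller.
func claimReplicaSetsExample(
	ctx context.Context,
	rsControl RSControlInterface,
	d *apps.Deployment,
	rsList []*apps.ReplicaSet,
	freshDeployment func(ctx context.Context) (metav1.Object, error),
) ([]*apps.ReplicaSet, error) {
	selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
	if err != nil {
		return nil, err
	}
	// Only adopt orphans if a re-fetched copy of the Deployment is not being deleted.
	canAdopt := RecheckDeletionTimestamp(freshDeployment)
	m := NewReplicaSetControllerRefManager(
		rsControl,
		d,
		selector,
		apps.SchemeGroupVersion.WithKind("Deployment"),
		canAdopt,
	)
	// Adopt matching orphans, release owned ReplicaSets that no longer match the selector,
	// and return the ReplicaSets the Deployment now owns.
	return m.ClaimReplicaSets(ctx, rsList)
}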
// AdoptReplicaSet sends a patch to take control of the ReplicaSet. It returns
// the error if the patching fails.
func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(ctx context.Context, rs *apps.ReplicaSet) error {
if err := m.CanAdopt(ctx); err != nil {
return fmt.Errorf("can't adopt ReplicaSet %v/%v (%v): %v", rs.Namespace, rs.Name, rs.UID, err)
}
// Note that ValidateOwnerReferences() will reject this patch if another
// OwnerReference exists with controller=true.
patchBytes, err := ownerRefControllerPatch(m.Controller, m.controllerKind, rs.UID)
if err != nil {
return err
}
return m.rsControl.PatchReplicaSet(ctx, rs.Namespace, rs.Name, patchBytes)
}
// ReleaseReplicaSet sends a patch to free the ReplicaSet from the control of the Deployment controller.
// It returns the error if the patching fails. 404 and 422 errors are ignored.
func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(ctx context.Context, replicaSet *apps.ReplicaSet) error {
logger := klog.FromContext(ctx)
logger.V(2).Info("Patching ReplicaSet to remove its controllerRef", "replicaSet", klog.KObj(replicaSet), "gvk", m.controllerKind, "controller", m.Controller.GetName())
patchBytes, err := GenerateDeleteOwnerRefStrategicMergeBytes(replicaSet.UID, []types.UID{m.Controller.GetUID()})
if err != nil {
return err
}
err = m.rsControl.PatchReplicaSet(ctx, replicaSet.Namespace, replicaSet.Name, patchBytes)
if err != nil {
if errors.IsNotFound(err) {
// If the ReplicaSet no longer exists, ignore it.
return nil
}
if errors.IsInvalid(err) {
// Invalid error will be returned in two cases: 1. the ReplicaSet
// has no owner reference, 2. the uid of the ReplicaSet doesn't
// match, which means the ReplicaSet is deleted and then recreated.
// In both cases, the error can be ignored.
return nil
}
}
return err
}
// RecheckDeletionTimestamp returns a CanAdopt() function to recheck deletion.
//
// The CanAdopt() function calls getObject() to fetch the latest value,
// and denies adoption attempts if that object has a non-nil DeletionTimestamp.
func RecheckDeletionTimestamp(getObject func(context.Context) (metav1.Object, error)) func(context.Context) error {
return func(ctx context.Context) error {
obj, err := getObject(ctx)
if err != nil {
return fmt.Errorf("can't recheck DeletionTimestamp: %v", err)
}
if obj.GetDeletionTimestamp() != nil {
return fmt.Errorf("%v/%v has just been deleted at %v", obj.GetNamespace(), obj.GetName(), obj.GetDeletionTimestamp())
}
return nil
}
}
// ControllerRevisionControllerRefManager is used to manage controllerRef of ControllerRevisions.
// Three methods are defined on this object: 1. ClaimControllerRevisions, 2. AdoptControllerRevision, and
// 3. ReleaseControllerRevision, which are used to classify the ControllerRevisions into the appropriate
// categories and accordingly adopt or release them. See comments on these functions
// for more details.
type ControllerRevisionControllerRefManager struct {
BaseControllerRefManager
controllerKind schema.GroupVersionKind
crControl ControllerRevisionControlInterface
}
// NewControllerRevisionControllerRefManager returns a ControllerRevisionControllerRefManager that exposes
// methods to manage the controllerRef of ControllerRevisions.
//
// The canAdopt() function can be used to perform a potentially expensive check
// (such as a live GET from the API server) prior to the first adoption.
// It will only be called (at most once) if an adoption is actually attempted.
// If canAdopt() returns a non-nil error, all adoptions will fail.
//
// NOTE: Once canAdopt() is called, it will not be called again by the same
// ControllerRevisionControllerRefManager instance. Create a new instance if it
// makes sense to check canAdopt() again (e.g. in a different sync pass).
func NewControllerRevisionControllerRefManager(
crControl ControllerRevisionControlInterface,
controller metav1.Object,
selector labels.Selector,
controllerKind schema.GroupVersionKind,
canAdopt func(ctx context.Context) error,
) *ControllerRevisionControllerRefManager {
return &ControllerRevisionControllerRefManager{
BaseControllerRefManager: BaseControllerRefManager{
Controller: controller,
Selector: selector,
CanAdoptFunc: canAdopt,
},
controllerKind: controllerKind,
crControl: crControl,
}
}
// ClaimControllerRevisions tries to take ownership of a list of ControllerRevisions.
//
// It will reconcile the following:
// - Adopt orphans if the selector matches.
// - Release owned objects if the selector no longer matches.
//
// A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation
// is still needed.
//
// If the error is nil, either the reconciliation succeeded, or no
// reconciliation was necessary. The list of ControllerRevisions that you now own is
// returned.
func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(ctx context.Context, histories []*apps.ControllerRevision) ([]*apps.ControllerRevision, error) {
var claimed []*apps.ControllerRevision
var errlist []error
match := func(obj metav1.Object) bool {
return m.Selector.Matches(labels.Set(obj.GetLabels()))
}
adopt := func(ctx context.Context, obj metav1.Object) error {
return m.AdoptControllerRevision(ctx, obj.(*apps.ControllerRevision))
}
release := func(ctx context.Context, obj metav1.Object) error {
return m.ReleaseControllerRevision(ctx, obj.(*apps.ControllerRevision))
}
for _, h := range histories {
ok, err := m.ClaimObject(ctx, h, match, adopt, release)
if err != nil {
errlist = append(errlist, err)
continue
}
if ok {
claimed = append(claimed, h)
}
}
return claimed, utilerrors.NewAggregate(errlist)
}
// AdoptControllerRevision sends a patch to take control of the ControllerRevision. It returns the error if
// the patching fails.
func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(ctx context.Context, history *apps.ControllerRevision) error {
if err := m.CanAdopt(ctx); err != nil {
return fmt.Errorf("can't adopt ControllerRevision %v/%v (%v): %v", history.Namespace, history.Name, history.UID, err)
}
// Note that ValidateOwnerReferences() will reject this patch if another
// OwnerReference exists with controller=true.
patchBytes, err := ownerRefControllerPatch(m.Controller, m.controllerKind, history.UID)
if err != nil {
return err
}
return m.crControl.PatchControllerRevision(ctx, history.Namespace, history.Name, patchBytes)
}
// ReleaseControllerRevision sends a patch to free the ControllerRevision from the control of its controller.
// It returns the error if the patching fails. 404 and 422 errors are ignored.
func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(ctx context.Context, history *apps.ControllerRevision) error {
logger := klog.FromContext(ctx)
logger.V(2).Info("Patching ControllerRevision to remove its controllerRef", "controllerRevision", klog.KObj(history), "gvk", m.controllerKind, "controller", m.Controller.GetName())
patchBytes, err := GenerateDeleteOwnerRefStrategicMergeBytes(history.UID, []types.UID{m.Controller.GetUID()})
if err != nil {
return err
}
err = m.crControl.PatchControllerRevision(ctx, history.Namespace, history.Name, patchBytes)
if err != nil {
if errors.IsNotFound(err) {
// If the ControllerRevision no longer exists, ignore it.
return nil
}
if errors.IsInvalid(err) {
// Invalid error will be returned in two cases: 1. the ControllerRevision
// has no owner reference, 2. the uid of the ControllerRevision doesn't
// match, which means the ControllerRevision is deleted and then recreated.
// In both cases, the error can be ignored.
return nil
}
}
return err
}
type objectForAddOwnerRefPatch struct {
Metadata objectMetaForPatch `json:"metadata"`
}
type objectMetaForPatch struct {
OwnerReferences []metav1.OwnerReference `json:"ownerReferences"`
UID types.UID `json:"uid"`
Finalizers []string `json:"finalizers,omitempty"`
}
func ownerRefControllerPatch(controller metav1.Object, controllerKind schema.GroupVersionKind, uid types.UID, finalizers ...string) ([]byte, error) {
blockOwnerDeletion := true
isController := true
addControllerPatch := objectForAddOwnerRefPatch{
Metadata: objectMetaForPatch{
UID: uid,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: controllerKind.GroupVersion().String(),
Kind: controllerKind.Kind,
Name: controller.GetName(),
UID: controller.GetUID(),
Controller: &isController,
BlockOwnerDeletion: &blockOwnerDeletion,
},
},
Finalizers: finalizers,
},
}
patchBytes, err := json.Marshal(&addControllerPatch)
if err != nil {
return nil, err
}
return patchBytes, nil
}
type objectForDeleteOwnerRefStrategicMergePatch struct {
Metadata objectMetaForMergePatch `json:"metadata"`
}
type objectMetaForMergePatch struct {
UID types.UID `json:"uid"`
OwnerReferences []map[string]string `json:"ownerReferences"`
DeleteFinalizers []string `json:"$deleteFromPrimitiveList/finalizers,omitempty"`
}
func GenerateDeleteOwnerRefStrategicMergeBytes(dependentUID types.UID, ownerUIDs []types.UID, finalizers ...string) ([]byte, error) {
var ownerReferences []map[string]string
for _, ownerUID := range ownerUIDs {
ownerReferences = append(ownerReferences, ownerReference(ownerUID, "delete"))
}
patch := objectForDeleteOwnerRefStrategicMergePatch{
Metadata: objectMetaForMergePatch{
UID: dependentUID,
OwnerReferences: ownerReferences,
DeleteFinalizers: finalizers,
},
}
patchBytes, err := json.Marshal(&patch)
if err != nil {
return nil, err
}
return patchBytes, nil
}
func ownerReference(uid types.UID, patchType string) map[string]string {
return map[string]string{
"$patch": patchType,
"uid": string(uid),
}
}
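// For illustration, the two patch helpers above produce JSON of roughly the following shape; the
// names and UIDs here are hypothetical placeholders:
//
//	// ownerRefControllerPatch(deployment "web", apps/v1 Deployment, "rs-uid"):
//	{"metadata":{"ownerReferences":[{"apiVersion":"apps/v1","kind":"Deployment","name":"web",
//	"uid":"deploy-uid","controller":true,"blockOwnerDeletion":true}],"uid":"rs-uid"}}
//
//	// GenerateDeleteOwnerRefStrategicMergeBytes("rs-uid", []types.UID{"deploy-uid"}):
//	{"metadata":{"uid":"rs-uid","ownerReferences":[{"$patch":"delete","uid":"deploy-uid"}]}}
//
// Both patches pin the dependent's own uid, so the API server rejects the update (as an Invalid
// error, handled by the Release* methods above) if the object was deleted and recreated under a
// different uid.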

File diff suppressed because it is too large

View File

@ -0,0 +1,951 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"context"
"fmt"
"math"
"sort"
"strconv"
"strings"
"time"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
intstrutil "k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
appslisters "k8s.io/client-go/listers/apps/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/controller"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
"k8s.io/utils/integer"
)
const (
// RevisionAnnotation is the revision annotation of a deployment's replica sets which records its rollout sequence
RevisionAnnotation = "deployment.kubernetes.io/revision"
// RevisionHistoryAnnotation maintains the history of all old revisions that a replica set has served for a deployment.
RevisionHistoryAnnotation = "deployment.kubernetes.io/revision-history"
// DesiredReplicasAnnotation is the desired replicas for a deployment recorded as an annotation
// in its replica sets. Helps in separating scaling events from the rollout process and for
// determining if the new replica set for a deployment is really saturated.
DesiredReplicasAnnotation = "deployment.kubernetes.io/desired-replicas"
// MaxReplicasAnnotation is the maximum replicas a deployment can have at a given point, which
// is deployment.spec.replicas + maxSurge. Used by the underlying replica sets to estimate their
// proportions in case the deployment has surge replicas.
MaxReplicasAnnotation = "deployment.kubernetes.io/max-replicas"
// RollbackRevisionNotFound is not found rollback event reason
RollbackRevisionNotFound = "DeploymentRollbackRevisionNotFound"
// RollbackTemplateUnchanged is the template unchanged rollback event reason
RollbackTemplateUnchanged = "DeploymentRollbackTemplateUnchanged"
// RollbackDone is the done rollback event reason
RollbackDone = "DeploymentRollback"
// Reasons for deployment conditions
//
// Progressing:
// ReplicaSetUpdatedReason is added in a deployment when one of its replica sets is updated as part
// of the rollout process.
ReplicaSetUpdatedReason = "ReplicaSetUpdated"
// FailedRSCreateReason is added in a deployment when it cannot create a new replica set.
FailedRSCreateReason = "ReplicaSetCreateError"
// NewReplicaSetReason is added in a deployment when it creates a new replica set.
NewReplicaSetReason = "NewReplicaSetCreated"
// FoundNewRSReason is added in a deployment when it adopts an existing replica set.
FoundNewRSReason = "FoundNewReplicaSet"
// NewRSAvailableReason is added in a deployment when its newest replica set is made available
// i.e. the number of new pods that have passed readiness checks and run for at least minReadySeconds
// is at least the minimum available pods that need to run for the deployment.
NewRSAvailableReason = "NewReplicaSetAvailable"
// TimedOutReason is added in a deployment when its newest replica set fails to show any progress
// within the given deadline (progressDeadlineSeconds).
TimedOutReason = "ProgressDeadlineExceeded"
// PausedDeployReason is added in a deployment when it is paused. Lack of progress shouldn't be
// estimated once a deployment is paused.
PausedDeployReason = "DeploymentPaused"
// ResumedDeployReason is added in a deployment when it is resumed. Useful for not accidentally
// failing deployments that paused amidst a rollout and are bounded by a deadline.
ResumedDeployReason = "DeploymentResumed"
//
// Available:
// MinimumReplicasAvailable is added in a deployment when it has its minimum replicas required available.
MinimumReplicasAvailable = "MinimumReplicasAvailable"
// MinimumReplicasUnavailable is added in a deployment when it doesn't have the minimum required replicas
// available.
MinimumReplicasUnavailable = "MinimumReplicasUnavailable"
)
// NewDeploymentCondition creates a new deployment condition.
func NewDeploymentCondition(condType apps.DeploymentConditionType, status v1.ConditionStatus, reason, message string) *apps.DeploymentCondition {
return &apps.DeploymentCondition{
Type: condType,
Status: status,
LastUpdateTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: reason,
Message: message,
}
}
// GetDeploymentCondition returns the condition with the provided type.
func GetDeploymentCondition(status apps.DeploymentStatus, condType apps.DeploymentConditionType) *apps.DeploymentCondition {
for i := range status.Conditions {
c := status.Conditions[i]
if c.Type == condType {
return &c
}
}
return nil
}
// SetDeploymentCondition updates the deployment to include the provided condition. If the condition that
// we are about to add already exists and has the same status and reason then we are not going to update.
func SetDeploymentCondition(status *apps.DeploymentStatus, condition apps.DeploymentCondition) {
currentCond := GetDeploymentCondition(*status, condition.Type)
if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason {
return
}
// Do not update lastTransitionTime if the status of the condition doesn't change.
if currentCond != nil && currentCond.Status == condition.Status {
condition.LastTransitionTime = currentCond.LastTransitionTime
}
newConditions := filterOutCondition(status.Conditions, condition.Type)
status.Conditions = append(newConditions, condition)
}
// RemoveDeploymentCondition removes the deployment condition with the provided type.
func RemoveDeploymentCondition(status *apps.DeploymentStatus, condType apps.DeploymentConditionType) {
status.Conditions = filterOutCondition(status.Conditions, condType)
}
// filterOutCondition returns a new slice of deployment conditions without conditions with the provided type.
func filterOutCondition(conditions []apps.DeploymentCondition, condType apps.DeploymentConditionType) []apps.DeploymentCondition {
var newConditions []apps.DeploymentCondition
for _, c := range conditions {
if c.Type == condType {
continue
}
newConditions = append(newConditions, c)
}
return newConditions
}
// ReplicaSetToDeploymentCondition converts a replica set condition into a deployment condition.
// Useful for promoting replica set failure conditions into deployments.
func ReplicaSetToDeploymentCondition(cond apps.ReplicaSetCondition) apps.DeploymentCondition {
return apps.DeploymentCondition{
Type: apps.DeploymentConditionType(cond.Type),
Status: cond.Status,
LastTransitionTime: cond.LastTransitionTime,
LastUpdateTime: cond.LastTransitionTime,
Reason: cond.Reason,
Message: cond.Message,
}
}
// SetDeploymentRevision updates the revision for a deployment.
func SetDeploymentRevision(deployment *apps.Deployment, revision string) bool {
updated := false
if deployment.Annotations == nil {
deployment.Annotations = make(map[string]string)
}
if deployment.Annotations[RevisionAnnotation] != revision {
deployment.Annotations[RevisionAnnotation] = revision
updated = true
}
return updated
}
// MaxRevision finds the highest revision in the replica sets
func MaxRevision(logger klog.Logger, allRSs []*apps.ReplicaSet) int64 {
max := int64(0)
for _, rs := range allRSs {
if v, err := Revision(rs); err != nil {
// Skip the replica set when we fail to parse its revision information
logger.V(4).Info("Couldn't parse revision for replica set, deployment controller will skip it when reconciling revisions", "replicaSet", klog.KObj(rs), "err", err)
} else if v > max {
max = v
}
}
return max
}
// LastRevision finds the second max revision number in all replica sets (the last revision)
func LastRevision(logger klog.Logger, allRSs []*apps.ReplicaSet) int64 {
max, secMax := int64(0), int64(0)
for _, rs := range allRSs {
if v, err := Revision(rs); err != nil {
// Skip the replica set when we fail to parse its revision information
logger.V(4).Info("Couldn't parse revision for replica set, deployment controller will skip it when reconciling revisions", "replicaSet", klog.KObj(rs), "err", err)
} else if v >= max {
secMax = max
max = v
} else if v > secMax {
secMax = v
}
}
return secMax
}
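// Worked example (hypothetical values): given replica sets annotated with revisions "1", "3" and
// "7", MaxRevision returns 7 and LastRevision returns 3; replica sets whose revision annotation
// cannot be parsed are skipped by both helpers.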
// Revision returns the revision number of the input object.
func Revision(obj runtime.Object) (int64, error) {
acc, err := meta.Accessor(obj)
if err != nil {
return 0, err
}
v, ok := acc.GetAnnotations()[RevisionAnnotation]
if !ok {
return 0, nil
}
return strconv.ParseInt(v, 10, 64)
}
// SetNewReplicaSetAnnotations sets new replica set's annotations appropriately by updating its revision and
// copying required deployment annotations to it; it returns true if replica set's annotation is changed.
func SetNewReplicaSetAnnotations(ctx context.Context, deployment *apps.Deployment, newRS *apps.ReplicaSet, newRevision string, exists bool, revHistoryLimitInChars int) bool {
logger := klog.FromContext(ctx)
// First, copy deployment's annotations (except for apply and revision annotations)
annotationChanged := copyDeploymentAnnotationsToReplicaSet(deployment, newRS)
// Then, update replica set's revision annotation
if newRS.Annotations == nil {
newRS.Annotations = make(map[string]string)
}
oldRevision, ok := newRS.Annotations[RevisionAnnotation]
// The newRS's revision should be the greatest among all RSes. Usually, its revision number is newRevision (the max revision number
// of all old RSes + 1). However, it's possible that some of the old RSes are deleted after the newRS revision being updated, and
// newRevision becomes smaller than newRS's revision. We should only update newRS revision when it's smaller than newRevision.
oldRevisionInt, err := strconv.ParseInt(oldRevision, 10, 64)
if err != nil {
if oldRevision != "" {
logger.Info("Updating replica set revision OldRevision not int", "err", err)
return false
}
// If the RS annotation is empty then initialise it to 0
oldRevisionInt = 0
}
newRevisionInt, err := strconv.ParseInt(newRevision, 10, 64)
if err != nil {
logger.Info("Updating replica set revision NewRevision not int", "err", err)
return false
}
if oldRevisionInt < newRevisionInt {
newRS.Annotations[RevisionAnnotation] = newRevision
annotationChanged = true
logger.V(4).Info("Updating replica set revision", "replicaSet", klog.KObj(newRS), "newRevision", newRevision)
}
// If a revision annotation already existed and this replica set was updated with a new revision
// then that means we are rolling back to this replica set. We need to preserve the old revisions
// for historical information.
if ok && oldRevisionInt < newRevisionInt {
revisionHistoryAnnotation := newRS.Annotations[RevisionHistoryAnnotation]
oldRevisions := strings.Split(revisionHistoryAnnotation, ",")
if len(oldRevisions[0]) == 0 {
newRS.Annotations[RevisionHistoryAnnotation] = oldRevision
} else {
totalLen := len(revisionHistoryAnnotation) + len(oldRevision) + 1
// index for the starting position in oldRevisions
start := 0
for totalLen > revHistoryLimitInChars && start < len(oldRevisions) {
totalLen = totalLen - len(oldRevisions[start]) - 1
start++
}
if totalLen <= revHistoryLimitInChars {
oldRevisions = append(oldRevisions[start:], oldRevision)
newRS.Annotations[RevisionHistoryAnnotation] = strings.Join(oldRevisions, ",")
} else {
logger.Info("Not appending revision due to revision history length limit reached", "revisionHistoryLimit", revHistoryLimitInChars)
}
}
}
// If the new replica set is about to be created, we need to add replica annotations to it.
if !exists && SetReplicasAnnotations(newRS, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+MaxSurge(*deployment)) {
annotationChanged = true
}
return annotationChanged
}
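// Worked example (hypothetical values): if the replica set currently carries revision "3" and
// revision-history "1,2", and the deployment moves it to revision "5", the annotations become
// revision "5" and revision-history "1,2,3". Old entries are dropped from the front of the history
// whenever the joined string would exceed revHistoryLimitInChars.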
var annotationsToSkip = map[string]bool{
v1.LastAppliedConfigAnnotation: true,
RevisionAnnotation: true,
RevisionHistoryAnnotation: true,
DesiredReplicasAnnotation: true,
MaxReplicasAnnotation: true,
apps.DeprecatedRollbackTo: true,
}
// skipCopyAnnotation returns true if we should skip copying the annotation with the given annotation key
// TODO: How to decide which annotations should / should not be copied?
//
// See https://github.com/kubernetes/kubernetes/pull/20035#issuecomment-179558615
func skipCopyAnnotation(key string) bool {
return annotationsToSkip[key]
}
// copyDeploymentAnnotationsToReplicaSet copies deployment's annotations to replica set's annotations,
// and returns true if replica set's annotation is changed.
// Note that apply and revision annotations are not copied.
func copyDeploymentAnnotationsToReplicaSet(deployment *apps.Deployment, rs *apps.ReplicaSet) bool {
rsAnnotationsChanged := false
if rs.Annotations == nil {
rs.Annotations = make(map[string]string)
}
for k, v := range deployment.Annotations {
// newRS revision is updated automatically in getNewReplicaSet, and the deployment's revision number is then updated
// by copying its newRS revision number. We should not copy deployment's revision to its newRS, since the update of
// deployment revision number may fail (revision becomes stale) and the revision number in newRS is more reliable.
if _, exist := rs.Annotations[k]; skipCopyAnnotation(k) || (exist && rs.Annotations[k] == v) {
continue
}
rs.Annotations[k] = v
rsAnnotationsChanged = true
}
return rsAnnotationsChanged
}
// SetDeploymentAnnotationsTo sets deployment's annotations as given RS's annotations.
// This action should be done if and only if the deployment is rolling back to this rs.
// Note that apply and revision annotations are not changed.
func SetDeploymentAnnotationsTo(deployment *apps.Deployment, rollbackToRS *apps.ReplicaSet) {
deployment.Annotations = getSkippedAnnotations(deployment.Annotations)
for k, v := range rollbackToRS.Annotations {
if !skipCopyAnnotation(k) {
deployment.Annotations[k] = v
}
}
}
func getSkippedAnnotations(annotations map[string]string) map[string]string {
skippedAnnotations := make(map[string]string)
for k, v := range annotations {
if skipCopyAnnotation(k) {
skippedAnnotations[k] = v
}
}
return skippedAnnotations
}
// FindActiveOrLatest returns the only active or the latest replica set in case there is at most one active
// replica set. If there are more active replica sets, then we should proportionally scale them.
func FindActiveOrLatest(newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet) *apps.ReplicaSet {
if newRS == nil && len(oldRSs) == 0 {
return nil
}
sort.Sort(sort.Reverse(controller.ReplicaSetsByCreationTimestamp(oldRSs)))
allRSs := controller.FilterActiveReplicaSets(append(oldRSs, newRS))
switch len(allRSs) {
case 0:
// If there is no active replica set then we should return the newest.
if newRS != nil {
return newRS
}
return oldRSs[0]
case 1:
return allRSs[0]
default:
return nil
}
}
// GetDesiredReplicasAnnotation returns the number of desired replicas
func GetDesiredReplicasAnnotation(logger klog.Logger, rs *apps.ReplicaSet) (int32, bool) {
return getNonNegativeInt32FromAnnotationVerbose(logger, rs, DesiredReplicasAnnotation)
}
func getMaxReplicasAnnotation(logger klog.Logger, rs *apps.ReplicaSet) (int32, bool) {
return getNonNegativeInt32FromAnnotationVerbose(logger, rs, MaxReplicasAnnotation)
}
func getNonNegativeInt32FromAnnotationVerbose(logger klog.Logger, rs *apps.ReplicaSet, annotationKey string) (int32, bool) {
value, ok, err := getNonNegativeInt32FromAnnotation(rs, annotationKey)
if err != nil {
logger.V(2).Info("Could not convert the value with annotation key for the replica set", "annotationValue", rs.Annotations[annotationKey], "annotationKey", annotationKey, "replicaSet", klog.KObj(rs))
}
return value, ok
}
func getNonNegativeInt32FromAnnotation(rs *apps.ReplicaSet, annotationKey string) (int32, bool, error) {
annotationValue, ok := rs.Annotations[annotationKey]
if !ok {
return int32(0), false, nil
}
intValue, err := strconv.ParseUint(annotationValue, 10, 32)
if err != nil {
return int32(0), false, err
}
if intValue > math.MaxInt32 {
return int32(0), false, fmt.Errorf("value %d is out of range (higher than %d)", intValue, math.MaxInt32)
}
return int32(intValue), true, nil
}
// SetReplicasAnnotations sets the desiredReplicas and maxReplicas into the annotations
func SetReplicasAnnotations(rs *apps.ReplicaSet, desiredReplicas, maxReplicas int32) bool {
updated := false
if rs.Annotations == nil {
rs.Annotations = make(map[string]string)
}
desiredString := fmt.Sprintf("%d", desiredReplicas)
if hasString := rs.Annotations[DesiredReplicasAnnotation]; hasString != desiredString {
rs.Annotations[DesiredReplicasAnnotation] = desiredString
updated = true
}
maxString := fmt.Sprintf("%d", maxReplicas)
if hasString := rs.Annotations[MaxReplicasAnnotation]; hasString != maxString {
rs.Annotations[MaxReplicasAnnotation] = maxString
updated = true
}
return updated
}
// ReplicasAnnotationsNeedUpdate return true if ReplicasAnnotations need to be updated
func ReplicasAnnotationsNeedUpdate(rs *apps.ReplicaSet, desiredReplicas, maxReplicas int32) bool {
if rs.Annotations == nil {
return true
}
desiredString := fmt.Sprintf("%d", desiredReplicas)
if hasString := rs.Annotations[DesiredReplicasAnnotation]; hasString != desiredString {
return true
}
maxString := fmt.Sprintf("%d", maxReplicas)
if hasString := rs.Annotations[MaxReplicasAnnotation]; hasString != maxString {
return true
}
return false
}
// MaxUnavailable returns the maximum unavailable pods a rolling deployment can take.
func MaxUnavailable(deployment apps.Deployment) int32 {
if !IsRollingUpdate(&deployment) || *(deployment.Spec.Replicas) == 0 {
return int32(0)
}
// Error caught by validation
_, maxUnavailable, _ := ResolveFenceposts(deployment.Spec.Strategy.RollingUpdate.MaxSurge, deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, *(deployment.Spec.Replicas))
if maxUnavailable > *deployment.Spec.Replicas {
return *deployment.Spec.Replicas
}
return maxUnavailable
}
// MinAvailable returns the minimum available pods of a given deployment
func MinAvailable(deployment *apps.Deployment) int32 {
if !IsRollingUpdate(deployment) {
return int32(0)
}
return *(deployment.Spec.Replicas) - MaxUnavailable(*deployment)
}
// MaxSurge returns the maximum surge pods a rolling deployment can take.
func MaxSurge(deployment apps.Deployment) int32 {
if !IsRollingUpdate(&deployment) {
return int32(0)
}
// Error caught by validation
maxSurge, _, _ := ResolveFenceposts(deployment.Spec.Strategy.RollingUpdate.MaxSurge, deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, *(deployment.Spec.Replicas))
return maxSurge
}
// GetReplicaSetProportion will estimate the proportion for the provided replica set using 1. the current size
// of the parent deployment, 2. the replica count that needs to be added on the replica sets of the
// deployment, and 3. the total replicas added in the replica sets of the deployment so far.
func GetReplicaSetProportion(logger klog.Logger, rs *apps.ReplicaSet, d apps.Deployment, deploymentReplicasToAdd, deploymentReplicasAdded int32) int32 {
if rs == nil || *(rs.Spec.Replicas) == 0 || deploymentReplicasToAdd == 0 || deploymentReplicasToAdd == deploymentReplicasAdded {
return int32(0)
}
rsFraction := getReplicaSetFraction(logger, *rs, d)
allowed := deploymentReplicasToAdd - deploymentReplicasAdded
if deploymentReplicasToAdd > 0 {
// Use the minimum between the replica set fraction and the maximum allowed replicas
// when scaling up. This way we ensure we will not scale up more than the allowed
// replicas we can add.
return min(rsFraction, allowed)
}
// Use the maximum between the replica set fraction and the maximum allowed replicas
// when scaling down. This way we ensure we will not scale down more than the allowed
// replicas we can remove.
return max(rsFraction, allowed)
}
// getReplicaSetFraction estimates the fraction of replicas a replica set can have in
// 1. a scaling event during a rollout or 2. when scaling a paused deployment.
func getReplicaSetFraction(logger klog.Logger, rs apps.ReplicaSet, d apps.Deployment) int32 {
// If we are scaling down to zero then the fraction of this replica set is its whole size (negative)
if *(d.Spec.Replicas) == int32(0) {
return -*(rs.Spec.Replicas)
}
deploymentMaxReplicas := *(d.Spec.Replicas) + MaxSurge(d)
deploymentMaxReplicasBeforeScale, ok := getMaxReplicasAnnotation(logger, &rs)
if !ok || deploymentMaxReplicasBeforeScale == 0 {
// If we cannot find the annotation then fallback to the current deployment size.
// This can occur if someone tampers with the annotation (removes it, sets it to an invalid value, or to 0).
// Note that this will not be an accurate proportion estimation in case other replica sets have different values
// which means that the deployment was scaled at some point but we at least will stay in limits
// due to the min-max comparisons in GetReplicaSetProportion.
deploymentMaxReplicasBeforeScale = d.Status.Replicas
if deploymentMaxReplicasBeforeScale == 0 {
// Rare situation: missing annotation; some actor has removed it and pods are failing to be created.
return 0
}
}
// We should never proportionally scale up from zero (see GetReplicaSetProportion) which means rs.spec.replicas will never be zero here.
scaleBase := *(rs.Spec.Replicas)
// deploymentMaxReplicasBeforeScale should normally be a positive value, and we have made sure that it is not a zero.
newRSsize := (float64(scaleBase * deploymentMaxReplicas)) / float64(deploymentMaxReplicasBeforeScale)
return integer.RoundToInt32(newRSsize) - *(rs.Spec.Replicas)
}
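// Worked example (hypothetical values): a deployment with maxSurge=20% is scaled from 10 replicas
// (so this replica set still carries the max-replicas annotation 10+2=12) to 15 replicas, giving
// deploymentMaxReplicas = 15+3 = 18. A replica set with spec.replicas=8 is resized towards
// 8*18/12 = 12, so the returned fraction is 12-8 = +4.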
// RsListFromClient returns an rsListFunc that wraps the given client.
func RsListFromClient(c appsclient.AppsV1Interface) RsListFunc {
return func(namespace string, options metav1.ListOptions) ([]*apps.ReplicaSet, error) {
rsList, err := c.ReplicaSets(namespace).List(context.TODO(), options)
if err != nil {
return nil, err
}
var ret []*apps.ReplicaSet
for i := range rsList.Items {
ret = append(ret, &rsList.Items[i])
}
return ret, err
}
}
// TODO: switch RsListFunc and podListFunc to full namespacers
// RsListFunc returns the ReplicaSet from the ReplicaSet namespace and the List metav1.ListOptions.
type RsListFunc func(string, metav1.ListOptions) ([]*apps.ReplicaSet, error)
// podListFunc returns the PodList from the Pod namespace and the List metav1.ListOptions.
type podListFunc func(string, metav1.ListOptions) (*v1.PodList, error)
// ListReplicaSets returns a slice of RSes the given deployment targets.
// Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan),
// because only the controller itself should do that.
// However, it does filter out anything whose ControllerRef doesn't match.
func ListReplicaSets(deployment *apps.Deployment, getRSList RsListFunc) ([]*apps.ReplicaSet, error) {
// TODO: Right now we list replica sets by their labels. We should list them by selector, i.e. the replica set's selector
// should be a superset of the deployment's selector, see https://github.com/kubernetes/kubernetes/issues/19830.
namespace := deployment.Namespace
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
if err != nil {
return nil, err
}
options := metav1.ListOptions{LabelSelector: selector.String()}
all, err := getRSList(namespace, options)
if err != nil {
return nil, err
}
// Only include those whose ControllerRef matches the Deployment.
owned := make([]*apps.ReplicaSet, 0, len(all))
for _, rs := range all {
if metav1.IsControlledBy(rs, deployment) {
owned = append(owned, rs)
}
}
return owned, nil
}
// ListPods returns a list of pods the given deployment targets.
// This needs a list of ReplicaSets for the Deployment,
// which can be found with ListReplicaSets().
// Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan),
// because only the controller itself should do that.
// However, it does filter out anything whose ControllerRef doesn't match.
func ListPods(deployment *apps.Deployment, rsList []*apps.ReplicaSet, getPodList podListFunc) (*v1.PodList, error) {
namespace := deployment.Namespace
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
if err != nil {
return nil, err
}
options := metav1.ListOptions{LabelSelector: selector.String()}
all, err := getPodList(namespace, options)
if err != nil {
return all, err
}
// Only include those whose ControllerRef points to a ReplicaSet that is in
// turn owned by this Deployment.
rsMap := make(map[types.UID]bool, len(rsList))
for _, rs := range rsList {
rsMap[rs.UID] = true
}
owned := &v1.PodList{Items: make([]v1.Pod, 0, len(all.Items))}
for i := range all.Items {
pod := &all.Items[i]
controllerRef := metav1.GetControllerOf(pod)
if controllerRef != nil && rsMap[controllerRef.UID] {
owned.Items = append(owned.Items, *pod)
}
}
return owned, nil
}
// EqualIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash]
// We ignore pod-template-hash because:
// 1. The hash result would be different upon podTemplateSpec API changes
// (e.g. the addition of a new field will cause the hash code to change)
// 2. The deployment template won't have hash labels
func EqualIgnoreHash(template1, template2 *v1.PodTemplateSpec) bool {
t1Copy := template1.DeepCopy()
t2Copy := template2.DeepCopy()
// Remove hash labels from template.Labels before comparing
delete(t1Copy.Labels, apps.DefaultDeploymentUniqueLabelKey)
delete(t2Copy.Labels, apps.DefaultDeploymentUniqueLabelKey)
return apiequality.Semantic.DeepEqual(t1Copy, t2Copy)
}
// FindNewReplicaSet returns the new RS this given deployment targets (the one with the same pod template).
func FindNewReplicaSet(deployment *apps.Deployment, rsList []*apps.ReplicaSet) *apps.ReplicaSet {
sort.Sort(controller.ReplicaSetsByCreationTimestamp(rsList))
for i := range rsList {
if EqualIgnoreHash(&rsList[i].Spec.Template, &deployment.Spec.Template) {
// In rare cases, such as after cluster upgrades, Deployment may end up with
// having more than one new ReplicaSets that have the same template as its template,
// see https://github.com/kubernetes/kubernetes/issues/40415
// We deterministically choose the oldest new ReplicaSet.
return rsList[i]
}
}
// new ReplicaSet does not exist.
return nil
}
// FindOldReplicaSets returns the old replica sets targeted by the given Deployment, with the given slice of RSes.
// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets.
func FindOldReplicaSets(deployment *apps.Deployment, rsList []*apps.ReplicaSet) ([]*apps.ReplicaSet, []*apps.ReplicaSet) {
var requiredRSs []*apps.ReplicaSet
var allRSs []*apps.ReplicaSet
newRS := FindNewReplicaSet(deployment, rsList)
for _, rs := range rsList {
// Filter out new replica set
if newRS != nil && rs.UID == newRS.UID {
continue
}
allRSs = append(allRSs, rs)
if *(rs.Spec.Replicas) != 0 {
requiredRSs = append(requiredRSs, rs)
}
}
return requiredRSs, allRSs
}
// SetFromReplicaSetTemplate sets the desired PodTemplateSpec from a replica set template to the given deployment.
func SetFromReplicaSetTemplate(deployment *apps.Deployment, template v1.PodTemplateSpec) *apps.Deployment {
deployment.Spec.Template.ObjectMeta = template.ObjectMeta
deployment.Spec.Template.Spec = template.Spec
deployment.Spec.Template.ObjectMeta.Labels = labelsutil.CloneAndRemoveLabel(
deployment.Spec.Template.ObjectMeta.Labels,
apps.DefaultDeploymentUniqueLabelKey)
return deployment
}
// GetReplicaCountForReplicaSets returns the sum of Replicas of the given replica sets.
func GetReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 {
totalReplicas := int32(0)
for _, rs := range replicaSets {
if rs != nil {
totalReplicas += *(rs.Spec.Replicas)
}
}
return totalReplicas
}
// GetActualReplicaCountForReplicaSets returns the sum of actual replicas of the given replica sets.
func GetActualReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 {
totalActualReplicas := int32(0)
for _, rs := range replicaSets {
if rs != nil {
totalActualReplicas += rs.Status.Replicas
}
}
return totalActualReplicas
}
// GetReadyReplicaCountForReplicaSets returns the number of ready pods corresponding to the given replica sets.
func GetReadyReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 {
totalReadyReplicas := int32(0)
for _, rs := range replicaSets {
if rs != nil {
totalReadyReplicas += rs.Status.ReadyReplicas
}
}
return totalReadyReplicas
}
// GetAvailableReplicaCountForReplicaSets returns the number of available pods corresponding to the given replica sets.
func GetAvailableReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 {
totalAvailableReplicas := int32(0)
for _, rs := range replicaSets {
if rs != nil {
totalAvailableReplicas += rs.Status.AvailableReplicas
}
}
return totalAvailableReplicas
}
// IsRollingUpdate returns true if the strategy type is a rolling update.
func IsRollingUpdate(deployment *apps.Deployment) bool {
return deployment.Spec.Strategy.Type == apps.RollingUpdateDeploymentStrategyType
}
// DeploymentComplete considers a deployment to be complete once all of its desired replicas
// are updated and available, and no old pods are running.
func DeploymentComplete(deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool {
return newStatus.UpdatedReplicas == *(deployment.Spec.Replicas) &&
newStatus.Replicas == *(deployment.Spec.Replicas) &&
newStatus.AvailableReplicas == *(deployment.Spec.Replicas) &&
newStatus.ObservedGeneration >= deployment.Generation
}
// DeploymentProgressing reports progress for a deployment. Progress is estimated by comparing the
// current with the new status of the deployment that the controller is observing. More specifically,
// when new pods are scaled up or become ready or available, or old pods are scaled down, then we
// consider the deployment is progressing.
func DeploymentProgressing(deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool {
oldStatus := deployment.Status
// Old replicas that need to be scaled down
oldStatusOldReplicas := oldStatus.Replicas - oldStatus.UpdatedReplicas
newStatusOldReplicas := newStatus.Replicas - newStatus.UpdatedReplicas
return (newStatus.UpdatedReplicas > oldStatus.UpdatedReplicas) ||
(newStatusOldReplicas < oldStatusOldReplicas) ||
newStatus.ReadyReplicas > deployment.Status.ReadyReplicas ||
newStatus.AvailableReplicas > deployment.Status.AvailableReplicas
}
// used for unit testing
var nowFn = func() time.Time { return time.Now() }
// DeploymentTimedOut considers a deployment to have timed out once its condition that reports progress
// is older than progressDeadlineSeconds or a Progressing condition with a TimedOutReason reason already
// exists.
func DeploymentTimedOut(ctx context.Context, deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool {
if !HasProgressDeadline(deployment) {
return false
}
// Look for the Progressing condition. If it doesn't exist, we have no base to estimate progress.
// If it's already set with a TimedOutReason reason, we have already timed out, no need to check
// again.
condition := GetDeploymentCondition(*newStatus, apps.DeploymentProgressing)
if condition == nil {
return false
}
// If the previous condition has been a successful rollout then we shouldn't try to
// estimate any progress. Scenario:
//
// * progressDeadlineSeconds is smaller than the difference between now and the time
// the last rollout finished in the past.
// * the creation of a new ReplicaSet triggers a resync of the Deployment prior to the
// cached copy of the Deployment getting updated with the status.condition that indicates
// the creation of the new ReplicaSet.
//
// The Deployment will be resynced and eventually its Progressing condition will catch
// up with the state of the world.
if condition.Reason == NewRSAvailableReason {
return false
}
if condition.Reason == TimedOutReason {
return true
}
logger := klog.FromContext(ctx)
// Look at the difference in seconds between now and the last time we reported any
// progress or tried to create a replica set, or resumed a paused deployment and
// compare against progressDeadlineSeconds.
from := condition.LastUpdateTime
now := nowFn()
delta := time.Duration(*deployment.Spec.ProgressDeadlineSeconds) * time.Second
timedOut := from.Add(delta).Before(now)
logger.V(4).Info("Deployment timed out from last progress check", "deployment", klog.KObj(deployment), "timeout", timedOut, "from", from, "now", now)
return timedOut
}
// NewRSNewReplicas calculates the number of replicas a deployment's new RS should have.
// When one of the following is true, we're rolling out the deployment; otherwise, we're scaling it.
// 1) The new RS is saturated: newRS's replicas == deployment's replicas
// 2) Max number of pods allowed is reached: deployment's replicas + maxSurge == all RSs' replicas
func NewRSNewReplicas(deployment *apps.Deployment, allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet) (int32, error) {
switch deployment.Spec.Strategy.Type {
case apps.RollingUpdateDeploymentStrategyType:
// Check if we can scale up.
maxSurge, err := intstrutil.GetScaledValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), true)
if err != nil {
return 0, err
}
// Find the total number of pods
currentPodCount := GetReplicaCountForReplicaSets(allRSs)
maxTotalPods := *(deployment.Spec.Replicas) + int32(maxSurge)
if currentPodCount >= maxTotalPods {
// Cannot scale up.
return *(newRS.Spec.Replicas), nil
}
// Scale up.
scaleUpCount := maxTotalPods - currentPodCount
// Do not exceed the number of desired replicas.
scaleUpCount = min(scaleUpCount, *(deployment.Spec.Replicas)-*(newRS.Spec.Replicas))
return *(newRS.Spec.Replicas) + scaleUpCount, nil
case apps.RecreateDeploymentStrategyType:
return *(deployment.Spec.Replicas), nil
default:
return 0, fmt.Errorf("deployment type %v isn't supported", deployment.Spec.Strategy.Type)
}
}
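// Worked example (hypothetical values): with deployment.spec.replicas=10 and maxSurge=25%,
// maxSurge resolves to 3 (rounded up) and maxTotalPods to 13. If all replica sets currently total
// 11 pods and the new replica set has 4, the scale-up budget is 13-11=2 (also capped by 10-4=6),
// so NewRSNewReplicas returns 4+2=6.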
// IsSaturated checks if the new replica set is saturated by comparing its size with its deployment size.
// Both the deployment and the replica set have to believe this replica set can own all of the desired
// replicas in the deployment and the annotation helps in achieving that. All pods of the ReplicaSet
// need to be available.
func IsSaturated(deployment *apps.Deployment, rs *apps.ReplicaSet) bool {
if rs == nil {
return false
}
desiredString := rs.Annotations[DesiredReplicasAnnotation]
desired, err := strconv.Atoi(desiredString)
if err != nil {
return false
}
return *(rs.Spec.Replicas) == *(deployment.Spec.Replicas) &&
int32(desired) == *(deployment.Spec.Replicas) &&
rs.Status.AvailableReplicas == *(deployment.Spec.Replicas)
}
// WaitForObservedDeployment polls for deployment to be updated so that deployment.Status.ObservedGeneration >= desiredGeneration.
// Returns an error if polling times out.
func WaitForObservedDeployment(getDeploymentFunc func() (*apps.Deployment, error), desiredGeneration int64, interval, timeout time.Duration) error {
// TODO: This should take clientset.Interface when all code is updated to use clientset. Keeping it this way allows the function to be used by callers who have client.Interface.
return wait.PollImmediate(interval, timeout, func() (bool, error) {
deployment, err := getDeploymentFunc()
if err != nil {
return false, err
}
return deployment.Status.ObservedGeneration >= desiredGeneration, nil
})
}
// ResolveFenceposts resolves both maxSurge and maxUnavailable. This needs to happen in one
// step. For example:
//
// 2 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1), then old(-1), then new(+1)
// 1 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1)
// 2 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
// 1 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1)
// 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
// 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1)
func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) {
surge, err := intstrutil.GetScaledValueFromIntOrPercent(intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt32(0)), int(desired), true)
if err != nil {
return 0, 0, err
}
unavailable, err := intstrutil.GetScaledValueFromIntOrPercent(intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt32(0)), int(desired), false)
if err != nil {
return 0, 0, err
}
if surge == 0 && unavailable == 0 {
// Validation should never allow the user to explicitly use zero values for both maxSurge
// and maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero.
// If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the
// theory that surge might not work due to quota.
unavailable = 1
}
return int32(surge), int32(unavailable), nil
}
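// Worked example (hypothetical values): with desired=10, maxSurge=25% and maxUnavailable=25%,
// ResolveFenceposts returns surge=3 (rounded up) and unavailable=2 (rounded down). With desired=5,
// maxSurge=0% and maxUnavailable=10%, both values round to 0, so unavailable is bumped to 1 to keep
// the rollout from deadlocking.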
// HasProgressDeadline checks if the Deployment d is expected to surface the reason
// "ProgressDeadlineExceeded" when the Deployment progress takes longer than expected time.
func HasProgressDeadline(d *apps.Deployment) bool {
return d.Spec.ProgressDeadlineSeconds != nil && *d.Spec.ProgressDeadlineSeconds != math.MaxInt32
}
// HasRevisionHistoryLimit checks if the Deployment d is expected to keep a specified number of
// old ReplicaSets. These ReplicaSets are mainly kept for the purpose of rollback.
// The RevisionHistoryLimit can start from 0 (no retained ReplicaSets). When set to math.MaxInt32,
// the Deployment will keep all revisions.
func HasRevisionHistoryLimit(d *apps.Deployment) bool {
return d.Spec.RevisionHistoryLimit != nil && *d.Spec.RevisionHistoryLimit != math.MaxInt32
}
// GetDeploymentsForReplicaSet returns a list of Deployments that potentially
// match a ReplicaSet. Only the one specified in the ReplicaSet's ControllerRef
// will actually manage it.
// Returns an error only if no matching Deployments are found.
func GetDeploymentsForReplicaSet(deploymentLister appslisters.DeploymentLister, rs *apps.ReplicaSet) ([]*apps.Deployment, error) {
if len(rs.Labels) == 0 {
return nil, fmt.Errorf("no deployments found for ReplicaSet %v because it has no labels", rs.Name)
}
// TODO: MODIFY THIS METHOD so that it checks for the podTemplateSpecHash label
dList, err := deploymentLister.Deployments(rs.Namespace).List(labels.Everything())
if err != nil {
return nil, err
}
var deployments []*apps.Deployment
for _, d := range dList {
selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
if err != nil {
// This object has an invalid selector, so it does not match the ReplicaSet
continue
}
// If a deployment with a nil or empty selector creeps in, it should match nothing, not everything.
if selector.Empty() || !selector.Matches(labels.Set(rs.Labels)) {
continue
}
deployments = append(deployments, d)
}
if len(deployments) == 0 {
return nil, fmt.Errorf("could not find deployments set for ReplicaSet %s in namespace %s with labels: %v", rs.Name, rs.Namespace, rs.Labels)
}
return deployments, nil
}
// ReplicaSetsByRevision sorts a list of ReplicaSet by revision, using their creation timestamp or name as a tie breaker.
// By using the creation timestamp, this sorts from old to new replica sets.
type ReplicaSetsByRevision []*apps.ReplicaSet
func (o ReplicaSetsByRevision) Len() int { return len(o) }
func (o ReplicaSetsByRevision) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o ReplicaSetsByRevision) Less(i, j int) bool {
revision1, err1 := Revision(o[i])
revision2, err2 := Revision(o[j])
if err1 != nil || err2 != nil || revision1 == revision2 {
return controller.ReplicaSetsByCreationTimestamp(o).Less(i, j)
}
return revision1 < revision2
}
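// Usage note: callers typically sort with the standard library, e.g.
// sort.Sort(ReplicaSetsByRevision(oldRSs)), which orders the (hypothetical) oldRSs slice from the
// lowest (oldest) revision to the highest.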

19
e2e/vendor/k8s.io/kubernetes/pkg/controller/doc.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package controller contains code for controllers (like the replication
// controller).
package controller // import "k8s.io/kubernetes/pkg/controller"

4
e2e/vendor/k8s.io/kubernetes/pkg/features/OWNERS generated vendored Normal file
View File

@ -0,0 +1,4 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- feature-approvers

View File

@ -0,0 +1,76 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package features
import (
"fmt"
clientfeatures "k8s.io/client-go/features"
"k8s.io/component-base/featuregate"
)
// clientAdapter adapts a k8s.io/component-base/featuregate.MutableFeatureGate to client-go's
// feature Gate and Registry interfaces. The component-base types Feature, FeatureSpec, and
// prerelease, and the component-base prerelease constants, are duplicated by parallel types and
// constants in client-go. The parallel types exist to allow the feature gate mechanism to be used
// for client-go features without introducing a circular dependency between component-base and
// client-go.
type clientAdapter struct {
mfg featuregate.MutableFeatureGate
}
var _ clientfeatures.Gates = &clientAdapter{}
func (a *clientAdapter) Enabled(name clientfeatures.Feature) bool {
return a.mfg.Enabled(featuregate.Feature(name))
}
var _ clientfeatures.Registry = &clientAdapter{}
func (a *clientAdapter) Add(in map[clientfeatures.Feature]clientfeatures.FeatureSpec) error {
out := map[featuregate.Feature]featuregate.FeatureSpec{}
for name, spec := range in {
converted := featuregate.FeatureSpec{
Default: spec.Default,
LockToDefault: spec.LockToDefault,
}
switch spec.PreRelease {
case clientfeatures.Alpha:
converted.PreRelease = featuregate.Alpha
case clientfeatures.Beta:
converted.PreRelease = featuregate.Beta
case clientfeatures.GA:
converted.PreRelease = featuregate.GA
case clientfeatures.Deprecated:
converted.PreRelease = featuregate.Deprecated
default:
// The default case implies programmer error. The same set of prerelease
// constants must exist in both component-base and client-go, and each one
// must have a case here.
panic(fmt.Sprintf("unrecognized prerelease %q of feature %q", spec.PreRelease, name))
}
out[featuregate.Feature(name)] = converted
}
return a.mfg.Add(out)
}
// Set implements the unexported interface that client-go feature gate testing expects for
// k8s.io/client-go/features/testing.SetFeatureDuringTest. This is necessary for integration tests
// to set test overrides for client-go feature gates.
func (a *clientAdapter) Set(name clientfeatures.Feature, enabled bool) error {
return a.mfg.SetFromMap(map[string]bool{string(name): enabled})
}
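// wireClientGoFeatureGatesExample is an illustrative sketch (not an upstream helper) of the typical
// wiring for this adapter: client-go's feature specs are registered on the shared component-base
// gate, and client-go is then pointed back at that gate for its feature checks. The concrete
// MutableFeatureGate is assumed to be supplied by the caller.
func wireClientGoFeatureGatesExample(mfg featuregate.MutableFeatureGate) error {
	adapter := &clientAdapter{mfg}
	// Register every client-go feature on the shared gate so it can be toggled together with the
	// Kubernetes feature gates.
	if err := clientfeatures.AddFeaturesToExistingFeatureGates(adapter); err != nil {
		return err
	}
	// From now on, client-go consults the shared gate when checking whether a feature is enabled.
	clientfeatures.ReplaceFeatureGates(adapter)
	return nil
}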

View File

@ -0,0 +1,894 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package features
import (
"k8s.io/apimachinery/pkg/util/runtime"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientfeatures "k8s.io/client-go/features"
"k8s.io/component-base/featuregate"
zpagesfeatures "k8s.io/component-base/zpages/features"
)
const (
// Every feature gate should add an entry here following this template:
//
// // owner: @username
// // kep: https://kep.k8s.io/NNN
// MyFeature featuregate.Feature = "MyFeature"
//
// Feature gates should be listed in alphabetical, case-sensitive
// (upper before any lower case character) order. This reduces the risk
// of code conflicts because changes are more likely to be scattered
// across the file.
// owner: @aojea
// Deprecated: v1.31
//
// Allow kubelet to request a certificate without any Node IP available, only
// with DNS names.
AllowDNSOnlyNodeCSR featuregate.Feature = "AllowDNSOnlyNodeCSR"
// owner: @HirazawaUi
// Deprecated: v1.32
//
// Allow spec.terminationGracePeriodSeconds to be overridden by MaxPodGracePeriodSeconds in soft evictions.
AllowOverwriteTerminationGracePeriodSeconds featuregate.Feature = "AllowOverwriteTerminationGracePeriodSeconds"
// owner: @thockin
// Deprecated: v1.29
//
// Enables Service.status.loadBalancer.ingress to be set on
// services of types other than LoadBalancer.
AllowServiceLBStatusOnNonLB featuregate.Feature = "AllowServiceLBStatusOnNonLB"
// owner: @bswartz
//
// Enables usage of any object for volume data source in PVCs
AnyVolumeDataSource featuregate.Feature = "AnyVolumeDataSource"
// owner: @tallclair
AppArmor featuregate.Feature = "AppArmor"
// owner: @tallclair
AppArmorFields featuregate.Feature = "AppArmorFields"
// owner: @liggitt
// kep:
//
// Make the Node authorizer use fine-grained selector authorization.
// Requires AuthorizeWithSelectors to be enabled.
AuthorizeNodeWithSelectors featuregate.Feature = "AuthorizeNodeWithSelectors"
// owner: @ahmedtd
//
// Enable ClusterTrustBundle objects and Kubelet integration.
ClusterTrustBundle featuregate.Feature = "ClusterTrustBundle"
// owner: @ahmedtd
//
// Enable ClusterTrustBundle Kubelet projected volumes. Depends on ClusterTrustBundle.
ClusterTrustBundleProjection featuregate.Feature = "ClusterTrustBundleProjection"
// owner: @szuecs
//
// Enable nodes to change CPUCFSQuotaPeriod
CPUCFSQuotaPeriod featuregate.Feature = "CustomCPUCFSQuotaPeriod"
// owner: @ConnorDoyle, @fromanirh (only for GA graduation)
//
// Alternative container-level CPU affinity policies.
CPUManager featuregate.Feature = "CPUManager"
// owner: @fromanirh
// beta: see below.
//
// Allow fine-tuning of cpumanager policies, experimental, alpha-quality options
// Per https://groups.google.com/g/kubernetes-sig-architecture/c/Nxsc7pfe5rw/m/vF2djJh0BAAJ
// We want to avoid a proliferation of feature gates. This feature gate:
// - will guard *a group* of cpumanager options whose quality level is alpha.
// - will never graduate to beta or stable.
// See https://groups.google.com/g/kubernetes-sig-architecture/c/Nxsc7pfe5rw/m/vF2djJh0BAAJ
// for details about the removal of this feature gate.
CPUManagerPolicyAlphaOptions featuregate.Feature = "CPUManagerPolicyAlphaOptions"
// owner: @fromanirh
// beta: see below.
//
// Allow fine-tuning of cpumanager policies, experimental, beta-quality options
// Per https://groups.google.com/g/kubernetes-sig-architecture/c/Nxsc7pfe5rw/m/vF2djJh0BAAJ
// We want to avoid a proliferation of feature gates. This feature gate:
// - will guard *a group* of cpumanager options whose quality level is beta.
// - is thus *introduced* as beta
// - will never graduate to stable.
// See https://groups.google.com/g/kubernetes-sig-architecture/c/Nxsc7pfe5rw/m/vF2djJh0BAAJ
// for details about the removal of this feature gate.
CPUManagerPolicyBetaOptions featuregate.Feature = "CPUManagerPolicyBetaOptions"
// owner: @fromanirh
//
// Allow the usage of options to fine-tune the cpumanager policies.
CPUManagerPolicyOptions featuregate.Feature = "CPUManagerPolicyOptions"
// owner: @jefftree
// kep: https://kep.k8s.io/4355
//
// Enables coordinated leader election in the API server
CoordinatedLeaderElection featuregate.Feature = "CoordinatedLeaderElection"
// owner: @trierra
// kep: http://kep.k8s.io/2589
//
// Enables the Portworx in-tree driver to Portworx migration feature.
CSIMigrationPortworx featuregate.Feature = "CSIMigrationPortworx"
// owner: @fengzixu
//
// Enables kubelet to detect CSI volume condition and send the event of the abnormal volume to the corresponding pod that is using it.
CSIVolumeHealth featuregate.Feature = "CSIVolumeHealth"
// owner: @adrianreber
// kep: https://kep.k8s.io/2008
//
// Enables container Checkpoint support in the kubelet
ContainerCheckpoint featuregate.Feature = "ContainerCheckpoint"
// owner: @helayoty
// kep: https://kep.k8s.io/4026
//
// Set the scheduled time as an annotation in the job.
CronJobsScheduledAnnotation featuregate.Feature = "CronJobsScheduledAnnotation"
// owner: @ttakahashi21 @mkimuram
// kep: https://kep.k8s.io/3294
//
// Enable usage of Provision of PVCs from snapshots in other namespaces
CrossNamespaceVolumeDataSource featuregate.Feature = "CrossNamespaceVolumeDataSource"
// owner: @elezar
// kep: http://kep.k8s.io/4009
//
// Add support for CDI Device IDs in the Device Plugin API.
DevicePluginCDIDevices featuregate.Feature = "DevicePluginCDIDevices"
// owner: @aojea
//
// Apiservers with the MultiCIDRServiceAllocator feature enabled perform a dual write on both the
// old bitmap ClusterIP allocators and the new IPAddress allocators, in order to support live
// migration between them. This feature gate disables the dual write on the new ClusterIP allocators.
DisableAllocatorDualWrite featuregate.Feature = "DisableAllocatorDualWrite"
// owner: @andrewsykim
//
// Disable any functionality in kube-apiserver, kube-controller-manager and kubelet related to the `--cloud-provider` component flag.
DisableCloudProviders featuregate.Feature = "DisableCloudProviders"
// owner: @andrewsykim
//
// Disable in-tree functionality in kubelet to authenticate to cloud provider container registries for image pull credentials.
DisableKubeletCloudCredentialProviders featuregate.Feature = "DisableKubeletCloudCredentialProviders"
// owner: @micahhausler
// Deprecated: v1.31
//
// Setting AllowInsecureKubeletCertificateSigningRequests to true disables node admission validation of CSRs
// for kubelet signers where CN=system:node:$nodeName.
// Remove in v1.33
AllowInsecureKubeletCertificateSigningRequests featuregate.Feature = "AllowInsecureKubeletCertificateSigningRequests"
// owner: @hoskeri
// Deprecated: v1.32
//
// Restores previous behavior where Kubelet fails self registration if node create returns 403 Forbidden.
// Remove in v1.34
KubeletRegistrationGetOnExistsOnly featuregate.Feature = "KubeletRegistrationGetOnExistsOnly"
// owner: @HirazawaUi
// kep: http://kep.k8s.io/4004
// Deprecated: v1.31 (default off)
//
// DisableNodeKubeProxyVersion disables the status.nodeInfo.kubeProxyVersion field of v1.Node
DisableNodeKubeProxyVersion featuregate.Feature = "DisableNodeKubeProxyVersion"
// owner: @pohly
// kep: http://kep.k8s.io/4381
//
// Enables support for requesting admin access in a ResourceClaim.
// Admin access is granted even if a device is already in use and,
// depending on the DRA driver, may enable additional permissions
// when a container uses the allocated device.
//
// This feature gate is currently defined in KEP #4381. The intent
// is to move it into a separate KEP.
DRAAdminAccess featuregate.Feature = "DRAAdminAccess"
// owner: @pohly
// kep: http://kep.k8s.io/4381
//
// Enables support for resources with custom parameters and a lifecycle
// that is independent of a Pod. Resource allocation is done by the scheduler
// based on "structured parameters".
DynamicResourceAllocation featuregate.Feature = "DynamicResourceAllocation"
// owner: @LionelJouin
// kep: http://kep.k8s.io/4817
// alpha: v1.32
//
// Enables support for the ResourceClaim.status.devices field and for setting this
// status from DRA drivers.
DRAResourceClaimDeviceStatus featuregate.Feature = "DRAResourceClaimDeviceStatus"
// owner: @lauralorenz
// kep: https://kep.k8s.io/4603
//
// Enables support for configurable per-node backoff maximums for restarting
// containers (aka containers in CrashLoopBackOff)
KubeletCrashLoopBackOffMax featuregate.Feature = "KubeletCrashLoopBackOffMax"
// owner: @harche
// kep: http://kep.k8s.io/3386
//
// Allows using event-driven PLEG (pod lifecycle event generator) through kubelet,
// which avoids frequent relisting of containers and helps optimize performance.
EventedPLEG featuregate.Feature = "EventedPLEG"
// owner: @andrewsykim @SergeyKanzhelev
//
// Ensure kubelet respects exec probe timeouts. The feature gate exists in case existing workloads
// may depend on old behavior where exec probe timeouts were ignored.
// Lock to default and remove after v1.22 based on user feedback that should be reflected in KEP #1972 update
ExecProbeTimeout featuregate.Feature = "ExecProbeTimeout"
// owner: @bobbypage
// Adds support for kubelet to detect node shutdown and gracefully terminate pods prior to the node being shutdown.
GracefulNodeShutdown featuregate.Feature = "GracefulNodeShutdown"
// owner: @wzshiming
// Make the kubelet use shutdown configuration based on pod priority values for graceful shutdown.
GracefulNodeShutdownBasedOnPodPriority featuregate.Feature = "GracefulNodeShutdownBasedOnPodPriority"
// owner: @dxist
//
// Enables support of HPA scaling to zero pods when an object or custom metric is configured.
HPAScaleToZero featuregate.Feature = "HPAScaleToZero"
// owner: @deepakkinni @xing-yang
// kep: https://kep.k8s.io/2644
//
// Honor Persistent Volume Reclaim Policy when it is "Delete" irrespective of PV-PVC
// deletion ordering.
HonorPVReclaimPolicy featuregate.Feature = "HonorPVReclaimPolicy"
// owner: @vinaykul,@tallclair
// kep: http://kep.k8s.io/1287
//
// Enables In-Place Pod Vertical Scaling
InPlacePodVerticalScaling featuregate.Feature = "InPlacePodVerticalScaling"
// owner: @tallclair
// kep: http://kep.k8s.io/1287
//
// Enables the AllocatedResources field in container status. This feature requires
// that InPlacePodVerticalScaling also be enabled.
InPlacePodVerticalScalingAllocatedStatus featuregate.Feature = "InPlacePodVerticalScalingAllocatedStatus"
// owner: @tallclair @esotsal
// alpha: v1.32
//
// Allow resource resize for containers in Guaranteed pods with integer CPU requests (default false).
// Applies only in nodes with InPlacePodVerticalScaling and CPU Manager features enabled, and
// CPU Manager Static Policy option set.
InPlacePodVerticalScalingExclusiveCPUs featuregate.Feature = "InPlacePodVerticalScalingExclusiveCPUs"
// owner: @trierra
//
// Disables the Portworx in-tree driver.
InTreePluginPortworxUnregister featuregate.Feature = "InTreePluginPortworxUnregister"
// owner: @mimowo
// kep: https://kep.k8s.io/3850
//
// Allows users to specify counting of failed pods per index.
JobBackoffLimitPerIndex featuregate.Feature = "JobBackoffLimitPerIndex"
// owner: @mimowo
// kep: https://kep.k8s.io/4368
// alpha: v1.30
// beta: v1.32
//
// Allows to delegate reconciliation of a Job object to an external controller.
JobManagedBy featuregate.Feature = "JobManagedBy"
// owner: @mimowo
// kep: https://kep.k8s.io/3329
//
// Allow users to specify handling of pod failures based on container exit codes
// and pod conditions.
JobPodFailurePolicy featuregate.Feature = "JobPodFailurePolicy"
// owner: @kannon92
// kep : https://kep.k8s.io/3939
//
// Allow users to specify recreating pods of a job only when
// pods have fully terminated.
JobPodReplacementPolicy featuregate.Feature = "JobPodReplacementPolicy"
// owner: @tenzen-y
// kep: https://kep.k8s.io/3998
//
// Allow users to specify when a Job can be declared as succeeded
// based on the set of succeeded pods.
JobSuccessPolicy featuregate.Feature = "JobSuccessPolicy"
// owner: @marquiz
// kep: http://kep.k8s.io/4033
//
// Enable detection of the kubelet cgroup driver configuration option from
// the CRI. The CRI runtime also needs to support this feature in which
// case the kubelet will ignore the cgroupDriver (--cgroup-driver)
// configuration option. If the runtime doesn't support it, the kubelet will
// fall back to using its cgroupDriver option.
KubeletCgroupDriverFromCRI featuregate.Feature = "KubeletCgroupDriverFromCRI"
// owner: @vinayakankugoyal
// kep: http://kep.k8s.io/2862
//
// Enable fine-grained kubelet API authorization for webhook based
// authorization.
KubeletFineGrainedAuthz featuregate.Feature = "KubeletFineGrainedAuthz"
// owner: @AkihiroSuda
//
// Enables support for running kubelet in a user namespace.
// The user namespace has to be created before running kubelet.
// All the node components such as CRI need to be running in the same user namespace.
KubeletInUserNamespace featuregate.Feature = "KubeletInUserNamespace"
// owner: @moshe010
//
// Enable POD resources API to return resources allocated by Dynamic Resource Allocation
KubeletPodResourcesDynamicResources featuregate.Feature = "KubeletPodResourcesDynamicResources"
// owner: @moshe010
//
// Enable POD resources API with Get method
KubeletPodResourcesGet featuregate.Feature = "KubeletPodResourcesGet"
// owner: @kannon92
// kep: https://kep.k8s.io/4191
//
// The split image filesystem feature enables kubelet to perform garbage collection
// of images (read-only layers) and/or containers (writeable layers) deployed on
// separate filesystems.
KubeletSeparateDiskGC featuregate.Feature = "KubeletSeparateDiskGC"
// owner: @sallyom
// kep: https://kep.k8s.io/2832
//
// Add support for distributed tracing in the kubelet
KubeletTracing featuregate.Feature = "KubeletTracing"
// owner: @alexanderConstantinescu
// kep: http://kep.k8s.io/3836
//
// Implement connection draining for terminating nodes for
// `externalTrafficPolicy: Cluster` services.
KubeProxyDrainingTerminatingNodes featuregate.Feature = "KubeProxyDrainingTerminatingNodes"
// owner: @RobertKrawitz
//
// Allow use of filesystems for ephemeral storage monitoring.
// Only applies if LocalStorageCapacityIsolation is set.
// Relies on UserNamespacesSupport feature, and thus should follow it when setting defaults.
LocalStorageCapacityIsolationFSQuotaMonitoring featuregate.Feature = "LocalStorageCapacityIsolationFSQuotaMonitoring"
// owner: @damemi
//
// Enables scaling down replicas via logarithmic comparison of creation/ready timestamps
LogarithmicScaleDown featuregate.Feature = "LogarithmicScaleDown"
// owner: @sanposhiho
// kep: https://kep.k8s.io/3633
//
// Enables the MatchLabelKeys and MismatchLabelKeys in PodAffinity and PodAntiAffinity.
MatchLabelKeysInPodAffinity featuregate.Feature = "MatchLabelKeysInPodAffinity"
// owner: @denkensk
// kep: https://kep.k8s.io/3243
//
// Enable MatchLabelKeys in PodTopologySpread.
MatchLabelKeysInPodTopologySpread featuregate.Feature = "MatchLabelKeysInPodTopologySpread"
// owner: @krmayankk
//
// Enables maxUnavailable for StatefulSet
MaxUnavailableStatefulSet featuregate.Feature = "MaxUnavailableStatefulSet"
// owner: @cynepco3hahue(alukiano) @cezaryzukowski @k-wiatrzyk, @Tal-or (only for GA graduation)
//
// Allows setting memory affinity for a container based on NUMA topology
MemoryManager featuregate.Feature = "MemoryManager"
// owner: @xiaoxubeii
// kep: https://kep.k8s.io/2570
//
// Enables kubelet to support memory QoS with cgroups v2.
MemoryQoS featuregate.Feature = "MemoryQoS"
// owner: @aojea
// kep: https://kep.k8s.io/1880
//
// Enables the dynamic configuration of Service IP ranges
MultiCIDRServiceAllocator featuregate.Feature = "MultiCIDRServiceAllocator"
// owner: @danwinship
// kep: https://kep.k8s.io/3866
//
// Allows running kube-proxy with `--mode nftables`.
NFTablesProxyMode featuregate.Feature = "NFTablesProxyMode"
// owner: @aravindhp @LorbusChris
// kep: http://kep.k8s.io/2271
//
// Enables querying logs of node services using the /logs endpoint. Enabling this feature has security implications.
// The recommendation is to enable it on an as-needed basis for debugging purposes and to disable it otherwise.
NodeLogQuery featuregate.Feature = "NodeLogQuery"
// owner: @iholder101 @kannon92
// kep: https://kep.k8s.io/2400
// Permits kubelet to run with swap enabled.
NodeSwap featuregate.Feature = "NodeSwap"
// owner: @mortent, @atiratree, @ravig
// kep: http://kep.k8s.io/3018
//
// Enables PDBUnhealthyPodEvictionPolicy for PodDisruptionBudgets
PDBUnhealthyPodEvictionPolicy featuregate.Feature = "PDBUnhealthyPodEvictionPolicy"
// owner: @RomanBednar
// kep: https://kep.k8s.io/3762
//
// Adds a new field to persistent volumes which holds a timestamp of when the volume last transitioned its phase.
PersistentVolumeLastPhaseTransitionTime featuregate.Feature = "PersistentVolumeLastPhaseTransitionTime"
// owner: @haircommander
// kep: https://kep.k8s.io/2364
//
// Configures the Kubelet to use the CRI to populate pod and container stats, instead of supplementing them with stats from cAdvisor.
// Requires that the CRI implementation support supplying the required stats.
PodAndContainerStatsFromCRI featuregate.Feature = "PodAndContainerStatsFromCRI"
// owner: @ahg-g
//
// Enables controlling pod ranking on replicaset scale-down.
PodDeletionCost featuregate.Feature = "PodDeletionCost"
// owner: @mimowo
// kep: https://kep.k8s.io/3329
//
// Enables support for appending a dedicated pod condition indicating that
// the pod is being deleted due to a disruption.
PodDisruptionConditions featuregate.Feature = "PodDisruptionConditions"
// owner: @danielvegamyhre
// kep: https://kep.k8s.io/4017
//
// Set pod completion index as a pod label for Indexed Jobs.
PodIndexLabel featuregate.Feature = "PodIndexLabel"
// owner: @knight42
// kep: https://kep.k8s.io/3288
// alpha: v1.32
//
// Enables only stdout or stderr of the container to be retrieved.
PodLogsQuerySplitStreams featuregate.Feature = "PodLogsQuerySplitStreams"
// owner: @ddebroy, @kannon92
//
// Enables reporting of PodReadyToStartContainersCondition condition in pod status after pod
// sandbox creation and network configuration completes successfully
PodReadyToStartContainersCondition featuregate.Feature = "PodReadyToStartContainersCondition"
// owner: @AxeZhan
// kep: http://kep.k8s.io/3960
//
// Enables SleepAction in container lifecycle hooks
PodLifecycleSleepAction featuregate.Feature = "PodLifecycleSleepAction"
// owner: @sreeram-venkitesh
// kep: http://kep.k8s.io/4818
//
// Allows zero value for sleep duration in SleepAction in container lifecycle hooks
PodLifecycleSleepActionAllowZero featuregate.Feature = "PodLifecycleSleepActionAllowZero"
// owner: @Huang-Wei
// kep: https://kep.k8s.io/3521
//
// Enable users to specify when a Pod is ready for scheduling.
PodSchedulingReadiness featuregate.Feature = "PodSchedulingReadiness"
// owner: @seans3
// kep: http://kep.k8s.io/4006
//
// Enables PortForward to be proxied with a websocket client
PortForwardWebsockets featuregate.Feature = "PortForwardWebsockets"
// owner: @jessfraz
//
// Enables control over ProcMountType for containers.
ProcMountType featuregate.Feature = "ProcMountType"
// owner: @sjenning
//
// Allows resource reservations at the QoS level preventing pods at lower QoS levels from
// bursting into resources requested at higher QoS levels (memory only for now)
QOSReserved featuregate.Feature = "QOSReserved"
// owner: @gnufied
// kep: https://kep.k8s.io/1790
// beta - v1.32
//
// Allow users to recover from volume expansion failure
RecoverVolumeExpansionFailure featuregate.Feature = "RecoverVolumeExpansionFailure"
// owner: @AkihiroSuda
// kep: https://kep.k8s.io/3857
//
// Allows recursive read-only mounts.
RecursiveReadOnlyMounts featuregate.Feature = "RecursiveReadOnlyMounts"
// owner: @adrianmoisey
// kep: https://kep.k8s.io/4427
//
// Relaxed DNS search string validation.
RelaxedDNSSearchValidation featuregate.Feature = "RelaxedDNSSearchValidation"
// owner: @HirazawaUi
// kep: https://kep.k8s.io/4369
//
// Allow almost all printable ASCII characters in environment variables
RelaxedEnvironmentVariableValidation featuregate.Feature = "RelaxedEnvironmentVariableValidation"
// owner: @zhangweikop
//
// Enable the kubelet TLS server to update its certificate if the specified certificate files are changed.
// This feature is useful when specifying tlsCertFile & tlsPrivateKeyFile in the kubelet configuration.
// No effect for other cases such as using serverTLSBootstrap.
ReloadKubeletServerCertificateFile featuregate.Feature = "ReloadKubeletServerCertificateFile"
// owner: @SergeyKanzhelev
// kep: https://kep.k8s.io/4680
//
// Adds the AllocatedResourcesStatus to the container status.
ResourceHealthStatus featuregate.Feature = "ResourceHealthStatus"
// owner: @mikedanese
//
// Gets a server certificate for the kubelet from the Certificate Signing
// Request API instead of generating one self signed and auto rotates the
// certificate as expiration approaches.
RotateKubeletServerCertificate featuregate.Feature = "RotateKubeletServerCertificate"
// owner: @kiashok
// kep: https://kep.k8s.io/4216
//
// Adds support to pull images based on the runtime class specified.
RuntimeClassInImageCriAPI featuregate.Feature = "RuntimeClassInImageCriApi"
// owner: @danielvegamyhre
// kep: https://kep.k8s.io/2413
//
// Allows mutating spec.completions for Indexed job when done in tandem with
// spec.parallelism. Specifically, spec.completions is mutable iff spec.completions
// equals spec.parallelism before and after the update.
ElasticIndexedJob featuregate.Feature = "ElasticIndexedJob"
// owner: @sanposhiho
// kep: http://kep.k8s.io/4247
//
// Enables the scheduler's enhancement called QueueingHints,
// which helps reduce useless requeueing.
SchedulerQueueingHints featuregate.Feature = "SchedulerQueueingHints"
// owner: @sanposhiho
// kep: http://kep.k8s.io/4832
// alpha: v1.32
//
// Runs some expensive operations within the scheduler's preemption logic asynchronously,
// which improves scheduling latency when preemption is involved.
SchedulerAsyncPreemption featuregate.Feature = "SchedulerAsyncPreemption"
// owner: @atosatto @yuanchen8911
// kep: http://kep.k8s.io/3902
//
// Decouples Taint Eviction Controller, performing taint-based Pod eviction, from Node Lifecycle Controller.
SeparateTaintEvictionController featuregate.Feature = "SeparateTaintEvictionController"
// owner: @aramase
// kep: https://kep.k8s.io/4412
//
// ServiceAccountNodeAudienceRestriction restricts the audiences for which the
// kubelet can request a service account token.
ServiceAccountNodeAudienceRestriction featuregate.Feature = "ServiceAccountNodeAudienceRestriction"
// owner: @munnerz
// kep: http://kep.k8s.io/4193
//
// Controls whether JTIs (UUIDs) are embedded into generated service account tokens, and whether these JTIs are
// recorded into the audit log for future requests made by these tokens.
ServiceAccountTokenJTI featuregate.Feature = "ServiceAccountTokenJTI"
// owner: @munnerz
// kep: http://kep.k8s.io/4193
//
// Controls whether the apiserver supports binding service account tokens to Node objects.
ServiceAccountTokenNodeBinding featuregate.Feature = "ServiceAccountTokenNodeBinding"
// owner: @munnerz
// kep: http://kep.k8s.io/4193
//
// Controls whether the apiserver will validate Node claims in service account tokens.
ServiceAccountTokenNodeBindingValidation featuregate.Feature = "ServiceAccountTokenNodeBindingValidation"
// owner: @munnerz
// kep: http://kep.k8s.io/4193
//
// Controls whether the apiserver embeds the node name and uid for the associated node when issuing
// service account tokens bound to Pod objects.
ServiceAccountTokenPodNodeInfo featuregate.Feature = "ServiceAccountTokenPodNodeInfo"
// owner: @gauravkghildiyal @robscott
// kep: https://kep.k8s.io/4444
//
// Enables trafficDistribution field on Services.
ServiceTrafficDistribution featuregate.Feature = "ServiceTrafficDistribution"
// owner: @gjkim42 @SergeyKanzhelev @matthyx @tzneal
// kep: http://kep.k8s.io/753
//
// Introduces sidecar containers, a new type of init container that starts
// before other containers but remains running for the full duration of the
// pod's lifecycle and will not block pod termination.
SidecarContainers featuregate.Feature = "SidecarContainers"
// owner: @derekwaynecarr
//
// Enables kubelet support to size memory backed volumes
// This is a kubelet only feature gate.
// Code can be removed in 1.35 without any consideration for emulated versions.
SizeMemoryBackedVolumes featuregate.Feature = "SizeMemoryBackedVolumes"
// owner: @mattcary
//
// Enables policies controlling deletion of PVCs created by a StatefulSet.
StatefulSetAutoDeletePVC featuregate.Feature = "StatefulSetAutoDeletePVC"
// owner: @psch
//
// Enables a StatefulSet to start from an arbitrary non-zero ordinal
StatefulSetStartOrdinal featuregate.Feature = "StatefulSetStartOrdinal"
// owner: @ahutsunshine
//
// Allows a namespace indexer for namespace-scoped resources in the apiserver cache to accelerate list operations.
StorageNamespaceIndex featuregate.Feature = "StorageNamespaceIndex"
// owner: @nilekhc
// kep: https://kep.k8s.io/4192
// Enables support for the StorageVersionMigrator controller.
StorageVersionMigrator featuregate.Feature = "StorageVersionMigrator"
// owner: @robscott
// kep: https://kep.k8s.io/2433
//
// Enables topology aware hints for EndpointSlices
TopologyAwareHints featuregate.Feature = "TopologyAwareHints"
// owner: @PiotrProkop
// kep: https://kep.k8s.io/3545
//
// Allow fine-tuning of topology manager policies with alpha options.
// This feature gate:
// - will guard *a group* of topology manager options whose quality level is alpha.
// - will never graduate to beta or stable.
TopologyManagerPolicyAlphaOptions featuregate.Feature = "TopologyManagerPolicyAlphaOptions"
// owner: @PiotrProkop
// kep: https://kep.k8s.io/3545
//
// Allow fine-tuning of topology manager policies with beta options.
// This feature gate:
// - will guard *a group* of topology manager options whose quality level is beta.
// - is thus *introduced* as beta
// - will never graduate to stable.
TopologyManagerPolicyBetaOptions featuregate.Feature = "TopologyManagerPolicyBetaOptions"
// owner: @PiotrProkop
// kep: https://kep.k8s.io/3545
//
// Allow the usage of options to fine-tune the topology manager policies.
TopologyManagerPolicyOptions featuregate.Feature = "TopologyManagerPolicyOptions"
// owner: @seans3
// kep: http://kep.k8s.io/4006
//
// Enables StreamTranslator proxy to handle WebSockets upgrade requests for the
// version of the RemoteCommand subprotocol that supports the "close" signal.
TranslateStreamCloseWebsocketRequests featuregate.Feature = "TranslateStreamCloseWebsocketRequests"
// owner: @richabanker
//
// Proxies clients to an apiserver capable of serving the request in the event of version skew.
UnknownVersionInteroperabilityProxy featuregate.Feature = "UnknownVersionInteroperabilityProxy"
// owner: @rata, @giuseppe
// kep: https://kep.k8s.io/127
//
// Enables user namespace support for stateless pods.
UserNamespacesSupport featuregate.Feature = "UserNamespacesSupport"
// owner: @mattcarry, @sunnylovestiramisu
// kep: https://kep.k8s.io/3751
//
// Enables user-specified volume attributes for persistent volumes, like IOPS and throughput.
VolumeAttributesClass featuregate.Feature = "VolumeAttributesClass"
// owner: @cofyc
VolumeCapacityPriority featuregate.Feature = "VolumeCapacityPriority"
// owner: @ksubrmnn
//
// Allows kube-proxy to create DSR loadbalancers for Windows
WinDSR featuregate.Feature = "WinDSR"
// owner: @zylxjtu
// kep: https://kep.k8s.io/4802
// alpha: v1.32
//
// Enables support for graceful node shutdown on Windows nodes.
WindowsGracefulNodeShutdown featuregate.Feature = "WindowsGracefulNodeShutdown"
// owner: @ksubrmnn
//
// Allows kube-proxy to run in Overlay mode for Windows
WinOverlay featuregate.Feature = "WinOverlay"
// owner: @jsturtevant
// kep: https://kep.k8s.io/4888
//
// Add CPU and Memory Affinity support to Windows nodes with CPUManager, MemoryManager and Topology manager
WindowsCPUAndMemoryAffinity featuregate.Feature = "WindowsCPUAndMemoryAffinity"
// owner: @marosset
// kep: https://kep.k8s.io/3503
//
// Enables support for joining Windows containers to a host's network namespace.
WindowsHostNetwork featuregate.Feature = "WindowsHostNetwork"
// owner: @kerthcet
// kep: https://kep.k8s.io/3094
//
// Allow users to specify whether to take nodeAffinity/nodeTaint into consideration when
// calculating pod topology spread skew.
NodeInclusionPolicyInPodTopologySpread featuregate.Feature = "NodeInclusionPolicyInPodTopologySpread"
// owner: @jsafrane
// kep: https://kep.k8s.io/1710
// Speed up container startup by mounting volumes with the correct SELinux label
// instead of changing each file on the volumes recursively.
// Initial implementation focused on ReadWriteOncePod volumes.
SELinuxMountReadWriteOncePod featuregate.Feature = "SELinuxMountReadWriteOncePod"
// owner: @Sh4d1,@RyanAoh,@rikatz
// kep: http://kep.k8s.io/1860
// LoadBalancerIPMode enables the IPMode field in the LoadBalancerIngress status of a Service
LoadBalancerIPMode featuregate.Feature = "LoadBalancerIPMode"
// owner: @haircommander
// kep: http://kep.k8s.io/4210
// ImageMaximumGCAge enables the Kubelet configuration field of the same name, allowing an admin
// to specify the age after which an image will be garbage collected.
ImageMaximumGCAge featuregate.Feature = "ImageMaximumGCAge"
// owner: @saschagrunert
//
// Enables user namespace support for Pod Security Standards. Enabling this
// feature will modify all Pod Security Standard rules to allow setting:
// spec[.*].securityContext.[runAsNonRoot,runAsUser]
// This feature gate should only be enabled if all nodes in the cluster
// support the user namespace feature and have it enabled. The feature gate
// will not graduate or be enabled by default in future Kubernetes
// releases.
UserNamespacesPodSecurityStandards featuregate.Feature = "UserNamespacesPodSecurityStandards"
// owner: @jsafrane
// kep: https://kep.k8s.io/1710
// Speed up container startup by mounting volumes with the correct SELinux label
// instead of changing each file on the volumes recursively.
SELinuxMount featuregate.Feature = "SELinuxMount"
// owner: @everpeace
// kep: https://kep.k8s.io/3619
//
// Enable SupplementalGroupsPolicy feature in PodSecurityContext
SupplementalGroupsPolicy featuregate.Feature = "SupplementalGroupsPolicy"
// owner: @saschagrunert
// kep: https://kep.k8s.io/4639
//
// Enables the image volume source.
ImageVolume featuregate.Feature = "ImageVolume"
// owner: @zhifei92
// beta: v1.32
//
// Enables the systemd watchdog for the kubelet. When enabled, the kubelet will
// periodically notify the systemd watchdog to indicate that it is still alive.
// This can help prevent the system from restarting the kubelet if it becomes
// unresponsive. The feature gate is enabled by default, but should only be used
// if the system supports the systemd watchdog feature and has it configured properly.
SystemdWatchdog = featuregate.Feature("SystemdWatchdog")
// owner: @jsafrane
// kep: https://kep.k8s.io/1710
// alpha: v1.32
//
// Speed up container startup by mounting volumes with the correct SELinux label
// instead of changing each file on the volumes recursively.
// Enables the SELinuxChangePolicy field in PodSecurityContext before the SELinuxMount feature gate is enabled.
SELinuxChangePolicy featuregate.Feature = "SELinuxChangePolicy"
// owner: @HarshalNeelkamal
// alpha: v1.32
//
// Enables external service account JWT signing and key management.
// If enabled, it allows passing --service-account-signing-endpoint flag to configure external signer.
ExternalServiceAccountTokenSigner featuregate.Feature = "ExternalServiceAccountTokenSigner"
// owner: @ndixita
// kep: https://kep.k8s.io/2837
// alpha: v1.32
//
// Enables specifying resources at pod-level.
PodLevelResources featuregate.Feature = "PodLevelResources"
)
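// A minimal sketch of how these gate constants are consumed elsewhere in the tree: callers query
// the process-wide gate from k8s.io/apiserver/pkg/util/feature. SidecarContainers is used here
// purely as an example gate.
//
//  if utilfeature.DefaultFeatureGate.Enabled(SidecarContainers) {
//      // feature-gated code path
//  }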
func init() {
runtime.Must(utilfeature.DefaultMutableFeatureGate.Add(defaultKubernetesFeatureGates))
runtime.Must(utilfeature.DefaultMutableFeatureGate.AddVersioned(defaultVersionedKubernetesFeatureGates))
runtime.Must(zpagesfeatures.AddFeatureGates(utilfeature.DefaultMutableFeatureGate))
// Register all client-go features with kube's feature gate instance and make all client-go
// feature checks use kube's instance. The effect is that for kube binaries, client-go
// features are wired to the existing --feature-gates flag just as all other features
// are. Further, client-go features automatically support the existing mechanisms for
// feature enablement metrics and test overrides.
ca := &clientAdapter{utilfeature.DefaultMutableFeatureGate}
runtime.Must(clientfeatures.AddFeaturesToExistingFeatureGates(ca))
clientfeatures.ReplaceFeatureGates(ca)
}
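// A minimal sketch of the effect of this wiring, assuming a kube binary and the WatchListClient
// gate that current client-go versions register: a value passed via
// --feature-gates=WatchListClient=true is stored in DefaultMutableFeatureGate, and client-go's own
// checks observe it because ReplaceFeatureGates(ca) routes them through the same instance.
//
//  enabled := clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient)
//  _ = enabled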
// defaultKubernetesFeatureGates consists of legacy unversioned Kubernetes-specific feature keys.
// Please do not add to this file and use pkg/features/versioned_kube_features.go instead.
//
// Entries are separated from each other with blank lines to avoid sweeping gofmt changes
// when adding or removing one entry.
var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{}
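// For reference only, a legacy unversioned entry in the map above would have the following shape
// (the gate name is hypothetical); new gates must instead receive a VersionedSpecs entry in
// versioned_kube_features.go:
//
//  "MyFeature": {Default: false, PreRelease: featuregate.Alpha},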

View File

@ -0,0 +1,837 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package features
import (
apiextensionsfeatures "k8s.io/apiextensions-apiserver/pkg/features"
"k8s.io/apimachinery/pkg/util/version"
genericfeatures "k8s.io/apiserver/pkg/features"
"k8s.io/component-base/featuregate"
zpagesfeatures "k8s.io/component-base/zpages/features"
kcmfeatures "k8s.io/controller-manager/pkg/features"
)
// defaultVersionedKubernetesFeatureGates consists of all known Kubernetes-specific feature keys with VersionedSpecs.
// To add a new feature, define a key for it in pkg/features/kube_features.go and add it here. The features will be
// available throughout Kubernetes binaries.
// For features available via specific Kubernetes components like the apiserver,
// cloud-controller-manager, etc., find the respective kube_features.go file
// (e.g. staging/src/k8s.io/apiserver/pkg/features/kube_features.go), define the versioned
// feature gate there, and reference it in this file.
// To support n-3 compatibility version, features may only be removed 3 releases after graduation.
//
// Entries are alphabetized.
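// For illustration, a new entry follows this shape (hypothetical gate name and versions):
//
//  MyFeature: {
//      {Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Alpha},
//      {Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.Beta},
//  },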
var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate.VersionedSpecs{
AllowDNSOnlyNodeCSR: {
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Deprecated},
},
AllowInsecureKubeletCertificateSigningRequests: {
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Deprecated},
},
AllowOverwriteTerminationGracePeriodSeconds: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Deprecated},
},
AllowServiceLBStatusOnNonLB: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Deprecated},
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Deprecated, LockToDefault: true}, // remove in 1.35
},
AnyVolumeDataSource: {
{Version: version.MustParse("1.18"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.24"), Default: true, PreRelease: featuregate.Beta},
},
AppArmor: {
{Version: version.MustParse("1.4"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33
},
AppArmorFields: {
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33
},
AuthorizeNodeWithSelectors: {
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
},
kcmfeatures.CloudControllerManagerWebhook: {
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
},
ClusterTrustBundle: {
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
},
ClusterTrustBundleProjection: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
},
ContainerCheckpoint: {
{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
},
CPUCFSQuotaPeriod: {
{Version: version.MustParse("1.12"), Default: false, PreRelease: featuregate.Alpha},
},
CPUManager: {
{Version: version.MustParse("1.8"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.10"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.26"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.26
},
CPUManagerPolicyAlphaOptions: {
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
},
CPUManagerPolicyBetaOptions: {
{Version: version.MustParse("1.23"), Default: true, PreRelease: featuregate.Beta},
},
CPUManagerPolicyOptions: {
{Version: version.MustParse("1.22"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.23"), Default: true, PreRelease: featuregate.Beta},
},
CronJobsScheduledAnnotation: {
{Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.35
},
// inherited features from apiextensions-apiserver, relisted here to get a conflict if it is changed
// unintentionally on either side:
apiextensionsfeatures.CRDValidationRatcheting: {
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
},
CrossNamespaceVolumeDataSource: {
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha},
},
CSIMigrationPortworx: {
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, // On by default (requires Portworx CSI driver)
},
CSIVolumeHealth: {
{Version: version.MustParse("1.21"), Default: false, PreRelease: featuregate.Alpha},
},
// inherited features from apiextensions-apiserver, relisted here to get a conflict if it is changed
// unintentionally on either side:
apiextensionsfeatures.CustomResourceFieldSelectors: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, LockToDefault: true, PreRelease: featuregate.GA},
},
DevicePluginCDIDevices: {
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33
},
DisableAllocatorDualWrite: {
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha}, // remove after MultiCIDRServiceAllocator is GA
},
DisableCloudProviders: {
{Version: version.MustParse("1.22"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
DisableKubeletCloudCredentialProviders: {
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
DisableNodeKubeProxyVersion: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Deprecated},
},
DRAAdminAccess: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
DynamicResourceAllocation: {
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Beta},
},
DRAResourceClaimDeviceStatus: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
KubeletCrashLoopBackOffMax: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
ElasticIndexedJob: {
{Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.31, remove in 1.32
},
EventedPLEG: {
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha},
},
ExecProbeTimeout: {
{Version: version.MustParse("1.20"), Default: true, PreRelease: featuregate.GA}, // lock to default and remove after v1.22 based on KEP #1972 update
},
ExternalServiceAccountTokenSigner: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
genericfeatures.AdmissionWebhookMatchConditions: {
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
genericfeatures.AggregatedDiscoveryEndpoint: {
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
genericfeatures.AllowUnsafeMalformedObjectDeletion: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
genericfeatures.AnonymousAuthConfigurableEndpoints: {
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
},
genericfeatures.APIListChunking: {
{Version: version.MustParse("1.8"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.9"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
genericfeatures.APIResponseCompression: {
{Version: version.MustParse("1.8"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.16"), Default: true, PreRelease: featuregate.Beta},
},
genericfeatures.APIServerIdentity: {
{Version: version.MustParse("1.20"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.26"), Default: true, PreRelease: featuregate.Beta},
},
genericfeatures.APIServerTracing: {
{Version: version.MustParse("1.22"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta},
},
genericfeatures.APIServingWithRoutine: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
},
genericfeatures.AuthorizeWithSelectors: {
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
},
genericfeatures.BtreeWatchCache: {
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
},
genericfeatures.CBORServingAndStorage: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
genericfeatures.ConcurrentWatchObjectDecode: {
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta},
},
genericfeatures.ConsistentListFromCache: {
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
},
genericfeatures.CoordinatedLeaderElection: {
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha},
},
genericfeatures.EfficientWatchResumption: {
{Version: version.MustParse("1.20"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.21"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.24"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
genericfeatures.KMSv1: {
{Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Deprecated},
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Deprecated},
},
genericfeatures.MutatingAdmissionPolicy: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
genericfeatures.OpenAPIEnums: {
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.24"), Default: true, PreRelease: featuregate.Beta},
},
genericfeatures.RemainingItemCount: {
{Version: version.MustParse("1.15"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.16"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
genericfeatures.RemoteRequestHeaderUID: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
genericfeatures.ResilientWatchCacheInitialization: {
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
},
genericfeatures.RetryGenerateName: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, LockToDefault: true, PreRelease: featuregate.GA},
},
genericfeatures.SeparateCacheWatchRPC: {
{Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta},
},
genericfeatures.StorageVersionAPI: {
{Version: version.MustParse("1.20"), Default: false, PreRelease: featuregate.Alpha},
},
genericfeatures.StorageVersionHash: {
{Version: version.MustParse("1.14"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.15"), Default: true, PreRelease: featuregate.Beta},
},
genericfeatures.StrictCostEnforcementForVAP: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
genericfeatures.StrictCostEnforcementForWebhooks: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
genericfeatures.StructuredAuthenticationConfiguration: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
},
genericfeatures.StructuredAuthorizationConfiguration: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
genericfeatures.UnauthenticatedHTTP2DOSMitigation: {
{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta},
},
genericfeatures.WatchBookmark: {
{Version: version.MustParse("1.15"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.16"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.17"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
genericfeatures.WatchCacheInitializationPostStartHook: {
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta},
},
genericfeatures.WatchFromStorageWithoutResourceVersion: {
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Beta},
},
genericfeatures.WatchList: {
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
},
GracefulNodeShutdown: {
{Version: version.MustParse("1.20"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.21"), Default: true, PreRelease: featuregate.Beta},
},
GracefulNodeShutdownBasedOnPodPriority: {
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.24"), Default: true, PreRelease: featuregate.Beta},
},
HonorPVReclaimPolicy: {
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
},
HPAScaleToZero: {
{Version: version.MustParse("1.16"), Default: false, PreRelease: featuregate.Alpha},
},
ImageMaximumGCAge: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
},
ImageVolume: {
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha},
},
InPlacePodVerticalScaling: {
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
},
InPlacePodVerticalScalingAllocatedStatus: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
InPlacePodVerticalScalingExclusiveCPUs: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
InTreePluginPortworxUnregister: {
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
},
JobBackoffLimitPerIndex: {
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta},
},
JobManagedBy: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
},
JobPodFailurePolicy: {
{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.26"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33
},
JobPodReplacementPolicy: {
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta},
},
JobSuccessPolicy: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
},
KubeletCgroupDriverFromCRI: {
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
},
KubeletFineGrainedAuthz: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
KubeletInUserNamespace: {
{Version: version.MustParse("1.22"), Default: false, PreRelease: featuregate.Alpha},
},
KubeletPodResourcesDynamicResources: {
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
},
KubeletPodResourcesGet: {
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
},
KubeletRegistrationGetOnExistsOnly: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Deprecated},
},
KubeletSeparateDiskGC: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
},
KubeletTracing: {
{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta},
},
KubeProxyDrainingTerminatingNodes: {
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.31; remove in 1.33
},
LoadBalancerIPMode: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
LocalStorageCapacityIsolationFSQuotaMonitoring: {
{Version: version.MustParse("1.15"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta},
},
LogarithmicScaleDown: {
{Version: version.MustParse("1.21"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.22"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
MatchLabelKeysInPodAffinity: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
},
MatchLabelKeysInPodTopologySpread: {
{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta},
},
MaxUnavailableStatefulSet: {
{Version: version.MustParse("1.24"), Default: false, PreRelease: featuregate.Alpha},
},
MemoryManager: {
{Version: version.MustParse("1.21"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.22"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
MemoryQoS: {
{Version: version.MustParse("1.22"), Default: false, PreRelease: featuregate.Alpha},
},
MultiCIDRServiceAllocator: {
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta},
},
NFTablesProxyMode: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
},
NodeInclusionPolicyInPodTopologySpread: {
{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.26"), Default: true, PreRelease: featuregate.Beta},
},
NodeLogQuery: {
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Beta},
},
NodeSwap: {
{Version: version.MustParse("1.22"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
},
PDBUnhealthyPodEvictionPolicy: {
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33
},
PersistentVolumeLastPhaseTransitionTime: {
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33
},
PodAndContainerStatsFromCRI: {
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
},
PodDeletionCost: {
{Version: version.MustParse("1.21"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.22"), Default: true, PreRelease: featuregate.Beta},
},
PodDisruptionConditions: {
{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.26"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33
},
PodIndexLabel: {
{Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.35
},
PodLevelResources: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
PodLifecycleSleepAction: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
},
PodReadyToStartContainersCondition: {
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta},
},
PodLifecycleSleepActionAllowZero: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
PodSchedulingReadiness: {
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.30; remove in 1.32
},
PortForwardWebsockets: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
},
ProcMountType: {
{Version: version.MustParse("1.12"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta},
},
QOSReserved: {
{Version: version.MustParse("1.11"), Default: false, PreRelease: featuregate.Alpha},
},
RecoverVolumeExpansionFailure: {
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
},
RecursiveReadOnlyMounts: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
},
RelaxedDNSSearchValidation: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
RelaxedEnvironmentVariableValidation: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
},
ReloadKubeletServerCertificateFile: {
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
},
ResourceHealthStatus: {
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha},
},
RotateKubeletServerCertificate: {
{Version: version.MustParse("1.7"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.12"), Default: true, PreRelease: featuregate.Beta},
},
RuntimeClassInImageCriAPI: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
},
SchedulerAsyncPreemption: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
SchedulerQueueingHints: {
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
},
SELinuxChangePolicy: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
SELinuxMount: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
},
SELinuxMountReadWriteOncePod: {
{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta},
},
SeparateTaintEvictionController: {
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta},
},
StorageNamespaceIndex: {
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
},
ServiceAccountNodeAudienceRestriction: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Beta},
},
ServiceAccountTokenJTI: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.34
},
ServiceAccountTokenNodeBinding: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
},
ServiceAccountTokenNodeBindingValidation: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.34
},
ServiceAccountTokenPodNodeInfo: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.34
},
ServiceTrafficDistribution: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
},
SidecarContainers: {
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta},
},
SizeMemoryBackedVolumes: {
{Version: version.MustParse("1.20"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.22"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, LockToDefault: true, PreRelease: featuregate.GA},
},
PodLogsQuerySplitStreams: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
StatefulSetAutoDeletePVC: {
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.32, remove in 1.35
},
StatefulSetStartOrdinal: {
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.31, remove in 1.33
},
StorageVersionMigrator: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
},
SupplementalGroupsPolicy: {
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha},
},
SystemdWatchdog: {
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
},
TopologyAwareHints: {
{Version: version.MustParse("1.21"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.24"), Default: true, PreRelease: featuregate.Beta},
},
TopologyManagerPolicyAlphaOptions: {
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha},
},
TopologyManagerPolicyBetaOptions: {
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta},
},
TopologyManagerPolicyOptions: {
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA},
},
TranslateStreamCloseWebsocketRequests: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
},
UnknownVersionInteroperabilityProxy: {
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
},
UserNamespacesPodSecurityStandards: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
},
UserNamespacesSupport: {
{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Beta},
},
VolumeAttributesClass: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta},
},
VolumeCapacityPriority: {
{Version: version.MustParse("1.21"), Default: false, PreRelease: featuregate.Alpha},
},
WinDSR: {
{Version: version.MustParse("1.14"), Default: false, PreRelease: featuregate.Alpha},
},
WindowsGracefulNodeShutdown: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
WinOverlay: {
{Version: version.MustParse("1.14"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.20"), Default: true, PreRelease: featuregate.Beta},
},
WindowsCPUAndMemoryAffinity: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
WindowsHostNetwork: {
{Version: version.MustParse("1.26"), Default: true, PreRelease: featuregate.Alpha},
},
zpagesfeatures.ComponentFlagz: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
zpagesfeatures.ComponentStatusz: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
}

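Not part of the vendored file, but as a reading aid: a minimal sketch of how entries in this versioned map are typically consumed, assuming the standard feature-gate helpers from k8s.io/apiserver; the effective value depends on the component's (emulation) version and any --feature-gates overrides.
package main
import (
	"fmt"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
)
func main() {
	// DefaultFeatureGate resolves the versioned specs above against the
	// running component's version, so NodeSwap reports its current default
	// here unless it was overridden on the command line.
	fmt.Println("NodeSwap enabled:", utilfeature.DefaultFeatureGate.Enabled(features.NodeSwap))
}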
19
e2e/vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package fieldpath supplies methods for extracting fields from objects
// given a path to a field.
package fieldpath // import "k8s.io/kubernetes/pkg/fieldpath"

120
e2e/vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go generated vendored Normal file
View File

@ -0,0 +1,120 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fieldpath
import (
"fmt"
"sort"
"strconv"
"strings"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/util/validation"
)
// FormatMap formats map[string]string to a string.
func FormatMap(m map[string]string) (fmtStr string) {
// output with keys in sorted order to provide stable output
keys := make([]string, 0, len(m))
var grow int
for k, v := range m {
keys = append(keys, k)
// add 4 extra bytes per entry: one '=', one '\n', and the two quotes around the value
grow += len(k) + len(v) + 4
}
sort.Strings(keys)
// allocate space to avoid expansion
dst := make([]byte, 0, grow)
for _, key := range keys {
if len(dst) > 0 {
dst = append(dst, '\n')
}
dst = append(dst, key...)
dst = append(dst, '=')
dst = strconv.AppendQuote(dst, m[key])
}
return string(dst)
}
// ExtractFieldPathAsString extracts the field from the given object
// and returns it as a string. The object must be a pointer to an
// API type.
func ExtractFieldPathAsString(obj interface{}, fieldPath string) (string, error) {
accessor, err := meta.Accessor(obj)
if err != nil {
return "", err
}
if path, subscript, ok := SplitMaybeSubscriptedPath(fieldPath); ok {
switch path {
case "metadata.annotations":
if errs := validation.IsQualifiedName(strings.ToLower(subscript)); len(errs) != 0 {
return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";"))
}
return accessor.GetAnnotations()[subscript], nil
case "metadata.labels":
if errs := validation.IsQualifiedName(subscript); len(errs) != 0 {
return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";"))
}
return accessor.GetLabels()[subscript], nil
default:
return "", fmt.Errorf("fieldPath %q does not support subscript", fieldPath)
}
}
switch fieldPath {
case "metadata.annotations":
return FormatMap(accessor.GetAnnotations()), nil
case "metadata.labels":
return FormatMap(accessor.GetLabels()), nil
case "metadata.name":
return accessor.GetName(), nil
case "metadata.namespace":
return accessor.GetNamespace(), nil
case "metadata.uid":
return string(accessor.GetUID()), nil
}
return "", fmt.Errorf("unsupported fieldPath: %v", fieldPath)
}
// SplitMaybeSubscriptedPath checks whether the specified fieldPath is
// subscripted, and
// - if yes, this function splits the fieldPath into path and subscript, and
// returns (path, subscript, true).
// - if no, this function returns (fieldPath, "", false).
//
// Example inputs and outputs:
//
// "metadata.annotations['myKey']" --> ("metadata.annotations", "myKey", true)
// "metadata.annotations['a[b]c']" --> ("metadata.annotations", "a[b]c", true)
// "metadata.labels['']" --> ("metadata.labels", "", true)
// "metadata.labels" --> ("metadata.labels", "", false)
func SplitMaybeSubscriptedPath(fieldPath string) (string, string, bool) {
if !strings.HasSuffix(fieldPath, "']") {
return fieldPath, "", false
}
s := strings.TrimSuffix(fieldPath, "']")
parts := strings.SplitN(s, "['", 2)
if len(parts) < 2 {
return fieldPath, "", false
}
if len(parts[0]) == 0 {
return fieldPath, "", false
}
return parts[0], parts[1], true
}

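A small usage sketch, separate from the vendored package, showing how ExtractFieldPathAsString and SplitMaybeSubscriptedPath behave for a Pod; the Pod literal and its values are illustrative only.
package main
import (
	"fmt"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/fieldpath"
)
func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name:      "web-0",
		Namespace: "demo",
		Labels:    map[string]string{"app": "web"},
	}}
	// A plain field path returns the metadata value as a string.
	name, _ := fieldpath.ExtractFieldPathAsString(pod, "metadata.name")
	fmt.Println(name) // web-0
	// A subscripted path selects a single label (or annotation) key.
	app, _ := fieldpath.ExtractFieldPathAsString(pod, "metadata.labels['app']")
	fmt.Println(app) // web
	// SplitMaybeSubscriptedPath exposes the parsing used above.
	path, key, ok := fieldpath.SplitMaybeSubscriptedPath("metadata.annotations['myKey']")
	fmt.Println(path, key, ok) // metadata.annotations myKey true
}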
View File

@ -0,0 +1,9 @@
# See the OWNERS docs at https://go.k8s.io/owners
# Disable inheritance as this is an api owners file
options:
no_parent_owners: true
approvers:
- api-approvers
reviewers:
- sig-node-api-reviewers

View File

@ -0,0 +1,20 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
// +groupName=kubelet.config.k8s.io
package config // import "k8s.io/kubernetes/pkg/kubelet/apis/config"

View File

@ -0,0 +1,32 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
// KubeletConfigurationPathRefs returns pointers to all of the KubeletConfiguration fields that contain filepaths.
// You might use this, for example, to resolve all relative paths against some common root before
// passing the configuration to the application. This method must be kept up to date as new fields are added.
func KubeletConfigurationPathRefs(kc *KubeletConfiguration) []*string {
paths := []*string{}
paths = append(paths, &kc.StaticPodPath)
paths = append(paths, &kc.Authentication.X509.ClientCAFile)
paths = append(paths, &kc.TLSCertFile)
paths = append(paths, &kc.TLSPrivateKeyFile)
paths = append(paths, &kc.ResolverConfig)
paths = append(paths, &kc.VolumePluginDir)
paths = append(paths, &kc.PodLogsDir)
return paths
}

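The comment above suggests resolving relative paths against a common root; a hedged sketch of what that could look like outside the vendored package (resolvePaths and the root argument are illustrative, not kubelet code).
package example
import (
	"path/filepath"
	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
)
// resolvePaths rewrites every relative file path in the configuration to be
// absolute under root, using the pointers returned by
// KubeletConfigurationPathRefs.
func resolvePaths(kc *kubeletconfig.KubeletConfiguration, root string) {
	for _, p := range kubeletconfig.KubeletConfigurationPathRefs(kc) {
		if *p != "" && !filepath.IsAbs(*p) {
			*p = filepath.Join(root, *p)
		}
	}
}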
View File

@ -0,0 +1,45 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "kubelet.config.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
var (
// SchemeBuilder is the scheme builder with scheme init functions to run for this API package
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme is a global function that registers this API group & version to a scheme
AddToScheme = SchemeBuilder.AddToScheme
)
// addKnownTypes registers known types to the given scheme
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&KubeletConfiguration{},
&SerializedNodeConfigSource{},
&CredentialProviderConfig{},
)
return nil
}

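A minimal sketch of how this registration is typically consumed: building a Scheme for the internal kubelet.config.k8s.io types. Real callers usually register the external (versioned) API packages as well, which this example omits.
package example
import (
	"k8s.io/apimachinery/pkg/runtime"
	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
)
// newKubeletConfigScheme registers KubeletConfiguration, SerializedNodeConfigSource
// and CredentialProviderConfig (internal versions) into a fresh Scheme.
func newKubeletConfigScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	if err := kubeletconfig.AddToScheme(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}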
View File

@ -0,0 +1,704 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
logsapi "k8s.io/component-base/logs/api/v1"
tracingapi "k8s.io/component-base/tracing/api/v1"
)
// HairpinMode denotes how the kubelet should configure networking to handle
// hairpin packets.
type HairpinMode string
// Enum settings for different ways to handle hairpin packets.
const (
// Set the hairpin flag on the veth of containers in the respective
// container runtime.
HairpinVeth = "hairpin-veth"
// Make the container bridge promiscuous. This will force it to accept
// hairpin packets, even if the flag isn't set on ports of the bridge.
PromiscuousBridge = "promiscuous-bridge"
// Neither of the above. If the kubelet is started in this hairpin mode
// and kube-proxy is running in iptables mode, hairpin packets will be
// dropped by the container bridge.
HairpinNone = "none"
)
// ResourceChangeDetectionStrategy denotes a mode in which internal
// managers (secret, configmap) are discovering object changes.
type ResourceChangeDetectionStrategy string
// Enum settings for different strategies of kubelet managers.
const (
// GetChangeDetectionStrategy is a mode in which kubelet fetches
// necessary objects directly from apiserver.
GetChangeDetectionStrategy ResourceChangeDetectionStrategy = "Get"
// TTLCacheChangeDetectionStrategy is a mode in which kubelet uses
// ttl cache for object directly fetched from apiserver.
TTLCacheChangeDetectionStrategy ResourceChangeDetectionStrategy = "Cache"
// WatchChangeDetectionStrategy is a mode in which kubelet uses
// watches to observe changes to objects that are in its interest.
WatchChangeDetectionStrategy ResourceChangeDetectionStrategy = "Watch"
// RestrictedTopologyManagerPolicy is a mode in which kubelet only allows
// pods with optimal NUMA node alignment for requested resources
RestrictedTopologyManagerPolicy = "restricted"
// BestEffortTopologyManagerPolicy is a mode in which kubelet will favour
// pods with NUMA alignment of CPU and device resources.
BestEffortTopologyManagerPolicy = "best-effort"
// NoneTopologyManagerPolicy is a mode in which kubelet has no knowledge
// of NUMA alignment of a pod's CPU and device resources.
NoneTopologyManagerPolicy = "none"
// SingleNumaNodeTopologyManagerPolicy is a mode in which kubelet only allows
// pods with a single NUMA alignment of CPU and device resources.
SingleNumaNodeTopologyManagerPolicy = "single-numa-node"
// ContainerTopologyManagerScope represents that
// topology policy is applied on a per-container basis.
ContainerTopologyManagerScope = "container"
// PodTopologyManagerScope represents that
// topology policy is applied on a per-pod basis.
PodTopologyManagerScope = "pod"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// KubeletConfiguration contains the configuration for the Kubelet
type KubeletConfiguration struct {
metav1.TypeMeta
// enableServer enables Kubelet's secured server.
// Note: Kubelet's insecure port is controlled by the readOnlyPort option.
EnableServer bool
// staticPodPath is the path to the directory containing local (static) pods to
// run, or the path to a single static pod file.
StaticPodPath string
// podLogsDir is a custom root directory path kubelet will use to place pod's log files.
// Default: "/var/log/pods/"
// Note: it is not recommended to use the temp folder as a log directory as it may cause
// unexpected behavior in many places.
PodLogsDir string
// syncFrequency is the max period between synchronizing running
// containers and config
SyncFrequency metav1.Duration
// fileCheckFrequency is the duration between checking config files for
// new data
FileCheckFrequency metav1.Duration
// httpCheckFrequency is the duration between checking http for new data
HTTPCheckFrequency metav1.Duration
// staticPodURL is the URL for accessing static pods to run
StaticPodURL string
// staticPodURLHeader is a map of slices with HTTP headers to use when accessing the podURL
StaticPodURLHeader map[string][]string `datapolicy:"token"`
// address is the IP address for the Kubelet to serve on (set to 0.0.0.0
// for all interfaces)
Address string
// port is the port for the Kubelet to serve on.
Port int32
// readOnlyPort is the read-only port for the Kubelet to serve on with
// no authentication/authorization (set to 0 to disable)
ReadOnlyPort int32
// volumePluginDir is the full path of the directory in which to search
// for additional third party volume plugins.
VolumePluginDir string
// providerID, if set, sets the unique id of the instance that an external provider (i.e. cloudprovider)
// can use to identify a specific node
ProviderID string
// tlsCertFile is the file containing x509 Certificate for HTTPS. (CA cert,
// if any, concatenated after server cert). If tlsCertFile and
// tlsPrivateKeyFile are not provided, a self-signed certificate
// and key are generated for the public address and saved to the directory
// passed to the Kubelet's --cert-dir flag.
TLSCertFile string
// tlsPrivateKeyFile is the file containing x509 private key matching tlsCertFile
TLSPrivateKeyFile string
// TLSCipherSuites is the list of allowed cipher suites for the server.
// Note that TLS 1.3 ciphersuites are not configurable.
// Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants).
TLSCipherSuites []string
// TLSMinVersion is the minimum TLS version supported.
// Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants).
TLSMinVersion string
// rotateCertificates enables client certificate rotation. The Kubelet will request a
// new certificate from the certificates.k8s.io API. This requires an approver to approve the
// certificate signing requests.
RotateCertificates bool
// serverTLSBootstrap enables server certificate bootstrap. Instead of self
// signing a serving certificate, the Kubelet will request a certificate from
// the certificates.k8s.io API. This requires an approver to approve the
// certificate signing requests. The RotateKubeletServerCertificate feature
// must be enabled.
ServerTLSBootstrap bool
// authentication specifies how requests to the Kubelet's server are authenticated
Authentication KubeletAuthentication
// authorization specifies how requests to the Kubelet's server are authorized
Authorization KubeletAuthorization
// registryPullQPS is the limit of registry pulls per second.
// Set to 0 for no limit.
RegistryPullQPS int32
// registryBurst is the maximum size of bursty pulls, temporarily allows
// pulls to burst to this number, while still not exceeding registryPullQPS.
// Only used if registryPullQPS > 0.
RegistryBurst int32
// eventRecordQPS is the maximum event creations per second. If 0, there
// is no limit enforced.
EventRecordQPS int32
// eventBurst is the maximum size of a burst of event creations, temporarily
// allows event creations to burst to this number, while still not exceeding
// eventRecordQPS. Only used if eventRecordQPS > 0.
EventBurst int32
// enableDebuggingHandlers enables server endpoints for log collection
// and local running of containers and commands
EnableDebuggingHandlers bool
// enableContentionProfiling enables block profiling, if enableDebuggingHandlers is true.
EnableContentionProfiling bool
// healthzPort is the port of the localhost healthz endpoint (set to 0 to disable)
HealthzPort int32
// healthzBindAddress is the IP address for the healthz server to serve on
HealthzBindAddress string
// oomScoreAdj is the oom-score-adj value for the kubelet process. Values
// must be within the range [-1000, 1000].
OOMScoreAdj int32
// clusterDomain is the DNS domain for this cluster. If set, kubelet will
// configure all containers to search this domain in addition to the
// host's search domains.
ClusterDomain string
// clusterDNS is a list of IP addresses for a cluster DNS server. If set,
// kubelet will configure all containers to use this for DNS resolution
// instead of the host's DNS servers.
ClusterDNS []string
// streamingConnectionIdleTimeout is the maximum time a streaming connection
// can be idle before the connection is automatically closed.
StreamingConnectionIdleTimeout metav1.Duration
// nodeStatusUpdateFrequency is the frequency that kubelet computes node
// status. If node lease feature is not enabled, it is also the frequency that
// kubelet posts node status to master. In that case, be cautious when
// changing the constant, it must work with nodeMonitorGracePeriod in nodecontroller.
NodeStatusUpdateFrequency metav1.Duration
// nodeStatusReportFrequency is the frequency that kubelet posts node
// status to master if node status does not change. Kubelet will ignore this
// frequency and post node status immediately if any change is detected. It is
// only used when node lease feature is enabled.
NodeStatusReportFrequency metav1.Duration
// nodeLeaseDurationSeconds is the duration the Kubelet will set on its corresponding Lease.
NodeLeaseDurationSeconds int32
// ImageMinimumGCAge is the minimum age for an unused image before it is
// garbage collected.
ImageMinimumGCAge metav1.Duration
// ImageMaximumGCAge is the maximum age an image can be unused before it is garbage collected.
// The default of this field is "0s", which disables this field--meaning images won't be garbage
// collected based on being unused for too long.
ImageMaximumGCAge metav1.Duration
// imageGCHighThresholdPercent is the percent of disk usage after which
// image garbage collection is always run. The percent is calculated as
// this field value out of 100.
ImageGCHighThresholdPercent int32
// imageGCLowThresholdPercent is the percent of disk usage before which
// image garbage collection is never run. Lowest disk usage to garbage
// collect to. The percent is calculated as this field value out of 100.
ImageGCLowThresholdPercent int32
// How frequently to calculate and cache volume disk usage for all pods
VolumeStatsAggPeriod metav1.Duration
// KubeletCgroups is the absolute name of cgroups to isolate the kubelet in
KubeletCgroups string
// SystemCgroups is absolute name of cgroups in which to place
// all non-kernel processes that are not already in a container. Empty
// for no container. Rolling back the flag requires a reboot.
SystemCgroups string
// CgroupRoot is the root cgroup to use for pods.
// If CgroupsPerQOS is enabled, this is the root of the QoS cgroup hierarchy.
CgroupRoot string
// Enable QoS based Cgroup hierarchy: top level cgroups for QoS Classes
// And all Burstable and BestEffort pods are brought up under their
// specific top level QoS cgroup.
CgroupsPerQOS bool
// driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd)
CgroupDriver string
// SingleProcessOOMKill, if true, will prevent the `memory.oom.group` flag from being set for container
// cgroups in cgroups v2. This causes processes in the container to be OOM killed individually instead of as
// a group. It means that if true, the behavior aligns with the behavior of cgroups v1.
SingleProcessOOMKill *bool
// CPUManagerPolicy is the name of the policy to use.
// Requires the CPUManager feature gate to be enabled.
CPUManagerPolicy string
// CPUManagerPolicyOptions is a set of key=value which allows to set extra options
// to fine tune the behaviour of the cpu manager policies.
// Requires both the "CPUManager" and "CPUManagerPolicyOptions" feature gates to be enabled.
CPUManagerPolicyOptions map[string]string
// CPU Manager reconciliation period.
// Requires the CPUManager feature gate to be enabled.
CPUManagerReconcilePeriod metav1.Duration
// MemoryManagerPolicy is the name of the policy to use.
// Requires the MemoryManager feature gate to be enabled.
MemoryManagerPolicy string
// TopologyManagerPolicy is the name of the policy to use.
TopologyManagerPolicy string
// TopologyManagerScope represents the scope of topology hint generation
// that topology manager requests and hint providers generate.
// Default: "container"
// +optional
TopologyManagerScope string
// TopologyManagerPolicyOptions is a set of key=value which allows to set extra options
// to fine tune the behaviour of the topology manager policies.
// Requires both the "TopologyManager" and "TopologyManagerPolicyOptions" feature gates to be enabled.
TopologyManagerPolicyOptions map[string]string
// Map of QoS resource reservation percentages (memory only for now).
// Requires the QOSReserved feature gate to be enabled.
QOSReserved map[string]string
// runtimeRequestTimeout is the timeout for all runtime requests except long running
// requests - pull, logs, exec and attach.
RuntimeRequestTimeout metav1.Duration
// hairpinMode specifies how the Kubelet should configure the container
// bridge for hairpin packets.
// Setting this flag allows endpoints in a Service to loadbalance back to
// themselves if they should try to access their own Service. Values:
// "promiscuous-bridge": make the container bridge promiscuous.
// "hairpin-veth": set the hairpin flag on container veth interfaces.
// "none": do nothing.
// Generally, one must set --hairpin-mode=hairpin-veth to achieve hairpin NAT,
// because promiscuous-bridge assumes the existence of a container bridge named cbr0.
HairpinMode string
// maxPods is the number of pods that can run on this Kubelet.
MaxPods int32
// The CIDR to use for pod IP addresses, only used in standalone mode.
// In cluster mode, this is obtained from the master.
PodCIDR string
// The maximum number of processes per pod. If -1, the kubelet defaults to the node allocatable pid capacity.
PodPidsLimit int64
// ResolverConfig is the resolver configuration file used as the basis
// for the container DNS resolution configuration.
ResolverConfig string
// RunOnce causes the Kubelet to check the API server once for pods,
// run those in addition to the pods specified by static pod files, and exit.
// Deprecated: no longer has any effect.
RunOnce bool
// cpuCFSQuota enables CPU CFS quota enforcement for containers that
// specify CPU limits
CPUCFSQuota bool
// CPUCFSQuotaPeriod sets the CPU CFS quota period value, cpu.cfs_period_us, defaults to 100ms
CPUCFSQuotaPeriod metav1.Duration
// maxOpenFiles is the number of files that can be opened by the Kubelet process.
MaxOpenFiles int64
// nodeStatusMaxImages caps the number of images reported in Node.Status.Images.
NodeStatusMaxImages int32
// contentType is contentType of requests sent to apiserver.
ContentType string
// kubeAPIQPS is the QPS to use while talking with kubernetes apiserver
KubeAPIQPS int32
// kubeAPIBurst is the burst to allow while talking with kubernetes
// apiserver
KubeAPIBurst int32
// serializeImagePulls when enabled, tells the Kubelet to pull images one at a time.
SerializeImagePulls bool
// MaxParallelImagePulls sets the maximum number of image pulls in parallel.
MaxParallelImagePulls *int32
// Map of signal names to quantities that defines hard eviction thresholds. For example: {"memory.available": "300Mi"}.
// Some default signals are Linux only: nodefs.inodesFree
EvictionHard map[string]string
// Map of signal names to quantities that defines soft eviction thresholds. For example: {"memory.available": "300Mi"}.
EvictionSoft map[string]string
// Map of signal names to quantities that defines grace periods for each soft eviction signal. For example: {"memory.available": "30s"}.
EvictionSoftGracePeriod map[string]string
// Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition.
EvictionPressureTransitionPeriod metav1.Duration
// Maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
EvictionMaxPodGracePeriod int32
// Map of signal names to quantities that defines minimum reclaims, which describe the minimum
// amount of a given resource the kubelet will reclaim when performing a pod eviction while
// that resource is under pressure. For example: {"imagefs.available": "2Gi"}
EvictionMinimumReclaim map[string]string
// podsPerCore is the maximum number of pods per core. Cannot exceed MaxPods.
// If 0, this field is ignored.
PodsPerCore int32
// enableControllerAttachDetach enables the Attach/Detach controller to
// manage attachment/detachment of volumes scheduled to this node, and
// disables kubelet from executing any attach/detach operations
EnableControllerAttachDetach bool
// protectKernelDefaults, if true, causes the Kubelet to error if kernel
// flags are not as it expects. Otherwise the Kubelet will attempt to modify
// kernel flags to match its expectation.
ProtectKernelDefaults bool
// If true, Kubelet creates the KUBE-IPTABLES-HINT chain in iptables as a hint to
// other components about the configuration of iptables on the system.
MakeIPTablesUtilChains bool
// iptablesMasqueradeBit formerly controlled the creation of the KUBE-MARK-MASQ
// chain.
// Deprecated: no longer has any effect.
IPTablesMasqueradeBit int32
// iptablesDropBit formerly controlled the creation of the KUBE-MARK-DROP chain.
// Deprecated: no longer has any effect.
IPTablesDropBit int32
// featureGates is a map of feature names to bools that enable or disable alpha/experimental
// features. This field modifies piecemeal the built-in default values from
// "k8s.io/kubernetes/pkg/features/kube_features.go".
FeatureGates map[string]bool
// Tells the Kubelet to fail to start if swap is enabled on the node.
FailSwapOn bool
// memorySwap configures swap memory available to container workloads.
// +featureGate=NodeSwap
// +optional
MemorySwap MemorySwapConfiguration
// A quantity defines the maximum size of the container log file before it is rotated. For example: "5Mi" or "256Ki".
ContainerLogMaxSize string
// Maximum number of container log files that can be present for a container.
ContainerLogMaxFiles int32
// Maximum number of concurrent log rotation workers to spawn for processing the log rotation
// requests
ContainerLogMaxWorkers int32
// Interval at which the container logs are monitored for rotation
ContainerLogMonitorInterval metav1.Duration
// ConfigMapAndSecretChangeDetectionStrategy is a mode in which config map and secret managers are running.
ConfigMapAndSecretChangeDetectionStrategy ResourceChangeDetectionStrategy
// A comma-separated allowlist of unsafe sysctls or sysctl patterns (ending in `*`).
// Unsafe sysctl groups are `kernel.shm*`, `kernel.msg*`, `kernel.sem`, `fs.mqueue.*`, and `net.*`.
// These sysctls are namespaced but not allowed by default.
// For example: "`kernel.msg*,net.ipv4.route.min_pmtu`"
// +optional
AllowedUnsafeSysctls []string
// kernelMemcgNotification if enabled, the kubelet will integrate with the kernel memcg
// notification to determine if memory eviction thresholds are crossed rather than polling.
KernelMemcgNotification bool
/* the following fields are meant for Node Allocatable */
// A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G,ephemeral-storage=1G,pid=100) pairs
// that describe resources reserved for non-kubernetes components.
// Currently only cpu, memory and local ephemeral storage for root file system are supported.
// See https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources for more detail.
SystemReserved map[string]string
// A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G,ephemeral-storage=1G,pid=100) pairs
// that describe resources reserved for kubernetes system components.
// Currently only cpu, memory and local ephemeral storage for root file system are supported.
// See https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources for more detail.
KubeReserved map[string]string
// This flag helps kubelet identify absolute name of top level cgroup used to enforce `SystemReserved` compute resource reservation for OS system daemons.
// Refer to [Node Allocatable](https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable) doc for more information.
SystemReservedCgroup string
// This flag helps kubelet identify absolute name of top level cgroup used to enforce `KubeReserved` compute resource reservation for Kubernetes node system daemons.
// Refer to [Node Allocatable](https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable) doc for more information.
KubeReservedCgroup string
// This flag specifies the various Node Allocatable enforcements that Kubelet needs to perform.
// This flag accepts a list of options. Acceptable options are `pods`, `system-reserved` & `kube-reserved`.
// Refer to [Node Allocatable](https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable) doc for more information.
EnforceNodeAllocatable []string
// This option specifies the CPU list reserved for host-level system threads and Kubernetes-related threads.
// This provides a "static" CPU list rather than the "dynamic" list derived from system-reserved and kube-reserved.
// This option overwrites the CPUs provided by system-reserved and kube-reserved.
ReservedSystemCPUs string
// The previous version for which you want to show hidden metrics.
// Only the previous minor version is meaningful, other values will not be allowed.
// The format is <major>.<minor>, e.g.: '1.16'.
// The purpose of this format is to make sure you have the opportunity to notice if the next release hides additional metrics,
// rather than being surprised when they are permanently removed in the release after that.
ShowHiddenMetricsForVersion string
// Logging specifies the options of logging.
// Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information.
Logging logsapi.LoggingConfiguration
// EnableSystemLogHandler enables /logs handler.
EnableSystemLogHandler bool
// EnableSystemLogQuery enables the node log query feature on the /logs endpoint.
// EnableSystemLogHandler has to be enabled in addition for this feature to work.
// Enabling this feature has security implications. The recommendation is to enable it on a need basis for debugging
// purposes and disabling otherwise.
// +featureGate=NodeLogQuery
// +optional
EnableSystemLogQuery bool
// ShutdownGracePeriod specifies the total duration that the node should delay the shutdown and total grace period for pod termination during a node shutdown.
// Defaults to 0 seconds.
// +featureGate=GracefulNodeShutdown
// +optional
ShutdownGracePeriod metav1.Duration
// ShutdownGracePeriodCriticalPods specifies the duration used to terminate critical pods during a node shutdown. This should be less than ShutdownGracePeriod.
// Defaults to 0 seconds.
// For example, if ShutdownGracePeriod=30s, and ShutdownGracePeriodCriticalPods=10s, during a node shutdown the first 20 seconds would be reserved for gracefully terminating normal pods, and the last 10 seconds would be reserved for terminating critical pods.
// +featureGate=GracefulNodeShutdown
// +optional
ShutdownGracePeriodCriticalPods metav1.Duration
// ShutdownGracePeriodByPodPriority specifies the shutdown grace period for Pods based
// on their associated priority class value.
// When a shutdown request is received, the Kubelet will initiate shutdown on all pods
// running on the node with a grace period that depends on the priority of the pod,
// and then wait for all pods to exit.
// Each entry in the array represents the graceful shutdown time a pod with a priority
// class value that lies in the range of that value and the next higher entry in the
// list when the node is shutting down.
ShutdownGracePeriodByPodPriority []ShutdownGracePeriodByPodPriority
// ReservedMemory specifies a comma-separated list of memory reservations for NUMA nodes.
// The parameter makes sense only in the context of the memory manager feature. The memory manager will not allocate reserved memory for container workloads.
// For example, if NUMA node 0 has 10Gi of memory and ReservedMemory is specified to reserve 1Gi of memory at NUMA node 0,
// the memory manager will assume that only 9Gi is available for allocation.
// You can specify different amounts for multiple NUMA nodes and memory types.
// You can omit this parameter entirely, but you should be aware that the amount of reserved memory from all NUMA nodes
// should be equal to the amount of memory specified by the node allocatable features(https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable).
// If at least one node allocatable parameter has a non-zero value, you will need to specify at least one NUMA node.
// Also, avoid specifying:
// 1. Duplicates: the same NUMA node and memory type with a different value.
// 2. Zero limits for any memory type.
// 3. NUMA node IDs that do not exist on the machine.
// 4. Memory types other than memory and hugepages-<size>.
ReservedMemory []MemoryReservation
// EnableProfilingHandler enables the /debug/pprof handler.
EnableProfilingHandler bool
// EnableDebugFlagsHandler enables the /debug/flags/v handler.
EnableDebugFlagsHandler bool
// SeccompDefault enables the use of `RuntimeDefault` as the default seccomp profile for all workloads.
SeccompDefault bool
// MemoryThrottlingFactor specifies the factor multiplied by the memory limit or node allocatable memory
// when setting the cgroupv2 memory.high value to enforce MemoryQoS.
// Decreasing this factor will set lower high limit for container cgroups and put heavier reclaim pressure
// while increasing will put less reclaim pressure.
// See https://kep.k8s.io/2570 for more details.
// Default: 0.9
// +featureGate=MemoryQoS
// +optional
MemoryThrottlingFactor *float64
// registerWithTaints are an array of taints to add to a node object when
// the kubelet registers itself. This only takes effect when registerNode
// is true and upon the initial registration of the node.
// +optional
RegisterWithTaints []v1.Taint
// registerNode enables automatic registration with the apiserver.
// +optional
RegisterNode bool
// Tracing specifies the versioned configuration for OpenTelemetry tracing clients.
// See https://kep.k8s.io/2832 for more details.
// +featureGate=KubeletTracing
// +optional
Tracing *tracingapi.TracingConfiguration
// LocalStorageCapacityIsolation enables local ephemeral storage isolation feature. The default setting is true.
// This feature allows users to set request/limit for container's ephemeral storage and manage it in a similar way
// as cpu and memory. It also allows setting sizeLimit for emptyDir volume, which will trigger pod eviction if disk
// usage from the volume exceeds the limit.
// This feature depends on the capability of detecting correct root file system disk usage. For certain systems,
// such as kind rootless, if this capability cannot be supported, the feature LocalStorageCapacityIsolation should be
// disabled. Once disabled, user should not set request/limit for container's ephemeral storage, or sizeLimit for emptyDir.
// +optional
LocalStorageCapacityIsolation bool
// ContainerRuntimeEndpoint is the endpoint of container runtime.
// Unix domain sockets are supported on Linux, while npipe and TCP endpoints are supported on Windows.
// Examples:'unix:///path/to/runtime.sock', 'npipe:////./pipe/runtime'
ContainerRuntimeEndpoint string
// ImageServiceEndpoint is the endpoint of container image service.
// If not specified, the default value is ContainerRuntimeEndpoint.
// +optional
ImageServiceEndpoint string
// FailCgroupV1 prevents the kubelet from starting on hosts
// that use cgroup v1. By default, this is set to 'false', meaning
// the kubelet is allowed to start on cgroup v1 hosts unless this
// option is explicitly enabled.
// +optional
FailCgroupV1 bool
// CrashLoopBackOff contains config to modify node-level parameters for
// container restart behavior
// +featureGate=KubeletCrashLoopBackOffMax
// +optional
CrashLoopBackOff CrashLoopBackOffConfig
}
// KubeletAuthorizationMode denotes the authorization mode for the kubelet
type KubeletAuthorizationMode string
const (
// KubeletAuthorizationModeAlwaysAllow authorizes all authenticated requests
KubeletAuthorizationModeAlwaysAllow KubeletAuthorizationMode = "AlwaysAllow"
// KubeletAuthorizationModeWebhook uses the SubjectAccessReview API to determine authorization
KubeletAuthorizationModeWebhook KubeletAuthorizationMode = "Webhook"
)
// KubeletAuthorization holds the state related to the authorization in the kubelet.
type KubeletAuthorization struct {
// mode is the authorization mode to apply to requests to the kubelet server.
// Valid values are AlwaysAllow and Webhook.
// Webhook mode uses the SubjectAccessReview API to determine authorization.
Mode KubeletAuthorizationMode
// webhook contains settings related to Webhook authorization.
Webhook KubeletWebhookAuthorization
}
// KubeletWebhookAuthorization holds the state related to the Webhook
// Authorization in the Kubelet.
type KubeletWebhookAuthorization struct {
// cacheAuthorizedTTL is the duration to cache 'authorized' responses from the webhook authorizer.
CacheAuthorizedTTL metav1.Duration
// cacheUnauthorizedTTL is the duration to cache 'unauthorized' responses from the webhook authorizer.
CacheUnauthorizedTTL metav1.Duration
}
// KubeletAuthentication holds the Kubelet authentication settings.
type KubeletAuthentication struct {
// x509 contains settings related to x509 client certificate authentication
X509 KubeletX509Authentication
// webhook contains settings related to webhook bearer token authentication
Webhook KubeletWebhookAuthentication
// anonymous contains settings related to anonymous authentication
Anonymous KubeletAnonymousAuthentication
}
// KubeletX509Authentication contains settings related to x509 client certificate authentication
type KubeletX509Authentication struct {
// clientCAFile is the path to a PEM-encoded certificate bundle. If set, any request presenting a client certificate
// signed by one of the authorities in the bundle is authenticated with a username corresponding to the CommonName,
// and groups corresponding to the Organization in the client certificate.
ClientCAFile string
}
// KubeletWebhookAuthentication contains settings related to webhook authentication
type KubeletWebhookAuthentication struct {
// enabled allows bearer token authentication backed by the tokenreviews.authentication.k8s.io API
Enabled bool
// cacheTTL enables caching of authentication results
CacheTTL metav1.Duration
}
// KubeletAnonymousAuthentication enables anonymous requests to the kubelet server.
type KubeletAnonymousAuthentication struct {
// enabled allows anonymous requests to the kubelet server.
// Requests that are not rejected by another authentication method are treated as anonymous requests.
// Anonymous requests have a username of system:anonymous, and a group name of system:unauthenticated.
Enabled bool
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// SerializedNodeConfigSource allows us to serialize NodeConfigSource
// This type is used internally by the Kubelet for tracking checkpointed dynamic configs.
// It exists in the kubeletconfig API group because it is classified as a versioned input to the Kubelet.
type SerializedNodeConfigSource struct {
metav1.TypeMeta
// Source is the source that we are serializing
// +optional
Source v1.NodeConfigSource
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// CredentialProviderConfig is the configuration containing information about
// each exec credential provider. Kubelet reads this configuration from disk and enables
// each provider as specified by the CredentialProvider type.
type CredentialProviderConfig struct {
metav1.TypeMeta
// providers is a list of credential provider plugins that will be enabled by the kubelet.
// Multiple providers may match against a single image, in which case credentials
// from all providers will be returned to the kubelet. If multiple providers are called
// for a single image, the results are combined. If providers return overlapping
// auth keys, the value from the provider earlier in this list is used.
Providers []CredentialProvider
}
// CredentialProvider represents an exec plugin to be invoked by the kubelet. The plugin is only
// invoked when an image being pulled matches the images handled by the plugin (see matchImages).
type CredentialProvider struct {
// name is the required name of the credential provider. It must match the name of the
// provider executable as seen by the kubelet. The executable must be in the kubelet's
// bin directory (set by the --credential-provider-bin-dir flag).
Name string
// matchImages is a required list of strings used to match against images in order to
// determine if this provider should be invoked. If one of the strings matches the
// requested image from the kubelet, the plugin will be invoked and given a chance
// to provide credentials. Images are expected to contain the registry domain
// and URL path.
//
// Each entry in matchImages is a pattern which can optionally contain a port and a path.
// Globs can be used in the domain, but not in the port or the path. Globs are supported
// as subdomains like `*.k8s.io` or `k8s.*.io`, and top-level-domains such as `k8s.*`.
// Matching partial subdomains like `app*.k8s.io` is also supported. Each glob can only match
// a single subdomain segment, so `*.io` does not match *.k8s.io.
//
// A match exists between an image and a matchImage when all of the below are true:
// - Both contain the same number of domain parts and each part matches.
// - The URL path of an imageMatch must be a prefix of the target image URL path.
// - If the imageMatch contains a port, then the port must match in the image as well.
//
// Example values of matchImages:
// - `123456789.dkr.ecr.us-east-1.amazonaws.com`
// - `*.azurecr.io`
// - `gcr.io`
// - `*.*.registry.io`
// - `registry.io:8080/path`
MatchImages []string
// defaultCacheDuration is the default duration the plugin will cache credentials in-memory
// if a cache duration is not provided in the plugin response. This field is required.
DefaultCacheDuration *metav1.Duration
// Required input version of the exec CredentialProviderRequest. The returned CredentialProviderResponse
// MUST use the same encoding version as the input. Current supported values are:
// - credentialprovider.kubelet.k8s.io/v1alpha1
// - credentialprovider.kubelet.k8s.io/v1beta1
// - credentialprovider.kubelet.k8s.io/v1
APIVersion string
// Arguments to pass to the command when executing it.
// +optional
Args []string
// Env defines additional environment variables to expose to the process. These
// are unioned with the host's environment, as well as variables client-go uses
// to pass argument to the plugin.
// +optional
Env []ExecEnvVar
}
// ExecEnvVar is used for setting environment variables when executing an exec-based
// credential plugin.
type ExecEnvVar struct {
Name string
Value string
}
// MemoryReservation specifies the memory reservation of different types for each NUMA node
type MemoryReservation struct {
NumaNode int32
Limits v1.ResourceList
}
// ShutdownGracePeriodByPodPriority specifies the shutdown grace period for Pods based on their associated priority class value
type ShutdownGracePeriodByPodPriority struct {
// priority is the priority value associated with the shutdown grace period
Priority int32
// shutdownGracePeriodSeconds is the shutdown grace period in seconds
ShutdownGracePeriodSeconds int64
}
type MemorySwapConfiguration struct {
// swapBehavior configures swap memory available to container workloads. May be one of
// "", "NoSwap": workloads can not use swap, default option.
// "LimitedSwap": workload swap usage is limited. The swap limit is proportionate to the container's memory request.
// +featureGate=NodeSwap
// +optional
SwapBehavior string
}
// CrashLoopBackOffConfig is used for setting configuration for this kubelet's
// container restart behavior
type CrashLoopBackOffConfig struct {
// MaxContainerRestartPeriod is the maximum duration the backoff delay can accrue
// to for container restarts, minimum 1 second, maximum 300 seconds.
// +featureGate=KubeletCrashLoopBackOffMax
// +optional
MaxContainerRestartPeriod *metav1.Duration
}

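As a reading aid for the matchImages rules documented above (not vendored content), a minimal CredentialProvider value; the provider name, registry hosts, and cache duration are illustrative assumptions.
package example
import (
	"time"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
)
// exampleProvider matches any direct subdomain of azurecr.io, plus images
// pulled from registry.example.com:5000 whose URL path starts with /team.
var exampleProvider = kubeletconfig.CredentialProvider{
	Name:                 "example-credential-provider",
	MatchImages:          []string{"*.azurecr.io", "registry.example.com:5000/team"},
	DefaultCacheDuration: &metav1.Duration{Duration: 10 * time.Minute},
	APIVersion:           "credentialprovider.kubelet.k8s.io/v1",
}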
View File

@ -0,0 +1,508 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
import (
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
apiv1 "k8s.io/component-base/tracing/api/v1"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CrashLoopBackOffConfig) DeepCopyInto(out *CrashLoopBackOffConfig) {
*out = *in
if in.MaxContainerRestartPeriod != nil {
in, out := &in.MaxContainerRestartPeriod, &out.MaxContainerRestartPeriod
*out = new(v1.Duration)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrashLoopBackOffConfig.
func (in *CrashLoopBackOffConfig) DeepCopy() *CrashLoopBackOffConfig {
if in == nil {
return nil
}
out := new(CrashLoopBackOffConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CredentialProvider) DeepCopyInto(out *CredentialProvider) {
*out = *in
if in.MatchImages != nil {
in, out := &in.MatchImages, &out.MatchImages
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.DefaultCacheDuration != nil {
in, out := &in.DefaultCacheDuration, &out.DefaultCacheDuration
*out = new(v1.Duration)
**out = **in
}
if in.Args != nil {
in, out := &in.Args, &out.Args
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Env != nil {
in, out := &in.Env, &out.Env
*out = make([]ExecEnvVar, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CredentialProvider.
func (in *CredentialProvider) DeepCopy() *CredentialProvider {
if in == nil {
return nil
}
out := new(CredentialProvider)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CredentialProviderConfig) DeepCopyInto(out *CredentialProviderConfig) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Providers != nil {
in, out := &in.Providers, &out.Providers
*out = make([]CredentialProvider, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CredentialProviderConfig.
func (in *CredentialProviderConfig) DeepCopy() *CredentialProviderConfig {
if in == nil {
return nil
}
out := new(CredentialProviderConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CredentialProviderConfig) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExecEnvVar) DeepCopyInto(out *ExecEnvVar) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecEnvVar.
func (in *ExecEnvVar) DeepCopy() *ExecEnvVar {
if in == nil {
return nil
}
out := new(ExecEnvVar)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeletAnonymousAuthentication) DeepCopyInto(out *KubeletAnonymousAuthentication) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletAnonymousAuthentication.
func (in *KubeletAnonymousAuthentication) DeepCopy() *KubeletAnonymousAuthentication {
if in == nil {
return nil
}
out := new(KubeletAnonymousAuthentication)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeletAuthentication) DeepCopyInto(out *KubeletAuthentication) {
*out = *in
out.X509 = in.X509
out.Webhook = in.Webhook
out.Anonymous = in.Anonymous
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletAuthentication.
func (in *KubeletAuthentication) DeepCopy() *KubeletAuthentication {
if in == nil {
return nil
}
out := new(KubeletAuthentication)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeletAuthorization) DeepCopyInto(out *KubeletAuthorization) {
*out = *in
out.Webhook = in.Webhook
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletAuthorization.
func (in *KubeletAuthorization) DeepCopy() *KubeletAuthorization {
if in == nil {
return nil
}
out := new(KubeletAuthorization)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
out.SyncFrequency = in.SyncFrequency
out.FileCheckFrequency = in.FileCheckFrequency
out.HTTPCheckFrequency = in.HTTPCheckFrequency
if in.StaticPodURLHeader != nil {
in, out := &in.StaticPodURLHeader, &out.StaticPodURLHeader
*out = make(map[string][]string, len(*in))
for key, val := range *in {
var outVal []string
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = make([]string, len(*in))
copy(*out, *in)
}
(*out)[key] = outVal
}
}
if in.TLSCipherSuites != nil {
in, out := &in.TLSCipherSuites, &out.TLSCipherSuites
*out = make([]string, len(*in))
copy(*out, *in)
}
out.Authentication = in.Authentication
out.Authorization = in.Authorization
if in.ClusterDNS != nil {
in, out := &in.ClusterDNS, &out.ClusterDNS
*out = make([]string, len(*in))
copy(*out, *in)
}
out.StreamingConnectionIdleTimeout = in.StreamingConnectionIdleTimeout
out.NodeStatusUpdateFrequency = in.NodeStatusUpdateFrequency
out.NodeStatusReportFrequency = in.NodeStatusReportFrequency
out.ImageMinimumGCAge = in.ImageMinimumGCAge
out.ImageMaximumGCAge = in.ImageMaximumGCAge
out.VolumeStatsAggPeriod = in.VolumeStatsAggPeriod
if in.SingleProcessOOMKill != nil {
in, out := &in.SingleProcessOOMKill, &out.SingleProcessOOMKill
*out = new(bool)
**out = **in
}
if in.CPUManagerPolicyOptions != nil {
in, out := &in.CPUManagerPolicyOptions, &out.CPUManagerPolicyOptions
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
out.CPUManagerReconcilePeriod = in.CPUManagerReconcilePeriod
if in.TopologyManagerPolicyOptions != nil {
in, out := &in.TopologyManagerPolicyOptions, &out.TopologyManagerPolicyOptions
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.QOSReserved != nil {
in, out := &in.QOSReserved, &out.QOSReserved
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
out.RuntimeRequestTimeout = in.RuntimeRequestTimeout
out.CPUCFSQuotaPeriod = in.CPUCFSQuotaPeriod
if in.MaxParallelImagePulls != nil {
in, out := &in.MaxParallelImagePulls, &out.MaxParallelImagePulls
*out = new(int32)
**out = **in
}
if in.EvictionHard != nil {
in, out := &in.EvictionHard, &out.EvictionHard
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.EvictionSoft != nil {
in, out := &in.EvictionSoft, &out.EvictionSoft
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.EvictionSoftGracePeriod != nil {
in, out := &in.EvictionSoftGracePeriod, &out.EvictionSoftGracePeriod
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
out.EvictionPressureTransitionPeriod = in.EvictionPressureTransitionPeriod
if in.EvictionMinimumReclaim != nil {
in, out := &in.EvictionMinimumReclaim, &out.EvictionMinimumReclaim
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.FeatureGates != nil {
in, out := &in.FeatureGates, &out.FeatureGates
*out = make(map[string]bool, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
out.MemorySwap = in.MemorySwap
out.ContainerLogMonitorInterval = in.ContainerLogMonitorInterval
if in.AllowedUnsafeSysctls != nil {
in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.SystemReserved != nil {
in, out := &in.SystemReserved, &out.SystemReserved
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.KubeReserved != nil {
in, out := &in.KubeReserved, &out.KubeReserved
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.EnforceNodeAllocatable != nil {
in, out := &in.EnforceNodeAllocatable, &out.EnforceNodeAllocatable
*out = make([]string, len(*in))
copy(*out, *in)
}
in.Logging.DeepCopyInto(&out.Logging)
out.ShutdownGracePeriod = in.ShutdownGracePeriod
out.ShutdownGracePeriodCriticalPods = in.ShutdownGracePeriodCriticalPods
if in.ShutdownGracePeriodByPodPriority != nil {
in, out := &in.ShutdownGracePeriodByPodPriority, &out.ShutdownGracePeriodByPodPriority
*out = make([]ShutdownGracePeriodByPodPriority, len(*in))
copy(*out, *in)
}
if in.ReservedMemory != nil {
in, out := &in.ReservedMemory, &out.ReservedMemory
*out = make([]MemoryReservation, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.MemoryThrottlingFactor != nil {
in, out := &in.MemoryThrottlingFactor, &out.MemoryThrottlingFactor
*out = new(float64)
**out = **in
}
if in.RegisterWithTaints != nil {
in, out := &in.RegisterWithTaints, &out.RegisterWithTaints
*out = make([]corev1.Taint, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Tracing != nil {
in, out := &in.Tracing, &out.Tracing
*out = new(apiv1.TracingConfiguration)
(*in).DeepCopyInto(*out)
}
in.CrashLoopBackOff.DeepCopyInto(&out.CrashLoopBackOff)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfiguration.
func (in *KubeletConfiguration) DeepCopy() *KubeletConfiguration {
if in == nil {
return nil
}
out := new(KubeletConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KubeletConfiguration) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeletWebhookAuthentication) DeepCopyInto(out *KubeletWebhookAuthentication) {
*out = *in
out.CacheTTL = in.CacheTTL
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletWebhookAuthentication.
func (in *KubeletWebhookAuthentication) DeepCopy() *KubeletWebhookAuthentication {
if in == nil {
return nil
}
out := new(KubeletWebhookAuthentication)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeletWebhookAuthorization) DeepCopyInto(out *KubeletWebhookAuthorization) {
*out = *in
out.CacheAuthorizedTTL = in.CacheAuthorizedTTL
out.CacheUnauthorizedTTL = in.CacheUnauthorizedTTL
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletWebhookAuthorization.
func (in *KubeletWebhookAuthorization) DeepCopy() *KubeletWebhookAuthorization {
if in == nil {
return nil
}
out := new(KubeletWebhookAuthorization)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeletX509Authentication) DeepCopyInto(out *KubeletX509Authentication) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletX509Authentication.
func (in *KubeletX509Authentication) DeepCopy() *KubeletX509Authentication {
if in == nil {
return nil
}
out := new(KubeletX509Authentication)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MemoryReservation) DeepCopyInto(out *MemoryReservation) {
*out = *in
if in.Limits != nil {
in, out := &in.Limits, &out.Limits
*out = make(corev1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryReservation.
func (in *MemoryReservation) DeepCopy() *MemoryReservation {
if in == nil {
return nil
}
out := new(MemoryReservation)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MemorySwapConfiguration) DeepCopyInto(out *MemorySwapConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemorySwapConfiguration.
func (in *MemorySwapConfiguration) DeepCopy() *MemorySwapConfiguration {
if in == nil {
return nil
}
out := new(MemorySwapConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SerializedNodeConfigSource) DeepCopyInto(out *SerializedNodeConfigSource) {
*out = *in
out.TypeMeta = in.TypeMeta
in.Source.DeepCopyInto(&out.Source)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializedNodeConfigSource.
func (in *SerializedNodeConfigSource) DeepCopy() *SerializedNodeConfigSource {
if in == nil {
return nil
}
out := new(SerializedNodeConfigSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SerializedNodeConfigSource) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ShutdownGracePeriodByPodPriority) DeepCopyInto(out *ShutdownGracePeriodByPodPriority) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShutdownGracePeriodByPodPriority.
func (in *ShutdownGracePeriodByPodPriority) DeepCopy() *ShutdownGracePeriodByPodPriority {
if in == nil {
return nil
}
out := new(ShutdownGracePeriodByPodPriority)
in.DeepCopyInto(out)
return out
}
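A hedged sketch of why callers go through the generated DeepCopy rather than a plain struct assignment: an assignment would share the FeatureGates map (and the other pointer and slice fields) with the original, while DeepCopy duplicates them. The helper name and the NodeSwap gate are illustrative only.

package example

import (
    kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
)

// withNodeSwap returns a modified copy of base without mutating it.
func withNodeSwap(base *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration {
    out := base.DeepCopy() // maps, slices and pointer fields are duplicated
    if out.FeatureGates == nil {
        out.FeatureGates = map[string]bool{}
    }
    out.FeatureGates["NodeSwap"] = true // base.FeatureGates stays untouched
    return out
}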

View File

@ -0,0 +1,16 @@
---
dir: testing
filename: "{{.InterfaceName | snakecase}}.go"
boilerplate-file: ../../../../hack/boilerplate/boilerplate.generatego.txt
outpkg: testing
with-expecter: true
packages:
k8s.io/kubernetes/pkg/kubelet/apis/podresources:
interfaces:
CPUsProvider:
config:
filename: cpus_provider.go
DevicesProvider:
DynamicResourcesProvider:
MemoryProvider:
PodsProvider:

View File

@ -0,0 +1,72 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podresources
import (
"context"
"fmt"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"k8s.io/cri-client/pkg/util"
"k8s.io/kubelet/pkg/apis/podresources/v1"
"k8s.io/kubelet/pkg/apis/podresources/v1alpha1"
)
// Note: Consumers of the pod resources API should not be importing this package.
// They should copy and paste the function into their own project.
// GetV1alpha1Client returns a client for the PodResourcesLister grpc service
// Note: This is deprecated
func GetV1alpha1Client(socket string, connectionTimeout time.Duration, maxMsgSize int) (v1alpha1.PodResourcesListerClient, *grpc.ClientConn, error) {
addr, dialer, err := util.GetAddressAndDialer(socket)
if err != nil {
return nil, nil, err
}
ctx, cancel := context.WithTimeout(context.Background(), connectionTimeout)
defer cancel()
conn, err := grpc.DialContext(ctx, addr,
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithContextDialer(dialer),
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize)))
if err != nil {
return nil, nil, fmt.Errorf("error dialing socket %s: %v", socket, err)
}
return v1alpha1.NewPodResourcesListerClient(conn), conn, nil
}
// GetV1Client returns a client for the PodResourcesLister grpc service
func GetV1Client(socket string, connectionTimeout time.Duration, maxMsgSize int) (v1.PodResourcesListerClient, *grpc.ClientConn, error) {
addr, dialer, err := util.GetAddressAndDialer(socket)
if err != nil {
return nil, nil, err
}
ctx, cancel := context.WithTimeout(context.Background(), connectionTimeout)
defer cancel()
conn, err := grpc.DialContext(ctx, addr,
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithContextDialer(dialer),
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize)))
if err != nil {
return nil, nil, fmt.Errorf("error dialing socket %s: %v", socket, err)
}
return v1.NewPodResourcesListerClient(conn), conn, nil
}
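A hedged consumer-side sketch of GetV1Client, in the copy-paste spirit of the note above: dial the kubelet's pod-resources socket and list the per-container assignments. The socket path, timeout and message size are assumptions for a typical Linux node; adjust them for your environment.

package main

import (
    "context"
    "fmt"
    "time"

    podresourcesv1 "k8s.io/kubelet/pkg/apis/podresources/v1"
    "k8s.io/kubernetes/pkg/kubelet/apis/podresources"
)

func main() {
    // Assumed default socket location on a Linux node.
    socket := "unix:///var/lib/kubelet/pod-resources/kubelet.sock"
    client, conn, err := podresources.GetV1Client(socket, 10*time.Second, 16*1024*1024)
    if err != nil {
        panic(err)
    }
    defer conn.Close()

    resp, err := client.List(context.Background(), &podresourcesv1.ListPodResourcesRequest{})
    if err != nil {
        panic(err)
    }
    for _, pod := range resp.GetPodResources() {
        fmt.Printf("%s/%s: %d containers\n", pod.GetNamespace(), pod.GetName(), len(pod.GetContainers()))
    }
}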

View File

@ -0,0 +1,32 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podresources
const (
// Socket is the name of the podresources server socket
Socket = "kubelet"
// DefaultQPS is determined by empirically reviewing known consumers of the API.
// It is unlikely that there is a legitimate need to query podresources
// more than 100 times per second; the other subsystems are not guaranteed to react
// that fast in the first place.
DefaultQPS = 100
// DefaultBurstTokens is determined by empirically reviewing known consumers of the API.
// See the documentation of DefaultQPS; the same caveats apply.
DefaultBurstTokens = 10
)
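These constants are plain integers, so consumers that poll the API can reuse them for their own client-side throttling. A hedged sketch with golang.org/x/time/rate follows; the limiter wiring here is an assumption, not part of the kubelet.

package example

import (
    "context"

    "golang.org/x/time/rate"
    "k8s.io/kubernetes/pkg/kubelet/apis/podresources"
)

// newPodResourcesLimiter builds a limiter matching the defaults above.
func newPodResourcesLimiter() *rate.Limiter {
    return rate.NewLimiter(rate.Limit(podresources.DefaultQPS), podresources.DefaultBurstTokens)
}

// throttled waits for a token before issuing a List/Get call.
func throttled(ctx context.Context, l *rate.Limiter, call func(context.Context) error) error {
    if err := l.Wait(ctx); err != nil {
        return err
    }
    return call(ctx)
}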

View File

@ -0,0 +1,163 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podresources
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
kubefeatures "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/metrics"
podresourcesv1 "k8s.io/kubelet/pkg/apis/podresources/v1"
)
// v1PodResourcesServer implements PodResourcesListerServer
type v1PodResourcesServer struct {
podsProvider PodsProvider
devicesProvider DevicesProvider
cpusProvider CPUsProvider
memoryProvider MemoryProvider
dynamicResourcesProvider DynamicResourcesProvider
}
// NewV1PodResourcesServer returns a PodResourcesListerServer which lists pods provided by the PodsProvider
// with device information provided by the DevicesProvider
func NewV1PodResourcesServer(providers PodResourcesProviders) podresourcesv1.PodResourcesListerServer {
return &v1PodResourcesServer{
podsProvider: providers.Pods,
devicesProvider: providers.Devices,
cpusProvider: providers.Cpus,
memoryProvider: providers.Memory,
dynamicResourcesProvider: providers.DynamicResources,
}
}
// List returns information about the resources assigned to pods on the node
func (p *v1PodResourcesServer) List(ctx context.Context, req *podresourcesv1.ListPodResourcesRequest) (*podresourcesv1.ListPodResourcesResponse, error) {
metrics.PodResourcesEndpointRequestsTotalCount.WithLabelValues("v1").Inc()
metrics.PodResourcesEndpointRequestsListCount.WithLabelValues("v1").Inc()
pods := p.podsProvider.GetPods()
podResources := make([]*podresourcesv1.PodResources, len(pods))
p.devicesProvider.UpdateAllocatedDevices()
for i, pod := range pods {
pRes := podresourcesv1.PodResources{
Name: pod.Name,
Namespace: pod.Namespace,
Containers: make([]*podresourcesv1.ContainerResources, 0, len(pod.Spec.Containers)),
}
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SidecarContainers) {
pRes.Containers = make([]*podresourcesv1.ContainerResources, 0, len(pod.Spec.InitContainers)+len(pod.Spec.Containers))
for _, container := range pod.Spec.InitContainers {
if !podutil.IsRestartableInitContainer(&container) {
continue
}
pRes.Containers = append(pRes.Containers, p.getContainerResources(pod, &container))
}
}
for _, container := range pod.Spec.Containers {
pRes.Containers = append(pRes.Containers, p.getContainerResources(pod, &container))
}
podResources[i] = &pRes
}
response := &podresourcesv1.ListPodResourcesResponse{
PodResources: podResources,
}
return response, nil
}
// GetAllocatableResources returns information about all the resources known to the server - this is more like the capacity, not the current amount of free resources.
func (p *v1PodResourcesServer) GetAllocatableResources(ctx context.Context, req *podresourcesv1.AllocatableResourcesRequest) (*podresourcesv1.AllocatableResourcesResponse, error) {
metrics.PodResourcesEndpointRequestsTotalCount.WithLabelValues("v1").Inc()
metrics.PodResourcesEndpointRequestsGetAllocatableCount.WithLabelValues("v1").Inc()
response := &podresourcesv1.AllocatableResourcesResponse{
Devices: p.devicesProvider.GetAllocatableDevices(),
CpuIds: p.cpusProvider.GetAllocatableCPUs(),
Memory: p.memoryProvider.GetAllocatableMemory(),
}
return response, nil
}
// Get returns information about the resources assigned to a specific pod
func (p *v1PodResourcesServer) Get(ctx context.Context, req *podresourcesv1.GetPodResourcesRequest) (*podresourcesv1.GetPodResourcesResponse, error) {
metrics.PodResourcesEndpointRequestsTotalCount.WithLabelValues("v1").Inc()
metrics.PodResourcesEndpointRequestsGetCount.WithLabelValues("v1").Inc()
if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.KubeletPodResourcesGet) {
metrics.PodResourcesEndpointErrorsGetCount.WithLabelValues("v1").Inc()
return nil, fmt.Errorf("PodResources API Get method disabled")
}
pod, exist := p.podsProvider.GetPodByName(req.PodNamespace, req.PodName)
if !exist {
metrics.PodResourcesEndpointErrorsGetCount.WithLabelValues("v1").Inc()
return nil, fmt.Errorf("pod %s in namespace %s not found", req.PodName, req.PodNamespace)
}
podResources := &podresourcesv1.PodResources{
Name: pod.Name,
Namespace: pod.Namespace,
Containers: make([]*podresourcesv1.ContainerResources, 0, len(pod.Spec.Containers)),
}
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SidecarContainers) {
podResources.Containers = make([]*podresourcesv1.ContainerResources, 0, len(pod.Spec.InitContainers)+len(pod.Spec.Containers))
for _, container := range pod.Spec.InitContainers {
if !podutil.IsRestartableInitContainer(&container) {
continue
}
podResources.Containers = append(podResources.Containers, p.getContainerResources(pod, &container))
}
}
for _, container := range pod.Spec.Containers {
podResources.Containers = append(podResources.Containers, p.getContainerResources(pod, &container))
}
response := &podresourcesv1.GetPodResourcesResponse{
PodResources: podResources,
}
return response, nil
}
func (p *v1PodResourcesServer) getContainerResources(pod *v1.Pod, container *v1.Container) *podresourcesv1.ContainerResources {
containerResources := &podresourcesv1.ContainerResources{
Name: container.Name,
Devices: p.devicesProvider.GetDevices(string(pod.UID), container.Name),
CpuIds: p.cpusProvider.GetCPUs(string(pod.UID), container.Name),
Memory: p.memoryProvider.GetMemory(string(pod.UID), container.Name),
}
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.KubeletPodResourcesDynamicResources) {
containerResources.DynamicResources = p.dynamicResourcesProvider.GetDynamicResources(pod, container)
}
return containerResources
}
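A hedged sketch of how this server is exposed over gRPC: construct it from a PodResourcesProviders bundle and register it on a unix-socket listener. The socket path and the provider implementations are assumptions supplied by the caller; inside the kubelet this wiring is done by the kubelet's own server setup.

package example

import (
    "net"

    "google.golang.org/grpc"
    podresourcesv1 "k8s.io/kubelet/pkg/apis/podresources/v1"
    "k8s.io/kubernetes/pkg/kubelet/apis/podresources"
)

// servePodResources serves the v1 PodResources API on the given unix socket.
func servePodResources(socketPath string, providers podresources.PodResourcesProviders) error {
    lis, err := net.Listen("unix", socketPath)
    if err != nil {
        return err
    }
    s := grpc.NewServer()
    podresourcesv1.RegisterPodResourcesListerServer(s, podresources.NewV1PodResourcesServer(providers))
    return s.Serve(lis)
}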

View File

@ -0,0 +1,82 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podresources
import (
"context"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubelet/pkg/apis/podresources/v1"
"k8s.io/kubelet/pkg/apis/podresources/v1alpha1"
)
// v1alpha1PodResourcesServer implements PodResourcesListerServer
type v1alpha1PodResourcesServer struct {
podsProvider PodsProvider
devicesProvider DevicesProvider
}
// NewV1alpha1PodResourcesServer returns a PodResourcesListerServer which lists pods provided by the PodsProvider
// with device information provided by the DevicesProvider
func NewV1alpha1PodResourcesServer(providers PodResourcesProviders) v1alpha1.PodResourcesListerServer {
return &v1alpha1PodResourcesServer{
podsProvider: providers.Pods,
devicesProvider: providers.Devices,
}
}
func v1DevicesToAlphaV1(alphaDevs []*v1.ContainerDevices) []*v1alpha1.ContainerDevices {
var devs []*v1alpha1.ContainerDevices
for _, alphaDev := range alphaDevs {
dev := v1alpha1.ContainerDevices{
ResourceName: alphaDev.ResourceName,
DeviceIds: alphaDev.DeviceIds,
}
devs = append(devs, &dev)
}
return devs
}
// List returns information about the resources assigned to pods on the node
func (p *v1alpha1PodResourcesServer) List(ctx context.Context, req *v1alpha1.ListPodResourcesRequest) (*v1alpha1.ListPodResourcesResponse, error) {
metrics.PodResourcesEndpointRequestsTotalCount.WithLabelValues("v1alpha1").Inc()
pods := p.podsProvider.GetPods()
podResources := make([]*v1alpha1.PodResources, len(pods))
p.devicesProvider.UpdateAllocatedDevices()
for i, pod := range pods {
pRes := v1alpha1.PodResources{
Name: pod.Name,
Namespace: pod.Namespace,
Containers: make([]*v1alpha1.ContainerResources, len(pod.Spec.Containers)),
}
for j, container := range pod.Spec.Containers {
pRes.Containers[j] = &v1alpha1.ContainerResources{
Name: container.Name,
Devices: v1DevicesToAlphaV1(p.devicesProvider.GetDevices(string(pod.UID), container.Name)),
}
}
podResources[i] = &pRes
}
return &v1alpha1.ListPodResourcesResponse{
PodResources: podResources,
}, nil
}

View File

@ -0,0 +1,67 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//go:generate mockery
package podresources
import (
v1 "k8s.io/api/core/v1"
podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
)
// DevicesProvider knows how to provide the devices used by the given container
type DevicesProvider interface {
// UpdateAllocatedDevices frees any Devices that are bound to terminated pods.
UpdateAllocatedDevices()
// GetDevices returns information about the devices assigned to pods and containers
GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices
// GetAllocatableDevices returns information about all the devices known to the manager
GetAllocatableDevices() []*podresourcesapi.ContainerDevices
}
// PodsProvider knows how to provide the pods admitted by the node
type PodsProvider interface {
GetPods() []*v1.Pod
GetPodByName(namespace, name string) (*v1.Pod, bool)
}
// CPUsProvider knows how to provide the cpus used by the given container
type CPUsProvider interface {
// GetCPUs returns information about the cpus assigned to pods and containers
GetCPUs(podUID, containerName string) []int64
// GetAllocatableCPUs returns the allocatable (not allocated) CPUs
GetAllocatableCPUs() []int64
}
type MemoryProvider interface {
// GetMemory returns information about the memory assigned to containers
GetMemory(podUID, containerName string) []*podresourcesapi.ContainerMemory
// GetAllocatableMemory returns the allocatable memory from the node
GetAllocatableMemory() []*podresourcesapi.ContainerMemory
}
type DynamicResourcesProvider interface {
// GetDynamicResources returns information about dynamic resources assigned to pods and containers
GetDynamicResources(pod *v1.Pod, container *v1.Container) []*podresourcesapi.DynamicResource
}
type PodResourcesProviders struct {
Pods PodsProvider
Devices DevicesProvider
Cpus CPUsProvider
Memory MemoryProvider
DynamicResources DynamicResourcesProvider
}
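Because consumers only depend on these small interfaces, tests can swap in hand-rolled stubs instead of the kubelet's real managers (the generated mocks under ./testing serve the same purpose). A hedged sketch of a static PodsProvider:

package example

import (
    v1 "k8s.io/api/core/v1"
    "k8s.io/kubernetes/pkg/kubelet/apis/podresources"
)

// staticPodsProvider serves a fixed pod list, e.g. in unit tests.
type staticPodsProvider struct {
    pods []*v1.Pod
}

var _ podresources.PodsProvider = (*staticPodsProvider)(nil)

func (s *staticPodsProvider) GetPods() []*v1.Pod { return s.pods }

func (s *staticPodsProvider) GetPodByName(namespace, name string) (*v1.Pod, bool) {
    for _, p := range s.pods {
        if p.Namespace == namespace && p.Name == name {
            return p, true
        }
    }
    return nil, false
}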

View File

@ -0,0 +1,10 @@
---
dir: testing
filename: cadvisor_mock.go
boilerplate-file: ../../../hack/boilerplate/boilerplate.generatego.txt
outpkg: testing
with-expecter: true
packages:
k8s.io/kubernetes/pkg/kubelet/cadvisor:
interfaces:
Interface:

View File

@ -0,0 +1,179 @@
//go:build linux
// +build linux
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cadvisor
import (
"context"
"flag"
"fmt"
"net/http"
"os"
"path"
"time"
// Register supported container handlers.
_ "github.com/google/cadvisor/container/containerd/install"
_ "github.com/google/cadvisor/container/crio/install"
_ "github.com/google/cadvisor/container/systemd/install"
"github.com/google/cadvisor/cache/memory"
cadvisormetrics "github.com/google/cadvisor/container"
cadvisorapi "github.com/google/cadvisor/info/v1"
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
"github.com/google/cadvisor/manager"
"github.com/google/cadvisor/utils/sysfs"
"k8s.io/klog/v2"
"k8s.io/utils/ptr"
)
type cadvisorClient struct {
imageFsInfoProvider ImageFsInfoProvider
rootPath string
manager.Manager
}
var _ Interface = new(cadvisorClient)
// TODO(vmarmol): Make configurable.
// The amount of time for which to keep stats in memory.
const statsCacheDuration = 2 * time.Minute
const maxHousekeepingInterval = 15 * time.Second
const defaultHousekeepingInterval = 10 * time.Second
const allowDynamicHousekeeping = true
func init() {
// Override cAdvisor flag defaults.
flagOverrides := map[string]string{
// Override the default cAdvisor housekeeping interval.
"housekeeping_interval": defaultHousekeepingInterval.String(),
// Disable event storage by default.
"event_storage_event_limit": "default=0",
"event_storage_age_limit": "default=0",
}
for name, defaultValue := range flagOverrides {
if f := flag.Lookup(name); f != nil {
f.DefValue = defaultValue
f.Value.Set(defaultValue)
} else {
ctx := context.Background()
klog.FromContext(ctx).Error(nil, "Expected cAdvisor flag not found", "flag", name)
}
}
}
// New creates a new cAdvisor Interface for linux systems.
func New(imageFsInfoProvider ImageFsInfoProvider, rootPath string, cgroupRoots []string, usingLegacyStats, localStorageCapacityIsolation bool) (Interface, error) {
sysFs := sysfs.NewRealSysFs()
includedMetrics := cadvisormetrics.MetricSet{
cadvisormetrics.CpuUsageMetrics: struct{}{},
cadvisormetrics.MemoryUsageMetrics: struct{}{},
cadvisormetrics.CpuLoadMetrics: struct{}{},
cadvisormetrics.DiskIOMetrics: struct{}{},
cadvisormetrics.NetworkUsageMetrics: struct{}{},
cadvisormetrics.AppMetrics: struct{}{},
cadvisormetrics.ProcessMetrics: struct{}{},
cadvisormetrics.OOMMetrics: struct{}{},
}
if usingLegacyStats || localStorageCapacityIsolation {
includedMetrics[cadvisormetrics.DiskUsageMetrics] = struct{}{}
}
duration := maxHousekeepingInterval
housekeepingConfig := manager.HousekeepingConfig{
Interval: &duration,
AllowDynamic: ptr.To(allowDynamicHousekeeping),
}
// Create the cAdvisor container manager.
m, err := manager.New(memory.New(statsCacheDuration, nil), sysFs, housekeepingConfig, includedMetrics, http.DefaultClient, cgroupRoots, nil /* containerEnvMetadataWhiteList */, "" /* perfEventsFile */, time.Duration(0) /*resctrlInterval*/)
if err != nil {
return nil, err
}
if _, err := os.Stat(rootPath); err != nil {
if os.IsNotExist(err) {
if err := os.MkdirAll(path.Clean(rootPath), 0750); err != nil {
return nil, fmt.Errorf("error creating root directory %q: %v", rootPath, err)
}
} else {
return nil, fmt.Errorf("failed to Stat %q: %v", rootPath, err)
}
}
return &cadvisorClient{
imageFsInfoProvider: imageFsInfoProvider,
rootPath: rootPath,
Manager: m,
}, nil
}
func (cc *cadvisorClient) Start() error {
return cc.Manager.Start()
}
func (cc *cadvisorClient) ContainerInfoV2(name string, options cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error) {
return cc.GetContainerInfoV2(name, options)
}
func (cc *cadvisorClient) VersionInfo() (*cadvisorapi.VersionInfo, error) {
return cc.GetVersionInfo()
}
func (cc *cadvisorClient) MachineInfo() (*cadvisorapi.MachineInfo, error) {
return cc.GetMachineInfo()
}
func (cc *cadvisorClient) ImagesFsInfo(ctx context.Context) (cadvisorapiv2.FsInfo, error) {
label, err := cc.imageFsInfoProvider.ImageFsInfoLabel()
if err != nil {
return cadvisorapiv2.FsInfo{}, err
}
return cc.getFsInfo(ctx, label)
}
func (cc *cadvisorClient) RootFsInfo() (cadvisorapiv2.FsInfo, error) {
return cc.GetDirFsInfo(cc.rootPath)
}
func (cc *cadvisorClient) getFsInfo(ctx context.Context, label string) (cadvisorapiv2.FsInfo, error) {
res, err := cc.GetFsInfo(label)
if err != nil {
return cadvisorapiv2.FsInfo{}, err
}
if len(res) == 0 {
return cadvisorapiv2.FsInfo{}, fmt.Errorf("failed to find information for the filesystem labeled %q", label)
}
// TODO(vmarmol): Handle this better when a label has more than one image filesystem.
if len(res) > 1 {
klog.FromContext(ctx).Info("More than one filesystem labeled. Only using the first one", "label", label, "fileSystem", res)
}
return res[0], nil
}
func (cc *cadvisorClient) ContainerFsInfo(ctx context.Context) (cadvisorapiv2.FsInfo, error) {
label, err := cc.imageFsInfoProvider.ContainerFsInfoLabel()
if err != nil {
return cadvisorapiv2.FsInfo{}, err
}
return cc.getFsInfo(ctx, label)
}
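A hedged sketch of constructing the Linux client and reading machine information. The cgroup root, root directory and the ImageFsInfoProvider are assumptions supplied by the caller; inside the kubelet they come from its runtime configuration.

package example

import (
    "fmt"

    "k8s.io/kubernetes/pkg/kubelet/cadvisor"
)

// printMachineInfo starts a cAdvisor client and prints basic node facts.
func printMachineInfo(imageFsProvider cadvisor.ImageFsInfoProvider, rootDir string) error {
    c, err := cadvisor.New(imageFsProvider, rootDir, []string{"/kubepods"}, false /* usingLegacyStats */, false /* localStorageCapacityIsolation */)
    if err != nil {
        return err
    }
    if err := c.Start(); err != nil {
        return err
    }
    mi, err := c.MachineInfo()
    if err != nil {
        return err
    }
    fmt.Printf("cores=%d memory=%d bytes\n", mi.NumCores, mi.MemoryCapacity)
    return nil
}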

View File

@ -0,0 +1,76 @@
//go:build !linux && !windows
// +build !linux,!windows
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cadvisor
import (
"context"
"errors"
cadvisorapi "github.com/google/cadvisor/info/v1"
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
)
type cadvisorUnsupported struct {
}
var _ Interface = new(cadvisorUnsupported)
// New creates a new cAdvisor Interface for unsupported systems.
func New(imageFsInfoProvider ImageFsInfoProvider, rootPath string, cgroupsRoots []string, usingLegacyStats, localStorageCapacityIsolation bool) (Interface, error) {
return &cadvisorUnsupported{}, nil
}
var errUnsupported = errors.New("cAdvisor is unsupported in this build")
func (cu *cadvisorUnsupported) Start() error {
return errUnsupported
}
func (cu *cadvisorUnsupported) ContainerInfoV2(name string, options cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error) {
return nil, errUnsupported
}
func (cu *cadvisorUnsupported) GetRequestedContainersInfo(containerName string, options cadvisorapiv2.RequestOptions) (map[string]*cadvisorapi.ContainerInfo, error) {
return nil, errUnsupported
}
func (cu *cadvisorUnsupported) MachineInfo() (*cadvisorapi.MachineInfo, error) {
return nil, errUnsupported
}
func (cu *cadvisorUnsupported) VersionInfo() (*cadvisorapi.VersionInfo, error) {
return nil, errUnsupported
}
func (cu *cadvisorUnsupported) ImagesFsInfo(context.Context) (cadvisorapiv2.FsInfo, error) {
return cadvisorapiv2.FsInfo{}, errUnsupported
}
func (cu *cadvisorUnsupported) RootFsInfo() (cadvisorapiv2.FsInfo, error) {
return cadvisorapiv2.FsInfo{}, errUnsupported
}
func (cu *cadvisorUnsupported) ContainerFsInfo(context.Context) (cadvisorapiv2.FsInfo, error) {
return cadvisorapiv2.FsInfo{}, errUnsupported
}
func (cu *cadvisorUnsupported) GetDirFsInfo(path string) (cadvisorapiv2.FsInfo, error) {
return cadvisorapiv2.FsInfo{}, nil
}

View File

@ -0,0 +1,81 @@
//go:build windows
// +build windows
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cadvisor
import (
"context"
cadvisorapi "github.com/google/cadvisor/info/v1"
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
"k8s.io/kubernetes/pkg/kubelet/winstats"
)
type cadvisorClient struct {
rootPath string
winStatsClient winstats.Client
}
var _ Interface = new(cadvisorClient)
// New creates a cAdvisor client for Windows that is backed by the winstats perf counter client.
func New(imageFsInfoProvider ImageFsInfoProvider, rootPath string, cgroupRoots []string, usingLegacyStats, localStorageCapacityIsolation bool) (Interface, error) {
client, err := winstats.NewPerfCounterClient()
return &cadvisorClient{
rootPath: rootPath,
winStatsClient: client,
}, err
}
func (cu *cadvisorClient) Start() error {
return nil
}
// ContainerInfoV2 is only expected to be used for the root container. Returns info for all containers in the node.
func (cu *cadvisorClient) ContainerInfoV2(name string, options cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error) {
return cu.winStatsClient.WinContainerInfos()
}
func (cu *cadvisorClient) GetRequestedContainersInfo(containerName string, options cadvisorapiv2.RequestOptions) (map[string]*cadvisorapi.ContainerInfo, error) {
return nil, nil
}
func (cu *cadvisorClient) MachineInfo() (*cadvisorapi.MachineInfo, error) {
return cu.winStatsClient.WinMachineInfo()
}
func (cu *cadvisorClient) VersionInfo() (*cadvisorapi.VersionInfo, error) {
return cu.winStatsClient.WinVersionInfo()
}
func (cu *cadvisorClient) ImagesFsInfo(context.Context) (cadvisorapiv2.FsInfo, error) {
return cadvisorapiv2.FsInfo{}, nil
}
func (cu *cadvisorClient) ContainerFsInfo(context.Context) (cadvisorapiv2.FsInfo, error) {
return cadvisorapiv2.FsInfo{}, nil
}
func (cu *cadvisorClient) RootFsInfo() (cadvisorapiv2.FsInfo, error) {
return cu.GetDirFsInfo(cu.rootPath)
}
func (cu *cadvisorClient) GetDirFsInfo(path string) (cadvisorapiv2.FsInfo, error) {
return cu.winStatsClient.GetDirFsInfo(path)
}

View File

@ -0,0 +1,18 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cadvisor provides an interface for Kubelet interactions with cAdvisor.
package cadvisor // import "k8s.io/kubernetes/pkg/kubelet/cadvisor"

Some files were not shown because too many files have changed in this diff.