Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 10:33:35 +00:00

rebase: update kubernetes to v1.20.0

Updated the vendored Kubernetes packages to the latest release.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Committed by: mergify[bot]

parent 4abe128bd8
commit 83559144b1
2  vendor/k8s.io/kubernetes/pkg/apis/apps/OWNERS  generated  vendored

@@ -10,13 +10,11 @@ reviewers:
- sttts
- saad-ali
- ncdc
- tallclair
- dims
- errordeveloper
- mml
- m1093782566
- mbohlool
- david-mcmahon
- kevin-wangzefeng
- jianhuiz
labels:
57  vendor/k8s.io/kubernetes/pkg/apis/apps/validation/BUILD  generated  vendored

@@ -1,57 +0,0 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = ["validation.go"],
    importpath = "k8s.io/kubernetes/pkg/apis/apps/validation",
    deps = [
        "//pkg/apis/apps:go_default_library",
        "//pkg/apis/core:go_default_library",
        "//pkg/apis/core/validation:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/validation:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["validation_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//pkg/apis/apps:go_default_library",
        "//pkg/apis/core:go_default_library",
        "//pkg/features:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
        "//vendor/github.com/davecgh/go-spew/spew:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
687  vendor/k8s.io/kubernetes/pkg/apis/apps/validation/validation.go  generated  vendored

@@ -1,687 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package validation

import (
    "fmt"
    "strconv"

    apiequality "k8s.io/apimachinery/pkg/api/equality"
    apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/util/intstr"
    "k8s.io/apimachinery/pkg/util/validation"
    "k8s.io/apimachinery/pkg/util/validation/field"
    "k8s.io/kubernetes/pkg/apis/apps"
    api "k8s.io/kubernetes/pkg/apis/core"
    apivalidation "k8s.io/kubernetes/pkg/apis/core/validation"
)

// ValidateStatefulSetName can be used to check whether the given StatefulSet name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
func ValidateStatefulSetName(name string, prefix bool) []string {
    // TODO: Validate that there's name for the suffix inserted by the pods.
    // Currently this is just "-index". In the future we may allow a user
    // specified list of suffixes and we need to validate the longest one.
    return apimachineryvalidation.NameIsDNSSubdomain(name, prefix)
}

// ValidatePodTemplateSpecForStatefulSet validates the given template and ensures that it is in accordance with the desired selector.
func ValidatePodTemplateSpecForStatefulSet(template *api.PodTemplateSpec, selector labels.Selector, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}
    if template == nil {
        allErrs = append(allErrs, field.Required(fldPath, ""))
    } else {
        if !selector.Empty() {
            // Verify that the StatefulSet selector matches the labels in template.
            labels := labels.Set(template.Labels)
            if !selector.Matches(labels) {
                allErrs = append(allErrs, field.Invalid(fldPath.Child("metadata", "labels"), template.Labels, "`selector` does not match template `labels`"))
            }
        }
        // TODO: Add validation for PodSpec, currently this will check volumes, which we know will
        // fail. We should really check that the union of the given volumes and volumeClaims match
        // volume mounts in the containers.
        // allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(template, fldPath)...)
        allErrs = append(allErrs, unversionedvalidation.ValidateLabels(template.Labels, fldPath.Child("labels"))...)
        allErrs = append(allErrs, apivalidation.ValidateAnnotations(template.Annotations, fldPath.Child("annotations"))...)
        allErrs = append(allErrs, apivalidation.ValidatePodSpecificAnnotations(template.Annotations, &template.Spec, fldPath.Child("annotations"))...)
    }
    return allErrs
}

// ValidateStatefulSetSpec tests if required fields in the StatefulSet spec are set.
func ValidateStatefulSetSpec(spec *apps.StatefulSetSpec, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}

    switch spec.PodManagementPolicy {
    case "":
        allErrs = append(allErrs, field.Required(fldPath.Child("podManagementPolicy"), ""))
    case apps.OrderedReadyPodManagement, apps.ParallelPodManagement:
    default:
        allErrs = append(allErrs, field.Invalid(fldPath.Child("podManagementPolicy"), spec.PodManagementPolicy, fmt.Sprintf("must be '%s' or '%s'", apps.OrderedReadyPodManagement, apps.ParallelPodManagement)))
    }

    switch spec.UpdateStrategy.Type {
    case "":
        allErrs = append(allErrs, field.Required(fldPath.Child("updateStrategy"), ""))
    case apps.OnDeleteStatefulSetStrategyType:
        if spec.UpdateStrategy.RollingUpdate != nil {
            allErrs = append(
                allErrs,
                field.Invalid(
                    fldPath.Child("updateStrategy").Child("rollingUpdate"),
                    spec.UpdateStrategy.RollingUpdate,
                    fmt.Sprintf("only allowed for updateStrategy '%s'", apps.RollingUpdateStatefulSetStrategyType)))
        }
    case apps.RollingUpdateStatefulSetStrategyType:
        if spec.UpdateStrategy.RollingUpdate != nil {
            allErrs = append(allErrs,
                apivalidation.ValidateNonnegativeField(
                    int64(spec.UpdateStrategy.RollingUpdate.Partition),
                    fldPath.Child("updateStrategy").Child("rollingUpdate").Child("partition"))...)
        }
    default:
        allErrs = append(allErrs,
            field.Invalid(fldPath.Child("updateStrategy"), spec.UpdateStrategy,
                fmt.Sprintf("must be '%s' or '%s'",
                    apps.RollingUpdateStatefulSetStrategyType,
                    apps.OnDeleteStatefulSetStrategyType)))
    }

    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...)
    if spec.Selector == nil {
        allErrs = append(allErrs, field.Required(fldPath.Child("selector"), ""))
    } else {
        allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...)
        if len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 {
            allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is invalid for statefulset"))
        }
    }

    selector, err := metav1.LabelSelectorAsSelector(spec.Selector)
    if err != nil {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, ""))
    } else {
        allErrs = append(allErrs, ValidatePodTemplateSpecForStatefulSet(&spec.Template, selector, fldPath.Child("template"))...)
    }

    if spec.Template.Spec.RestartPolicy != api.RestartPolicyAlways {
        allErrs = append(allErrs, field.NotSupported(fldPath.Child("template", "spec", "restartPolicy"), spec.Template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)}))
    }
    if spec.Template.Spec.ActiveDeadlineSeconds != nil {
        allErrs = append(allErrs, field.Forbidden(fldPath.Child("template", "spec", "activeDeadlineSeconds"), "activeDeadlineSeconds in StatefulSet is not Supported"))
    }

    return allErrs
}

// ValidateStatefulSet validates a StatefulSet.
func ValidateStatefulSet(statefulSet *apps.StatefulSet) field.ErrorList {
    allErrs := apivalidation.ValidateObjectMeta(&statefulSet.ObjectMeta, true, ValidateStatefulSetName, field.NewPath("metadata"))
    allErrs = append(allErrs, ValidateStatefulSetSpec(&statefulSet.Spec, field.NewPath("spec"))...)
    return allErrs
}

// ValidateStatefulSetUpdate tests if required fields in the StatefulSet are set.
func ValidateStatefulSetUpdate(statefulSet, oldStatefulSet *apps.StatefulSet) field.ErrorList {
    allErrs := apivalidation.ValidateObjectMetaUpdate(&statefulSet.ObjectMeta, &oldStatefulSet.ObjectMeta, field.NewPath("metadata"))

    restoreReplicas := statefulSet.Spec.Replicas
    statefulSet.Spec.Replicas = oldStatefulSet.Spec.Replicas

    restoreTemplate := statefulSet.Spec.Template
    statefulSet.Spec.Template = oldStatefulSet.Spec.Template

    restoreStrategy := statefulSet.Spec.UpdateStrategy
    statefulSet.Spec.UpdateStrategy = oldStatefulSet.Spec.UpdateStrategy

    if !apiequality.Semantic.DeepEqual(statefulSet.Spec, oldStatefulSet.Spec) {
        allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "updates to statefulset spec for fields other than 'replicas', 'template', and 'updateStrategy' are forbidden"))
    }
    statefulSet.Spec.Replicas = restoreReplicas
    statefulSet.Spec.Template = restoreTemplate
    statefulSet.Spec.UpdateStrategy = restoreStrategy

    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(statefulSet.Spec.Replicas), field.NewPath("spec", "replicas"))...)
    return allErrs
}

// ValidateStatefulSetStatus validates a StatefulSetStatus.
func ValidateStatefulSetStatus(status *apps.StatefulSetStatus, fieldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}

    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Replicas), fieldPath.Child("replicas"))...)
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.ReadyReplicas), fieldPath.Child("readyReplicas"))...)
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.CurrentReplicas), fieldPath.Child("currentReplicas"))...)
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.UpdatedReplicas), fieldPath.Child("updatedReplicas"))...)
    if status.ObservedGeneration != nil {
        allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*status.ObservedGeneration), fieldPath.Child("observedGeneration"))...)
    }
    if status.CollisionCount != nil {
        allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*status.CollisionCount), fieldPath.Child("collisionCount"))...)
    }

    msg := "cannot be greater than status.replicas"
    if status.ReadyReplicas > status.Replicas {
        allErrs = append(allErrs, field.Invalid(fieldPath.Child("readyReplicas"), status.ReadyReplicas, msg))
    }
    if status.CurrentReplicas > status.Replicas {
        allErrs = append(allErrs, field.Invalid(fieldPath.Child("currentReplicas"), status.CurrentReplicas, msg))
    }
    if status.UpdatedReplicas > status.Replicas {
        allErrs = append(allErrs, field.Invalid(fieldPath.Child("updatedReplicas"), status.UpdatedReplicas, msg))
    }

    return allErrs
}

// ValidateStatefulSetStatusUpdate tests if required fields in the StatefulSet are set.
func ValidateStatefulSetStatusUpdate(statefulSet, oldStatefulSet *apps.StatefulSet) field.ErrorList {
    allErrs := field.ErrorList{}
    allErrs = append(allErrs, ValidateStatefulSetStatus(&statefulSet.Status, field.NewPath("status"))...)
    allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&statefulSet.ObjectMeta, &oldStatefulSet.ObjectMeta, field.NewPath("metadata"))...)
    // TODO: Validate status.
    if apivalidation.IsDecremented(statefulSet.Status.CollisionCount, oldStatefulSet.Status.CollisionCount) {
        value := int32(0)
        if statefulSet.Status.CollisionCount != nil {
            value = *statefulSet.Status.CollisionCount
        }
        allErrs = append(allErrs, field.Invalid(field.NewPath("status").Child("collisionCount"), value, "cannot be decremented"))
    }
    return allErrs
}

// ValidateControllerRevisionName can be used to check whether the given ControllerRevision name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateControllerRevisionName = apimachineryvalidation.NameIsDNSSubdomain

// ValidateControllerRevision collects errors for the fields of state and returns those errors as an ErrorList. If the
// returned list is empty, state is valid. Validation is performed to ensure that state is a valid ObjectMeta, its name
// is valid, and that it doesn't exceed the MaxControllerRevisionSize.
func ValidateControllerRevision(revision *apps.ControllerRevision) field.ErrorList {
    errs := field.ErrorList{}

    errs = append(errs, apivalidation.ValidateObjectMeta(&revision.ObjectMeta, true, ValidateControllerRevisionName, field.NewPath("metadata"))...)
    if revision.Data == nil {
        errs = append(errs, field.Required(field.NewPath("data"), "data is mandatory"))
    }
    errs = append(errs, apivalidation.ValidateNonnegativeField(revision.Revision, field.NewPath("revision"))...)
    return errs
}

// ValidateControllerRevisionUpdate collects errors pertaining to the mutation of an ControllerRevision Object. If the
// returned ErrorList is empty the update operation is valid. Any mutation to the ControllerRevision's Data or Revision
// is considered to be invalid.
func ValidateControllerRevisionUpdate(newHistory, oldHistory *apps.ControllerRevision) field.ErrorList {
    errs := field.ErrorList{}

    errs = append(errs, apivalidation.ValidateObjectMetaUpdate(&newHistory.ObjectMeta, &oldHistory.ObjectMeta, field.NewPath("metadata"))...)
    errs = append(errs, ValidateControllerRevision(newHistory)...)
    errs = append(errs, apivalidation.ValidateImmutableField(newHistory.Data, oldHistory.Data, field.NewPath("data"))...)
    return errs
}

// ValidateDaemonSet tests if required fields in the DaemonSet are set.
func ValidateDaemonSet(ds *apps.DaemonSet) field.ErrorList {
    allErrs := apivalidation.ValidateObjectMeta(&ds.ObjectMeta, true, ValidateDaemonSetName, field.NewPath("metadata"))
    allErrs = append(allErrs, ValidateDaemonSetSpec(&ds.Spec, field.NewPath("spec"))...)
    return allErrs
}

// ValidateDaemonSetUpdate tests if required fields in the DaemonSet are set.
func ValidateDaemonSetUpdate(ds, oldDS *apps.DaemonSet) field.ErrorList {
    allErrs := apivalidation.ValidateObjectMetaUpdate(&ds.ObjectMeta, &oldDS.ObjectMeta, field.NewPath("metadata"))
    allErrs = append(allErrs, ValidateDaemonSetSpecUpdate(&ds.Spec, &oldDS.Spec, field.NewPath("spec"))...)
    allErrs = append(allErrs, ValidateDaemonSetSpec(&ds.Spec, field.NewPath("spec"))...)
    return allErrs
}

// ValidateDaemonSetSpecUpdate tests if an update to a DaemonSetSpec is valid.
func ValidateDaemonSetSpecUpdate(newSpec, oldSpec *apps.DaemonSetSpec, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}

    // TemplateGeneration shouldn't be decremented
    if newSpec.TemplateGeneration < oldSpec.TemplateGeneration {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("templateGeneration"), newSpec.TemplateGeneration, "must not be decremented"))
    }

    // TemplateGeneration should be increased when and only when template is changed
    templateUpdated := !apiequality.Semantic.DeepEqual(newSpec.Template, oldSpec.Template)
    if newSpec.TemplateGeneration == oldSpec.TemplateGeneration && templateUpdated {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("templateGeneration"), newSpec.TemplateGeneration, "must be incremented upon template update"))
    } else if newSpec.TemplateGeneration > oldSpec.TemplateGeneration && !templateUpdated {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("templateGeneration"), newSpec.TemplateGeneration, "must not be incremented without template update"))
    }

    return allErrs
}

// validateDaemonSetStatus validates a DaemonSetStatus
func validateDaemonSetStatus(status *apps.DaemonSetStatus, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.CurrentNumberScheduled), fldPath.Child("currentNumberScheduled"))...)
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberMisscheduled), fldPath.Child("numberMisscheduled"))...)
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.DesiredNumberScheduled), fldPath.Child("desiredNumberScheduled"))...)
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberReady), fldPath.Child("numberReady"))...)
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(status.ObservedGeneration, fldPath.Child("observedGeneration"))...)
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.UpdatedNumberScheduled), fldPath.Child("updatedNumberScheduled"))...)
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberAvailable), fldPath.Child("numberAvailable"))...)
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberUnavailable), fldPath.Child("numberUnavailable"))...)
    if status.CollisionCount != nil {
        allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*status.CollisionCount), fldPath.Child("collisionCount"))...)
    }
    return allErrs
}

// ValidateDaemonSetStatusUpdate tests if required fields in the DaemonSet Status section
func ValidateDaemonSetStatusUpdate(ds, oldDS *apps.DaemonSet) field.ErrorList {
    allErrs := apivalidation.ValidateObjectMetaUpdate(&ds.ObjectMeta, &oldDS.ObjectMeta, field.NewPath("metadata"))
    allErrs = append(allErrs, validateDaemonSetStatus(&ds.Status, field.NewPath("status"))...)
    if apivalidation.IsDecremented(ds.Status.CollisionCount, oldDS.Status.CollisionCount) {
        value := int32(0)
        if ds.Status.CollisionCount != nil {
            value = *ds.Status.CollisionCount
        }
        allErrs = append(allErrs, field.Invalid(field.NewPath("status").Child("collisionCount"), value, "cannot be decremented"))
    }
    return allErrs
}

// ValidateDaemonSetSpec tests if required fields in the DaemonSetSpec are set.
func ValidateDaemonSetSpec(spec *apps.DaemonSetSpec, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}

    allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...)

    selector, err := metav1.LabelSelectorAsSelector(spec.Selector)
    if err == nil && !selector.Matches(labels.Set(spec.Template.Labels)) {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("template", "metadata", "labels"), spec.Template.Labels, "`selector` does not match template `labels`"))
    }
    if spec.Selector != nil && len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is invalid for daemonset"))
    }

    allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(&spec.Template, fldPath.Child("template"))...)
    // Daemons typically run on more than one node, so mark Read-Write persistent disks as invalid.
    allErrs = append(allErrs, apivalidation.ValidateReadOnlyPersistentDisks(spec.Template.Spec.Volumes, fldPath.Child("template", "spec", "volumes"))...)
    // RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec().
    if spec.Template.Spec.RestartPolicy != api.RestartPolicyAlways {
        allErrs = append(allErrs, field.NotSupported(fldPath.Child("template", "spec", "restartPolicy"), spec.Template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)}))
    }
    if spec.Template.Spec.ActiveDeadlineSeconds != nil {
        allErrs = append(allErrs, field.Forbidden(fldPath.Child("template", "spec", "activeDeadlineSeconds"), "activeDeadlineSeconds in DaemonSet is not Supported"))
    }
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...)
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.TemplateGeneration), fldPath.Child("templateGeneration"))...)

    allErrs = append(allErrs, ValidateDaemonSetUpdateStrategy(&spec.UpdateStrategy, fldPath.Child("updateStrategy"))...)
    if spec.RevisionHistoryLimit != nil {
        // zero is a valid RevisionHistoryLimit
        allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.RevisionHistoryLimit), fldPath.Child("revisionHistoryLimit"))...)
    }
    return allErrs
}

// ValidateRollingUpdateDaemonSet validates a given RollingUpdateDaemonSet.
func ValidateRollingUpdateDaemonSet(rollingUpdate *apps.RollingUpdateDaemonSet, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}
    allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...)
    if getIntOrPercentValue(rollingUpdate.MaxUnavailable) == 0 {
        // MaxUnavailable cannot be 0.
        allErrs = append(allErrs, field.Invalid(fldPath.Child("maxUnavailable"), rollingUpdate.MaxUnavailable, "cannot be 0"))
    }
    // Validate that MaxUnavailable is not more than 100%.
    allErrs = append(allErrs, IsNotMoreThan100Percent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...)
    return allErrs
}

// ValidateDaemonSetUpdateStrategy validates a given DaemonSetUpdateStrategy.
func ValidateDaemonSetUpdateStrategy(strategy *apps.DaemonSetUpdateStrategy, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}
    switch strategy.Type {
    case apps.OnDeleteDaemonSetStrategyType:
    case apps.RollingUpdateDaemonSetStrategyType:
        // Make sure RollingUpdate field isn't nil.
        if strategy.RollingUpdate == nil {
            allErrs = append(allErrs, field.Required(fldPath.Child("rollingUpdate"), ""))
            return allErrs
        }
        allErrs = append(allErrs, ValidateRollingUpdateDaemonSet(strategy.RollingUpdate, fldPath.Child("rollingUpdate"))...)
    default:
        validValues := []string{string(apps.RollingUpdateDaemonSetStrategyType), string(apps.OnDeleteDaemonSetStrategyType)}
        allErrs = append(allErrs, field.NotSupported(fldPath, strategy, validValues))
    }
    return allErrs
}

// ValidateDaemonSetName can be used to check whether the given daemon set name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateDaemonSetName = apimachineryvalidation.NameIsDNSSubdomain

// ValidateDeploymentName validates that the given name can be used as a deployment name.
var ValidateDeploymentName = apimachineryvalidation.NameIsDNSSubdomain

// ValidatePositiveIntOrPercent tests if a given value is a valid int or
// percentage.
func ValidatePositiveIntOrPercent(intOrPercent intstr.IntOrString, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}
    switch intOrPercent.Type {
    case intstr.String:
        for _, msg := range validation.IsValidPercent(intOrPercent.StrVal) {
            allErrs = append(allErrs, field.Invalid(fldPath, intOrPercent, msg))
        }
    case intstr.Int:
        allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(intOrPercent.IntValue()), fldPath)...)
    default:
        allErrs = append(allErrs, field.Invalid(fldPath, intOrPercent, "must be an integer or percentage (e.g '5%%')"))
    }
    return allErrs
}

func getPercentValue(intOrStringValue intstr.IntOrString) (int, bool) {
    if intOrStringValue.Type != intstr.String {
        return 0, false
    }
    if len(validation.IsValidPercent(intOrStringValue.StrVal)) != 0 {
        return 0, false
    }
    value, _ := strconv.Atoi(intOrStringValue.StrVal[:len(intOrStringValue.StrVal)-1])
    return value, true
}

func getIntOrPercentValue(intOrStringValue intstr.IntOrString) int {
    value, isPercent := getPercentValue(intOrStringValue)
    if isPercent {
        return value
    }
    return intOrStringValue.IntValue()
}

// IsNotMoreThan100Percent tests is a value can be represented as a percentage
// and if this value is not more than 100%.
func IsNotMoreThan100Percent(intOrStringValue intstr.IntOrString, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}
    value, isPercent := getPercentValue(intOrStringValue)
    if !isPercent || value <= 100 {
        return nil
    }
    allErrs = append(allErrs, field.Invalid(fldPath, intOrStringValue, "must not be greater than 100%"))
    return allErrs
}

// ValidateRollingUpdateDeployment validates a given RollingUpdateDeployment.
func ValidateRollingUpdateDeployment(rollingUpdate *apps.RollingUpdateDeployment, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}
    allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...)
    allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxSurge, fldPath.Child("maxSurge"))...)
    if getIntOrPercentValue(rollingUpdate.MaxUnavailable) == 0 && getIntOrPercentValue(rollingUpdate.MaxSurge) == 0 {
        // Both MaxSurge and MaxUnavailable cannot be zero.
        allErrs = append(allErrs, field.Invalid(fldPath.Child("maxUnavailable"), rollingUpdate.MaxUnavailable, "may not be 0 when `maxSurge` is 0"))
    }
    // Validate that MaxUnavailable is not more than 100%.
    allErrs = append(allErrs, IsNotMoreThan100Percent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...)
    return allErrs
}

// ValidateDeploymentStrategy validates given DeploymentStrategy.
func ValidateDeploymentStrategy(strategy *apps.DeploymentStrategy, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}
    switch strategy.Type {
    case apps.RecreateDeploymentStrategyType:
        if strategy.RollingUpdate != nil {
            allErrs = append(allErrs, field.Forbidden(fldPath.Child("rollingUpdate"), "may not be specified when strategy `type` is '"+string(apps.RecreateDeploymentStrategyType+"'")))
        }
    case apps.RollingUpdateDeploymentStrategyType:
        // This should never happen since it's set and checked in defaults.go
        if strategy.RollingUpdate == nil {
            allErrs = append(allErrs, field.Required(fldPath.Child("rollingUpdate"), "this should be defaulted and never be nil"))
        } else {
            allErrs = append(allErrs, ValidateRollingUpdateDeployment(strategy.RollingUpdate, fldPath.Child("rollingUpdate"))...)
        }
    default:
        validValues := []string{string(apps.RecreateDeploymentStrategyType), string(apps.RollingUpdateDeploymentStrategyType)}
        allErrs = append(allErrs, field.NotSupported(fldPath, strategy, validValues))
    }
    return allErrs
}

// ValidateRollback validates given RollbackConfig.
func ValidateRollback(rollback *apps.RollbackConfig, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}
    v := rollback.Revision
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(v), fldPath.Child("version"))...)
    return allErrs
}

// ValidateDeploymentSpec validates given deployment spec.
func ValidateDeploymentSpec(spec *apps.DeploymentSpec, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...)

    if spec.Selector == nil {
        allErrs = append(allErrs, field.Required(fldPath.Child("selector"), ""))
    } else {
        allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...)
        if len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 {
            allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is invalid for deployment"))
        }
    }

    selector, err := metav1.LabelSelectorAsSelector(spec.Selector)
    if err != nil {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "invalid label selector"))
    } else {
        allErrs = append(allErrs, ValidatePodTemplateSpecForReplicaSet(&spec.Template, selector, spec.Replicas, fldPath.Child("template"))...)
    }

    allErrs = append(allErrs, ValidateDeploymentStrategy(&spec.Strategy, fldPath.Child("strategy"))...)
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...)
    if spec.RevisionHistoryLimit != nil {
        // zero is a valid RevisionHistoryLimit
        allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.RevisionHistoryLimit), fldPath.Child("revisionHistoryLimit"))...)
    }
    if spec.RollbackTo != nil {
        allErrs = append(allErrs, ValidateRollback(spec.RollbackTo, fldPath.Child("rollback"))...)
    }
    if spec.ProgressDeadlineSeconds != nil {
        allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.ProgressDeadlineSeconds), fldPath.Child("progressDeadlineSeconds"))...)
        if *spec.ProgressDeadlineSeconds <= spec.MinReadySeconds {
            allErrs = append(allErrs, field.Invalid(fldPath.Child("progressDeadlineSeconds"), spec.ProgressDeadlineSeconds, "must be greater than minReadySeconds"))
        }
    }
    return allErrs
}

// ValidateDeploymentStatus validates given deployment status.
func ValidateDeploymentStatus(status *apps.DeploymentStatus, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(status.ObservedGeneration, fldPath.Child("observedGeneration"))...)
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Replicas), fldPath.Child("replicas"))...)
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.UpdatedReplicas), fldPath.Child("updatedReplicas"))...)
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.ReadyReplicas), fldPath.Child("readyReplicas"))...)
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.AvailableReplicas), fldPath.Child("availableReplicas"))...)
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.UnavailableReplicas), fldPath.Child("unavailableReplicas"))...)
    if status.CollisionCount != nil {
        allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*status.CollisionCount), fldPath.Child("collisionCount"))...)
    }
    msg := "cannot be greater than status.replicas"
    if status.UpdatedReplicas > status.Replicas {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("updatedReplicas"), status.UpdatedReplicas, msg))
    }
    if status.ReadyReplicas > status.Replicas {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("readyReplicas"), status.ReadyReplicas, msg))
    }
    if status.AvailableReplicas > status.Replicas {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("availableReplicas"), status.AvailableReplicas, msg))
    }
    if status.AvailableReplicas > status.ReadyReplicas {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("availableReplicas"), status.AvailableReplicas, "cannot be greater than readyReplicas"))
    }
    return allErrs
}

// ValidateDeploymentUpdate tests if an update to a Deployment is valid.
func ValidateDeploymentUpdate(update, old *apps.Deployment) field.ErrorList {
    allErrs := apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))
    allErrs = append(allErrs, ValidateDeploymentSpec(&update.Spec, field.NewPath("spec"))...)
    return allErrs
}

// ValidateDeploymentStatusUpdate tests if a an update to a Deployment status
// is valid.
func ValidateDeploymentStatusUpdate(update, old *apps.Deployment) field.ErrorList {
    allErrs := apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))
    fldPath := field.NewPath("status")
    allErrs = append(allErrs, ValidateDeploymentStatus(&update.Status, fldPath)...)
    if apivalidation.IsDecremented(update.Status.CollisionCount, old.Status.CollisionCount) {
        value := int32(0)
        if update.Status.CollisionCount != nil {
            value = *update.Status.CollisionCount
        }
        allErrs = append(allErrs, field.Invalid(fldPath.Child("collisionCount"), value, "cannot be decremented"))
    }
    return allErrs
}

// ValidateDeployment validates a given Deployment.
func ValidateDeployment(obj *apps.Deployment) field.ErrorList {
    allErrs := apivalidation.ValidateObjectMeta(&obj.ObjectMeta, true, ValidateDeploymentName, field.NewPath("metadata"))
    allErrs = append(allErrs, ValidateDeploymentSpec(&obj.Spec, field.NewPath("spec"))...)
    return allErrs
}

// ValidateDeploymentRollback validates a given DeploymentRollback.
func ValidateDeploymentRollback(obj *apps.DeploymentRollback) field.ErrorList {
    allErrs := apivalidation.ValidateAnnotations(obj.UpdatedAnnotations, field.NewPath("updatedAnnotations"))
    if len(obj.Name) == 0 {
        allErrs = append(allErrs, field.Required(field.NewPath("name"), "name is required"))
    }
    allErrs = append(allErrs, ValidateRollback(&obj.RollbackTo, field.NewPath("rollback"))...)
    return allErrs
}

// ValidateReplicaSetName can be used to check whether the given ReplicaSet
// name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateReplicaSetName = apimachineryvalidation.NameIsDNSSubdomain

// ValidateReplicaSet tests if required fields in the ReplicaSet are set.
func ValidateReplicaSet(rs *apps.ReplicaSet) field.ErrorList {
    allErrs := apivalidation.ValidateObjectMeta(&rs.ObjectMeta, true, ValidateReplicaSetName, field.NewPath("metadata"))
    allErrs = append(allErrs, ValidateReplicaSetSpec(&rs.Spec, field.NewPath("spec"))...)
    return allErrs
}

// ValidateReplicaSetUpdate tests if required fields in the ReplicaSet are set.
func ValidateReplicaSetUpdate(rs, oldRs *apps.ReplicaSet) field.ErrorList {
    allErrs := field.ErrorList{}
    allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&rs.ObjectMeta, &oldRs.ObjectMeta, field.NewPath("metadata"))...)
    allErrs = append(allErrs, ValidateReplicaSetSpec(&rs.Spec, field.NewPath("spec"))...)
    return allErrs
}

// ValidateReplicaSetStatusUpdate tests if required fields in the ReplicaSet are set.
func ValidateReplicaSetStatusUpdate(rs, oldRs *apps.ReplicaSet) field.ErrorList {
    allErrs := field.ErrorList{}
    allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&rs.ObjectMeta, &oldRs.ObjectMeta, field.NewPath("metadata"))...)
    allErrs = append(allErrs, ValidateReplicaSetStatus(rs.Status, field.NewPath("status"))...)
    return allErrs
}

// ValidateReplicaSetStatus validates a given ReplicaSetStatus.
func ValidateReplicaSetStatus(status apps.ReplicaSetStatus, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Replicas), fldPath.Child("replicas"))...)
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.FullyLabeledReplicas), fldPath.Child("fullyLabeledReplicas"))...)
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.ReadyReplicas), fldPath.Child("readyReplicas"))...)
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.AvailableReplicas), fldPath.Child("availableReplicas"))...)
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.ObservedGeneration), fldPath.Child("observedGeneration"))...)
    msg := "cannot be greater than status.replicas"
    if status.FullyLabeledReplicas > status.Replicas {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("fullyLabeledReplicas"), status.FullyLabeledReplicas, msg))
    }
    if status.ReadyReplicas > status.Replicas {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("readyReplicas"), status.ReadyReplicas, msg))
    }
    if status.AvailableReplicas > status.Replicas {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("availableReplicas"), status.AvailableReplicas, msg))
    }
    if status.AvailableReplicas > status.ReadyReplicas {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("availableReplicas"), status.AvailableReplicas, "cannot be greater than readyReplicas"))
    }
    return allErrs
}

// ValidateReplicaSetSpec tests if required fields in the ReplicaSet spec are set.
func ValidateReplicaSetSpec(spec *apps.ReplicaSetSpec, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}

    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...)
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...)

    if spec.Selector == nil {
        allErrs = append(allErrs, field.Required(fldPath.Child("selector"), ""))
    } else {
        allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...)
        if len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 {
            allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is invalid for deployment"))
        }
    }

    selector, err := metav1.LabelSelectorAsSelector(spec.Selector)
    if err != nil {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "invalid label selector"))
    } else {
        allErrs = append(allErrs, ValidatePodTemplateSpecForReplicaSet(&spec.Template, selector, spec.Replicas, fldPath.Child("template"))...)
    }
    return allErrs
}

// ValidatePodTemplateSpecForReplicaSet validates the given template and ensures that it is in accordance with the desired selector and replicas.
func ValidatePodTemplateSpecForReplicaSet(template *api.PodTemplateSpec, selector labels.Selector, replicas int32, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}
    if template == nil {
        allErrs = append(allErrs, field.Required(fldPath, ""))
    } else {
        if !selector.Empty() {
            // Verify that the ReplicaSet selector matches the labels in template.
            labels := labels.Set(template.Labels)
            if !selector.Matches(labels) {
                allErrs = append(allErrs, field.Invalid(fldPath.Child("metadata", "labels"), template.Labels, "`selector` does not match template `labels`"))
            }
        }
        allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(template, fldPath)...)
        if replicas > 1 {
            allErrs = append(allErrs, apivalidation.ValidateReadOnlyPersistentDisks(template.Spec.Volumes, fldPath.Child("spec", "volumes"))...)
        }
        // RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec().
        if template.Spec.RestartPolicy != api.RestartPolicyAlways {
            allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "restartPolicy"), template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)}))
        }
        if template.Spec.ActiveDeadlineSeconds != nil {
            allErrs = append(allErrs, field.Forbidden(fldPath.Child("spec", "activeDeadlineSeconds"), "activeDeadlineSeconds in ReplicaSet is not Supported"))
        }
    }
    return allErrs
}
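The deleted package above exposed plain functions that take objects in Kubernetes' internal API types and return a field.ErrorList. As a rough orientation only, a caller of ValidateStatefulSet would look roughly like the following sketch; the object name and field values are invented, and the imports simply mirror the ones shown in the deleted file:

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/kubernetes/pkg/apis/apps"
    appsvalidation "k8s.io/kubernetes/pkg/apis/apps/validation"
    api "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
    // Hypothetical StatefulSet built with the internal (unversioned) API types.
    sts := &apps.StatefulSet{
        ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"},
        Spec: apps.StatefulSetSpec{
            PodManagementPolicy: apps.OrderedReadyPodManagement,
            UpdateStrategy:      apps.StatefulSetUpdateStrategy{Type: apps.OnDeleteStatefulSetStrategyType},
            Selector:            &metav1.LabelSelector{MatchLabels: map[string]string{"app": "example"}},
            Template: api.PodTemplateSpec{
                ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "example"}},
                Spec:       api.PodSpec{RestartPolicy: api.RestartPolicyAlways},
            },
        },
    }
    // An empty ErrorList means the object passed validation.
    if errs := appsvalidation.ValidateStatefulSet(sts); len(errs) > 0 {
        fmt.Println(errs.ToAggregate())
    }
}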
1  vendor/k8s.io/kubernetes/pkg/apis/autoscaling/OWNERS  generated  vendored

@@ -16,5 +16,4 @@ reviewers:
- madhusudancs
- mml
- mbohlool
- david-mcmahon
- jianhuiz
48  vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go  generated  vendored

@@ -204,6 +204,12 @@ const (
    // (for example length of queue in cloud messaging service, or
    // QPS from loadbalancer running outside of cluster).
    ExternalMetricSourceType MetricSourceType = "External"
    // ContainerResourceMetricSourceType is a resource metric known to Kubernetes, as
    // specified in requests and limits, describing a single container in each pod in the current
    // scale target (e.g. CPU or memory). Such metrics are built in to
    // Kubernetes, and have special scaling options on top of those available
    // to normal per-pod metrics (the "pods" source).
    ContainerResourceMetricSourceType MetricSourceType = "ContainerResource"
)

// MetricSpec specifies how to scale based on a single metric

@@ -229,6 +235,13 @@ type MetricSpec struct {
    // to normal per-pod metrics using the "pods" source.
    // +optional
    Resource *ResourceMetricSource
    // ContainerResource refers to a resource metric (such as those specified in
    // requests and limits) known to Kubernetes describing a single container in each pod of the
    // current scale target (e.g. CPU or memory). Such metrics are built in to
    // Kubernetes, and have special scaling options on top of those available
    // to normal per-pod metrics using the "pods" source.
    // +optional
    ContainerResource *ContainerResourceMetricSource
    // External refers to a global metric that is not associated
    // with any Kubernetes object. It allows autoscaling based on information
    // coming from components running outside of cluster

@@ -271,6 +284,22 @@ type ResourceMetricSource struct {
    Target MetricTarget
}

// ContainerResourceMetricSource indicates how to scale on a resource metric known to
// Kubernetes, as specified in the requests and limits, describing a single container in
// each of the pods of the current scale target(e.g. CPU or memory). The values will be
// averaged together before being compared to the target. Such metrics are built into
// Kubernetes, and have special scaling options on top of those available to
// normal per-pod metrics using the "pods" source. Only one "target" type
// should be set.
type ContainerResourceMetricSource struct {
    // name is the name of the of the resource
    Name api.ResourceName
    // container is the name of the container in the pods of the scaling target.
    Container string
    // target specifies the target value for the given metric
    Target MetricTarget
}

// ExternalMetricSource indicates how to scale on a metric not associated with
// any Kubernetes object (for example length of queue in cloud
// messaging service, or QPS from loadbalancer running outside of cluster).

@@ -420,6 +449,13 @@ type MetricStatus struct {
    // to normal per-pod metrics using the "pods" source.
    // +optional
    Resource *ResourceMetricStatus
    // ContainerResource refers to a resource metric (such as those specified in
    // requests and limits) known to Kubernetes describing a single container in each pod in the
    // current scale target (e.g. CPU or memory). Such metrics are built in to
    // Kubernetes, and have special scaling options on top of those available
    // to normal per-pod metrics using the "pods" source.
    // +optional
    ContainerResource *ContainerResourceMetricStatus
    // External refers to a global metric that is not associated
    // with any Kubernetes object. It allows autoscaling based on information
    // coming from components running outside of cluster

@@ -456,6 +492,18 @@ type ResourceMetricStatus struct {
    Current MetricValueStatus
}

// ContainerResourceMetricStatus indicates the current value of a resource metric known to
// Kubernetes, as specified in requests and limits, describing each pod in the
// current scale target (e.g. CPU or memory). Such metrics are built in to
// Kubernetes, and have special scaling options on top of those available to
// normal per-pod metrics using the "pods" source.
type ContainerResourceMetricStatus struct {
    // Name is the name of the resource in question.
    Name api.ResourceName
    Container string
    Current MetricValueStatus
}

// ExternalMetricStatus indicates the current value of a global metric
// not associated with any Kubernetes object.
type ExternalMetricStatus struct {
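The ContainerResource source added above lets an HPA scale on the usage of one named container rather than the pod as a whole. A minimal sketch of filling in such a MetricSpec with these internal types follows; the container name and target value are invented, and the MetricTarget fields are assumed to match the existing Resource source since they are not shown in this hunk:

package example

import (
    "k8s.io/kubernetes/pkg/apis/autoscaling"
    api "k8s.io/kubernetes/pkg/apis/core"
)

// scaleOnSidecarCPU returns a MetricSpec that targets the average CPU
// utilization of a single container ("csi-sidecar", a made-up name)
// across the pods of the scale target.
func scaleOnSidecarCPU() autoscaling.MetricSpec {
    avg := int32(75)
    return autoscaling.MetricSpec{
        Type: autoscaling.ContainerResourceMetricSourceType,
        ContainerResource: &autoscaling.ContainerResourceMetricSource{
            Name:      api.ResourceCPU,
            Container: "csi-sidecar",
            Target: autoscaling.MetricTarget{
                Type:               autoscaling.UtilizationMetricType,
                AverageUtilization: &avg,
            },
        },
    }
}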
44  vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go  generated  vendored

@@ -25,6 +25,40 @@ import (
    runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerResourceMetricSource) DeepCopyInto(out *ContainerResourceMetricSource) {
    *out = *in
    in.Target.DeepCopyInto(&out.Target)
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourceMetricSource.
func (in *ContainerResourceMetricSource) DeepCopy() *ContainerResourceMetricSource {
    if in == nil {
        return nil
    }
    out := new(ContainerResourceMetricSource)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerResourceMetricStatus) DeepCopyInto(out *ContainerResourceMetricStatus) {
    *out = *in
    in.Current.DeepCopyInto(&out.Current)
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourceMetricStatus.
func (in *ContainerResourceMetricStatus) DeepCopy() *ContainerResourceMetricStatus {
    if in == nil {
        return nil
    }
    out := new(ContainerResourceMetricStatus)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) {
    *out = *in

@@ -340,6 +374,11 @@ func (in *MetricSpec) DeepCopyInto(out *MetricSpec) {
        *out = new(ResourceMetricSource)
        (*in).DeepCopyInto(*out)
    }
    if in.ContainerResource != nil {
        in, out := &in.ContainerResource, &out.ContainerResource
        *out = new(ContainerResourceMetricSource)
        (*in).DeepCopyInto(*out)
    }
    if in.External != nil {
        in, out := &in.External, &out.External
        *out = new(ExternalMetricSource)

@@ -376,6 +415,11 @@ func (in *MetricStatus) DeepCopyInto(out *MetricStatus) {
        *out = new(ResourceMetricStatus)
        (*in).DeepCopyInto(*out)
    }
    if in.ContainerResource != nil {
        in, out := &in.ContainerResource, &out.ContainerResource
        *out = new(ContainerResourceMetricStatus)
        (*in).DeepCopyInto(*out)
    }
    if in.External != nil {
        in, out := &in.External, &out.External
        *out = new(ExternalMetricStatus)
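These generated helpers follow the usual deepcopy contract: DeepCopy allocates and returns an independent copy (or nil for a nil receiver), while DeepCopyInto fills a caller-supplied value. A tiny illustrative sketch, with the function name and field value invented:

package example

import "k8s.io/kubernetes/pkg/apis/autoscaling"

func copySource(in *autoscaling.ContainerResourceMetricSource) *autoscaling.ContainerResourceMetricSource {
    // DeepCopy is nil-safe and the result shares no memory with the input.
    out := in.DeepCopy()
    if out != nil {
        out.Container = "renamed" // does not affect *in
    }
    return out
}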
1  vendor/k8s.io/kubernetes/pkg/apis/batch/OWNERS  generated  vendored

@@ -16,7 +16,6 @@ reviewers:
- errordeveloper
- mml
- mbohlool
- david-mcmahon
- jianhuiz
labels:
- sig/apps
2  vendor/k8s.io/kubernetes/pkg/apis/batch/types.go  generated  vendored

@@ -155,6 +155,7 @@ type JobSpec struct {
type JobStatus struct {

    // The latest available observations of an object's current state.
    // When a job fails, one of the conditions will have type == "Failed".
    // +optional
    Conditions []JobCondition

@@ -167,6 +168,7 @@ type JobStatus struct {
    // Represents time when the job was completed. It is not guaranteed to
    // be set in happens-before order across separate operations.
    // It is represented in RFC3339 form and is in UTC.
    // The completion time is only set when the job finishes successfully.
    // +optional
    CompletionTime *metav1.Time
3  vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS  generated  vendored

@@ -6,7 +6,6 @@ approvers:
- smarterclayton
- thockin
- liggitt
# - bgrant0607 # manual escalations only
reviewers:
- thockin
- lavalamp

@@ -28,7 +27,6 @@ reviewers:
- sttts
- dchen1107
- saad-ali
- zmerlynn
- luxas
- janetkuo
- justinsb

@@ -36,7 +34,6 @@ reviewers:
- ncdc
- tallclair
- yifan-gu
- eparis
- mwielgus
- soltysh
- piosz
9  vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go  generated  vendored

@@ -39,17 +39,20 @@ const (

    // SeccompPodAnnotationKey represents the key of a seccomp profile applied
    // to all containers of a pod.
    // Deprecated: set a pod security context `seccompProfile` field.
    SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod"

    // SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied
    // to one container of a pod.
    // Deprecated: set a container security context `seccompProfile` field.
    SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/"

    // SeccompProfileRuntimeDefault represents the default seccomp profile used by container runtime.
    // Deprecated: set a pod or container security context `seccompProfile` of type "RuntimeDefault" instead.
    SeccompProfileRuntimeDefault string = "runtime/default"

    // DeprecatedSeccompProfileDockerDefault represents the default seccomp profile used by docker.
    // This is now deprecated and should be replaced by SeccompProfileRuntimeDefault.
    // Deprecated: set a pod or container security context `seccompProfile` of type "RuntimeDefault" instead.
    DeprecatedSeccompProfileDockerDefault string = "docker/default"

    // PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized)

@@ -61,10 +64,6 @@ const (
    // This annotation can be attached to node.
    ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl"

    // BootstrapCheckpointAnnotationKey represents a Resource (Pod) that should be checkpointed by
    // the kubelet prior to running
    BootstrapCheckpointAnnotationKey string = "node.kubernetes.io/bootstrap-checkpoint"

    // NonConvertibleAnnotationPrefix annotation key prefix used to identify non-convertible json paths.
    NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io"
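The deprecation notes added above point to the securityContext seccompProfile field that replaces these annotations. A short sketch of the replacement using the internal core types; the helper name is invented, and the SeccompProfile types are assumed to be present in this package, as introduced alongside the deprecation:

package example

import api "k8s.io/kubernetes/pkg/apis/core"

// runtimeDefaultSeccomp returns a pod-level security context roughly equivalent
// to the deprecated "seccomp.security.alpha.kubernetes.io/pod: runtime/default"
// annotation.
func runtimeDefaultSeccomp() *api.PodSecurityContext {
    return &api.PodSecurityContext{
        SeccompProfile: &api.SeccompProfile{
            Type: api.SeccompProfileTypeRuntimeDefault,
        },
    }
}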
40
vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go
generated
vendored
40
vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go
generated
vendored
@ -267,7 +267,10 @@ func IsIntegerResourceName(str string) bool {
// IsServiceIPSet aims to check if the service's ClusterIP is set or not
// the objective is not to perform validation here
func IsServiceIPSet(service *core.Service) bool {
return service.Spec.ClusterIP != core.ClusterIPNone && service.Spec.ClusterIP != ""
// This function assumes that the service is semantically validated
// it does not test if the IP is valid, just makes sure that it is set.
return len(service.Spec.ClusterIP) > 0 &&
service.Spec.ClusterIP != core.ClusterIPNone
}

var standardFinalizers = sets.NewString(
@ -494,3 +497,38 @@ func PersistentVolumeClaimHasClass(claim *core.PersistentVolumeClaim) bool {

return false
}

func toResourceNames(resources core.ResourceList) []core.ResourceName {
result := []core.ResourceName{}
for resourceName := range resources {
result = append(result, resourceName)
}
return result
}

func toSet(resourceNames []core.ResourceName) sets.String {
result := sets.NewString()
for _, resourceName := range resourceNames {
result.Insert(string(resourceName))
}
return result
}

// toContainerResourcesSet returns a set of resources names in container resource requirements
func toContainerResourcesSet(ctr *core.Container) sets.String {
resourceNames := toResourceNames(ctr.Resources.Requests)
resourceNames = append(resourceNames, toResourceNames(ctr.Resources.Limits)...)
return toSet(resourceNames)
}

// ToPodResourcesSet returns a set of resource names in all containers in a pod.
func ToPodResourcesSet(podSpec *core.PodSpec) sets.String {
result := sets.NewString()
for i := range podSpec.InitContainers {
result = result.Union(toContainerResourcesSet(&podSpec.InitContainers[i]))
}
for i := range podSpec.Containers {
result = result.Union(toContainerResourcesSet(&podSpec.Containers[i]))
}
return result
}
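The reworked IsServiceIPSet above treats a ClusterIP as "set" only when it is non-empty and not the headless "None" value. A minimal sketch of the same check, written against the public k8s.io/api/core/v1 types rather than the internal core package shown in the diff (the helper name below is illustrative):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// isServiceIPSet mirrors the vendored helper: a ClusterIP counts as set only
// when it is non-empty and not the special "None" value used by headless Services.
func isServiceIPSet(svc *v1.Service) bool {
	return len(svc.Spec.ClusterIP) > 0 && svc.Spec.ClusterIP != v1.ClusterIPNone
}

func main() {
	headless := &v1.Service{Spec: v1.ServiceSpec{ClusterIP: v1.ClusterIPNone}}
	regular := &v1.Service{Spec: v1.ServiceSpec{ClusterIP: "10.0.0.10"}}
	fmt.Println(isServiceIPSet(headless), isServiceIPSet(regular)) // false true
}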
1
vendor/k8s.io/kubernetes/pkg/apis/core/install/OWNERS
generated
vendored
1
vendor/k8s.io/kubernetes/pkg/apis/core/install/OWNERS
generated
vendored
@ -8,4 +8,3 @@ reviewers:
|
||||
- liggitt
|
||||
- nikhiljindal
|
||||
- dims
|
||||
- david-mcmahon
|
||||
|
14
vendor/k8s.io/kubernetes/pkg/apis/core/pods/helpers.go
generated
vendored
14
vendor/k8s.io/kubernetes/pkg/apis/core/pods/helpers.go
generated
vendored
@ -34,23 +34,23 @@ type ContainerVisitorWithPath func(container *api.Container, path *field.Path) b
// of every container in the given pod spec and the field.Path to that container.
// If visitor returns false, visiting is short-circuited. VisitContainersWithPath returns true if visiting completes,
// false if visiting was short-circuited.
func VisitContainersWithPath(podSpec *api.PodSpec, visitor ContainerVisitorWithPath) bool {
path := field.NewPath("spec", "initContainers")
func VisitContainersWithPath(podSpec *api.PodSpec, specPath *field.Path, visitor ContainerVisitorWithPath) bool {
fldPath := specPath.Child("initContainers")
for i := range podSpec.InitContainers {
if !visitor(&podSpec.InitContainers[i], path.Index(i)) {
if !visitor(&podSpec.InitContainers[i], fldPath.Index(i)) {
return false
}
}
path = field.NewPath("spec", "containers")
fldPath = specPath.Child("containers")
for i := range podSpec.Containers {
if !visitor(&podSpec.Containers[i], path.Index(i)) {
if !visitor(&podSpec.Containers[i], fldPath.Index(i)) {
return false
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.EphemeralContainers) {
path = field.NewPath("spec", "ephemeralContainers")
fldPath = specPath.Child("ephemeralContainers")
for i := range podSpec.EphemeralContainers {
if !visitor((*api.Container)(&podSpec.EphemeralContainers[i].EphemeralContainerCommon), path.Index(i)) {
if !visitor((*api.Container)(&podSpec.EphemeralContainers[i].EphemeralContainerCommon), fldPath.Index(i)) {
return false
}
}
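The change above lets the caller pass the root field.Path instead of hard-coding "spec", so error paths stay correct when the pod spec is nested (for example under a template). A rough sketch of the same visitor-with-path pattern, written against the public k8s.io/api/core/v1 types and without the ephemeral-containers feature gate; the names here are illustrative, not the vendored API:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

type containerVisitor func(c *v1.Container, path *field.Path) bool

// visitContainers walks init and regular containers, handing the visitor a
// field.Path rooted at the caller-supplied specPath.
func visitContainers(spec *v1.PodSpec, specPath *field.Path, visit containerVisitor) bool {
	p := specPath.Child("initContainers")
	for i := range spec.InitContainers {
		if !visit(&spec.InitContainers[i], p.Index(i)) {
			return false
		}
	}
	p = specPath.Child("containers")
	for i := range spec.Containers {
		if !visit(&spec.Containers[i], p.Index(i)) {
			return false
		}
	}
	return true
}

func main() {
	spec := &v1.PodSpec{Containers: []v1.Container{{Name: "app"}}}
	visitContainers(spec, field.NewPath("spec"), func(c *v1.Container, path *field.Path) bool {
		fmt.Printf("%s: %s\n", path.String(), c.Name) // spec.containers[0]: app
		return true
	})
}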
29
vendor/k8s.io/kubernetes/pkg/apis/core/resource.go
generated
vendored
29
vendor/k8s.io/kubernetes/pkg/apis/core/resource.go
generated
vendored
@ -26,40 +26,33 @@ func (rn ResourceName) String() string {

// CPU returns the CPU limit if specified.
func (rl *ResourceList) CPU() *resource.Quantity {
if val, ok := (*rl)[ResourceCPU]; ok {
return &val
}
return &resource.Quantity{Format: resource.DecimalSI}
return rl.Name(ResourceCPU, resource.DecimalSI)
}

// Memory returns the Memory limit if specified.
func (rl *ResourceList) Memory() *resource.Quantity {
if val, ok := (*rl)[ResourceMemory]; ok {
return &val
}
return &resource.Quantity{Format: resource.BinarySI}
return rl.Name(ResourceMemory, resource.BinarySI)
}

// Storage returns the Storage limit if specified.
func (rl *ResourceList) Storage() *resource.Quantity {
if val, ok := (*rl)[ResourceStorage]; ok {
return &val
}
return &resource.Quantity{Format: resource.BinarySI}
return rl.Name(ResourceStorage, resource.BinarySI)
}

// Pods returns the list of pods
func (rl *ResourceList) Pods() *resource.Quantity {
if val, ok := (*rl)[ResourcePods]; ok {
return &val
}
return &resource.Quantity{}
return rl.Name(ResourcePods, resource.DecimalSI)
}

// StorageEphemeral returns the list of ephemeral storage volumes, if any
func (rl *ResourceList) StorageEphemeral() *resource.Quantity {
if val, ok := (*rl)[ResourceEphemeralStorage]; ok {
return rl.Name(ResourceEphemeralStorage, resource.BinarySI)
}

// Name returns the resource with name if specified, otherwise it returns a nil quantity with default format.
func (rl *ResourceList) Name(name ResourceName, defaultFormat resource.Format) *resource.Quantity {
if val, ok := (*rl)[name]; ok {
return &val
}
return &resource.Quantity{}
return &resource.Quantity{Format: defaultFormat}
}
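The per-resource accessors above now delegate to a single Name(name, defaultFormat) lookup that returns a zero Quantity in the requested format when the entry is missing. A short sketch of that pattern using the public v1.ResourceList; quantityOf below is a hypothetical stand-in for the vendored method, not part of the library:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// quantityOf returns the quantity stored under name, or a zero Quantity that
// carries the requested default format when the entry is absent.
func quantityOf(rl v1.ResourceList, name v1.ResourceName, defaultFormat resource.Format) *resource.Quantity {
	if val, ok := rl[name]; ok {
		return &val
	}
	return &resource.Quantity{Format: defaultFormat}
}

func main() {
	rl := v1.ResourceList{v1.ResourceCPU: resource.MustParse("500m")}
	fmt.Println(quantityOf(rl, v1.ResourceCPU, resource.DecimalSI).String())   // 500m
	fmt.Println(quantityOf(rl, v1.ResourceMemory, resource.BinarySI).String()) // 0
}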
333
vendor/k8s.io/kubernetes/pkg/apis/core/types.go
generated
vendored
333
vendor/k8s.io/kubernetes/pkg/apis/core/types.go
generated
vendored
@ -154,9 +154,36 @@ type VolumeSource struct {
// StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod
// +optional
StorageOS *StorageOSVolumeSource
// CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature).
// CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).
// +optional
CSI *CSIVolumeSource
// Ephemeral represents a volume that is handled by a cluster storage driver (Alpha feature).
// The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
// and deleted when the pod is removed.
//
// Use this if:
// a) the volume is only needed while the pod runs,
// b) features of normal volumes like restoring from snapshot or capacity
// tracking are needed,
// c) the storage driver is specified through a storage class, and
// d) the storage driver supports dynamic volume provisioning through
// a PersistentVolumeClaim (see EphemeralVolumeSource for more
// information on the connection between this volume type
// and PersistentVolumeClaim).
//
// Use PersistentVolumeClaim or one of the vendor-specific
// APIs for volumes that persist for longer than the lifecycle
// of an individual pod.
//
// Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
// be used that way - see the documentation of the driver for
// more information.
//
// A pod can use both types of ephemeral volumes and
// persistent volumes at the same time.
//
// +optional
Ephemeral *EphemeralVolumeSource
}

// PersistentVolumeSource is similar to VolumeSource but meant for the administrator who creates PVs.
@ -418,17 +445,13 @@ type PersistentVolumeClaimSpec struct {
// +optional
VolumeMode *PersistentVolumeMode
// This field can be used to specify either:
// * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta)
// * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
// * An existing PVC (PersistentVolumeClaim)
// * An existing custom resource/object that implements data population (Alpha)
// In order to use VolumeSnapshot object types, the appropriate feature gate
// must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource)
// * An existing custom resource that implements data population (Alpha)
// In order to use custom resource types that implement data population,
// the AnyVolumeDataSource feature gate must be enabled.
// If the provisioner or an external controller can support the specified data source,
// it will create a new volume based on the contents of the specified data source.
// If the specified data source is not supported, the volume will
// not be created and the failure will be reported as an event.
// In the future, we plan to support more data source types and the behavior
// of the provisioner may change.
// +optional
DataSource *TypedLocalObjectReference
}
@ -1670,6 +1693,53 @@ type CSIVolumeSource struct {
NodePublishSecretRef *LocalObjectReference
}

// EphemeralVolumeSource represents an ephemeral volume that is handled by a normal storage driver.
type EphemeralVolumeSource struct {
// VolumeClaimTemplate will be used to create a stand-alone PVC to provision the volume.
// The pod in which this EphemeralVolumeSource is embedded will be the
// owner of the PVC, i.e. the PVC will be deleted together with the
// pod. The name of the PVC will be `<pod name>-<volume name>` where
// `<volume name>` is the name from the `PodSpec.Volumes` array
// entry. Pod validation will reject the pod if the concatenated name
// is not valid for a PVC (for example, too long).
//
// An existing PVC with that name that is not owned by the pod
// will *not* be used for the pod to avoid using an unrelated
// volume by mistake. Starting the pod is then blocked until
// the unrelated PVC is removed. If such a pre-created PVC is
// meant to be used by the pod, the PVC has to updated with an
// owner reference to the pod once the pod exists. Normally
// this should not be necessary, but it may be useful when
// manually reconstructing a broken cluster.
//
// This field is read-only and no changes will be made by Kubernetes
// to the PVC after it has been created.
//
// Required, must not be nil.
VolumeClaimTemplate *PersistentVolumeClaimTemplate

// ReadOnly specifies a read-only configuration for the volume.
// Defaults to false (read/write).
// +optional
ReadOnly bool
}

// PersistentVolumeClaimTemplate is used to produce
// PersistentVolumeClaim objects as part of an EphemeralVolumeSource.
type PersistentVolumeClaimTemplate struct {
// ObjectMeta may contain labels and annotations that will be copied into the PVC
// when creating it. No other fields are allowed and will be rejected during
// validation.
// +optional
metav1.ObjectMeta

// Spec for the PersistentVolumeClaim. The entire content is
// copied unchanged into the PVC that gets created from this
// template. The same fields as in a PersistentVolumeClaim
// are also valid here.
Spec PersistentVolumeClaimSpec
}
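The EphemeralVolumeSource and PersistentVolumeClaimTemplate types above back the new generic ephemeral volumes: the PVC is created from the template, owned by the pod, and named <pod name>-<volume name>. A hedged sketch of such a volume built with the public k8s.io/api/core/v1 types from this release; the volume name, labels, and storage class are made up for illustration:

package main

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func ephemeralScratchVolume() v1.Volume {
	sc := "example-storage-class" // assumed storage class name
	return v1.Volume{
		Name: "scratch",
		VolumeSource: v1.VolumeSource{
			Ephemeral: &v1.EphemeralVolumeSource{
				VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
					// Only labels/annotations are allowed here; the PVC is named <pod name>-scratch.
					ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "demo"}},
					Spec: v1.PersistentVolumeClaimSpec{
						AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
						StorageClassName: &sc,
						Resources: v1.ResourceRequirements{
							Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Gi")},
						},
					},
				},
			},
		},
	}
}

func main() { _ = ephemeralScratchVolume() }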
// ContainerPort represents a network port in a single container
type ContainerPort struct {
// Optional: If specified, this must be an IANA_SVC_NAME Each named port
@ -1774,7 +1844,7 @@ type EnvVar struct {
// EnvVarSource represents a source for the value of an EnvVar.
// Only one of its fields may be set.
type EnvVarSource struct {
// Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,
// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
// metadata.uid, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
// +optional
FieldRef *ObjectFieldSelector
@ -2693,6 +2763,12 @@ type PodSpec struct {
// If not specified, the pod will not have a domainname at all.
// +optional
Subdomain string
// If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).
// In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).
// In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN.
// If a pod does not have FQDN, this has no effect.
// +optional
SetHostnameAsFQDN *bool
// If specified, the pod's scheduling constraints
// +optional
Affinity *Affinity
@ -2725,7 +2801,7 @@ type PodSpec struct {
// PreemptionPolicy is the Policy for preempting pods with lower priority.
// One of Never, PreemptLowerPriority.
// Defaults to PreemptLowerPriority if unset.
// This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.
// This field is beta-level, gated by the NonPreemptingPriority feature-gate.
// +optional
PreemptionPolicy *PreemptionPolicy
// Specifies the DNS parameters of a pod.
@ -2743,8 +2819,7 @@ type PodSpec struct {
// to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run.
// If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an
// empty definition that uses the default runtime handler.
// More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md
// This is a beta feature as of Kubernetes v1.14.
// More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class/README.md
// +optional
RuntimeClassName *string
// Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
@ -2764,7 +2839,6 @@ type PodSpec struct {
EnableServiceLinks *bool
// TopologySpreadConstraints describes how a group of pods ought to spread across topology
// domains. Scheduler will schedule pods in a way which abides by the constraints.
// This field is only honored by clusters that enable the EvenPodsSpread feature.
// All topologySpreadConstraints are ANDed.
// +optional
TopologySpreadConstraints []TopologySpreadConstraint
@ -2885,15 +2959,43 @@ type PodSecurityContext struct {
// volume types which support fsGroup based ownership(and permissions).
// It will have no effect on ephemeral volume types such as: secret, configmaps
// and emptydir.
// Valid values are "OnRootMismatch" and "Always". If not specified defaults to "Always".
// Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used.
// +optional
FSGroupChangePolicy *PodFSGroupChangePolicy
// Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
// sysctls (by the container runtime) might fail to launch.
// +optional
Sysctls []Sysctl
// The seccomp options to use by the containers in this pod.
// +optional
SeccompProfile *SeccompProfile
}

// SeccompProfile defines a pod/container's seccomp profile settings.
// Only one profile source may be set.
// +union
type SeccompProfile struct {
// +unionDiscriminator
Type SeccompProfileType
// Load a profile defined in static file on the node.
// The profile must be preconfigured on the node to work.
// LocalhostProfile cannot be an absolute nor a descending path.
// +optional
LocalhostProfile *string
}

// SeccompProfileType defines the supported seccomp profile types.
type SeccompProfileType string

const (
// SeccompProfileTypeUnconfined is when no seccomp profile is applied (A.K.A. unconfined).
SeccompProfileTypeUnconfined SeccompProfileType = "Unconfined"
// SeccompProfileTypeRuntimeDefault represents the default container runtime seccomp profile.
SeccompProfileTypeRuntimeDefault SeccompProfileType = "RuntimeDefault"
// SeccompProfileTypeLocalhost represents custom made profiles stored on the node's disk.
SeccompProfileTypeLocalhost SeccompProfileType = "Localhost"
)
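These SeccompProfile fields replace the deprecated seccomp annotations shown earlier in this diff. A minimal sketch of setting a pod-level RuntimeDefault profile with a container-level Localhost override, using the public k8s.io/api/core/v1 types; the image and profile path are placeholders:

package main

import v1 "k8s.io/api/core/v1"

func seccompExample() v1.PodSpec {
	localProfile := "profiles/audit.json" // assumed path relative to the kubelet seccomp root
	return v1.PodSpec{
		// Pod-level default: the container runtime's default profile.
		SecurityContext: &v1.PodSecurityContext{
			SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeRuntimeDefault},
		},
		Containers: []v1.Container{{
			Name:  "app",
			Image: "registry.example.com/app:latest", // placeholder image
			// Container-level override: a profile preloaded on the node.
			SecurityContext: &v1.SecurityContext{
				SeccompProfile: &v1.SeccompProfile{
					Type:             v1.SeccompProfileTypeLocalhost,
					LocalhostProfile: &localProfile,
				},
			},
		}},
	}
}

func main() { _ = seccompExample() }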
// PodQOSClass defines the supported qos classes of Pods.
type PodQOSClass string

@ -3373,12 +3475,23 @@ const (
ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster"
)

// These are the valid conditions of a service.
const (
// LoadBalancerPortsError represents the condition of the requested ports
// on the cloud load balancer instance.
LoadBalancerPortsError = "LoadBalancerPortsError"
)

// ServiceStatus represents the current status of a service
type ServiceStatus struct {
// LoadBalancer contains the current status of the load-balancer,
// if one is present.
// +optional
LoadBalancer LoadBalancerStatus

// Current service condition
// +optional
Conditions []metav1.Condition
}

// LoadBalancerStatus represents the status of a load-balancer
@ -3401,10 +3514,20 @@ type LoadBalancerIngress struct {
// (typically AWS load-balancers)
// +optional
Hostname string

// Ports is a list of records of service ports
// If used, every port defined in the service should have an entry in it
// +optional
Ports []PortStatus
}

const (
// MaxServiceTopologyKeys is the largest number of topology keys allowed on a service
MaxServiceTopologyKeys = 16
)

// IPFamily represents the IP Family (IPv4 or IPv6). This type is used
// to express the family of an IP expressed by a type (i.e. service.Spec.IPFamily)
// to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
type IPFamily string

const (
@ -3412,8 +3535,29 @@ const (
IPv4Protocol IPFamily = "IPv4"
// IPv6Protocol indicates that this IP is IPv6 protocol
IPv6Protocol IPFamily = "IPv6"
// MaxServiceTopologyKeys is the largest number of topology keys allowed on a service
MaxServiceTopologyKeys = 16
)

// IPFamilyPolicyType represents the dual-stack-ness requested or required by a Service
type IPFamilyPolicyType string

const (
// IPFamilyPolicySingleStack indicates that this service is required to have a single IPFamily.
// The IPFamily assigned is based on the default IPFamily used by the cluster
// or as identified by service.spec.ipFamilies field
IPFamilyPolicySingleStack IPFamilyPolicyType = "SingleStack"
// IPFamilyPolicyPreferDualStack indicates that this service prefers dual-stack when
// the cluster is configured for dual-stack. If the cluster is not configured
// for dual-stack the service will be assigned a single IPFamily. If the IPFamily is not
// set in service.spec.ipFamilies then the service will be assigned the default IPFamily
// configured on the cluster
IPFamilyPolicyPreferDualStack IPFamilyPolicyType = "PreferDualStack"
// IPFamilyPolicyRequireDualStack indicates that this service requires dual-stack. Using
// IPFamilyPolicyRequireDualStack on a single stack cluster will result in validation errors. The
// IPFamilies (and their order) assigned to this service is based on service.spec.ipFamilies. If
// service.spec.ipFamilies was not provided then it will be assigned according to how they are
// configured on the cluster. If service.spec.ipFamilies has only one entry then the alternative
// IPFamily will be added by apiserver
IPFamilyPolicyRequireDualStack IPFamilyPolicyType = "RequireDualStack"
)
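The IPFamilyPolicyType values above drive the new dual-stack behaviour together with the ClusterIPs and IPFamilies fields that follow. A sketch of requesting dual-stack for a Service with the public k8s.io/api/core/v1 types from this release; selector and port values are illustrative:

package main

import v1 "k8s.io/api/core/v1"

func dualStackServiceSpec() v1.ServiceSpec {
	policy := v1.IPFamilyPolicyRequireDualStack
	return v1.ServiceSpec{
		Selector:       map[string]string{"app": "demo"},
		Ports:          []v1.ServicePort{{Name: "http", Port: 80}},
		IPFamilyPolicy: &policy,
		// Order matters: the first family is the primary one and determines the ClusterIP.
		IPFamilies: []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol},
	}
}

func main() { _ = dualStackServiceSpec() }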
// ServiceSpec describes the attributes that a user creates on a service
@ -3458,6 +3602,36 @@ type ServiceSpec struct {
// +optional
ClusterIP string

// ClusterIPs identifies all the ClusterIPs assigned to this
// service. ClusterIPs are assigned or reserved based on the values of
// service.spec.ipFamilies. A maximum of two entries (dual-stack IPs) are
// allowed in ClusterIPs. The IPFamily of each ClusterIP must match
// values provided in service.spec.ipFamilies. Clients using ClusterIPs must
// keep it in sync with ClusterIP (if provided) by having ClusterIP matching
// first element of ClusterIPs.
// +optional
ClusterIPs []string

// IPFamilies identifies all the IPFamilies assigned for this Service. If a value
// was not provided for IPFamilies it will be defaulted based on the cluster
// configuration and the value of service.spec.ipFamilyPolicy. A maximum of two
// values (dual-stack IPFamilies) are allowed in IPFamilies. IPFamilies field is
// conditionally mutable: it allows for adding or removing a secondary IPFamily,
// but it does not allow changing the primary IPFamily of the service.
// +optional
IPFamilies []IPFamily

// IPFamilyPolicy represents the dual-stack-ness requested or required by this
// Service. If there is no value provided, then this Service will be considered
// SingleStack (single IPFamily). Services can be SingleStack (single IPFamily),
// PreferDualStack (two dual-stack IPFamilies on dual-stack clusters or single
// IPFamily on single-stack clusters), or RequireDualStack (two dual-stack IPFamilies
// on dual-stack configured clusters, otherwise fail). The IPFamilies and ClusterIPs assigned
// to this service can be controlled by service.spec.ipFamilies and service.spec.clusterIPs
// respectively.
// +optional
IPFamilyPolicy *IPFamilyPolicyType

// ExternalName is the external reference that kubedns or equivalent will
// return as a CNAME record for this service. No proxying will be involved.
// Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123)
@ -3508,25 +3682,17 @@ type ServiceSpec struct {
// +optional
HealthCheckNodePort int32

// publishNotReadyAddresses, when set to true, indicates that DNS implementations
// must publish the notReadyAddresses of subsets for the Endpoints associated with
// the Service. The default value is false.
// The primary use case for setting this field is to use a StatefulSet's Headless Service
// to propagate SRV records for its Pods without respect to their readiness for purpose
// of peer discovery.
// publishNotReadyAddresses indicates that any agent which deals with endpoints for this
// Service should disregard any indications of ready/not-ready.
// The primary use case for setting this field is for a StatefulSet's Headless Service to
// propagate SRV DNS records for its Pods for the purpose of peer discovery.
// The Kubernetes controllers that generate Endpoints and EndpointSlice resources for
// Services interpret this to mean that all endpoints are considered "ready" even if the
// Pods themselves are not. Agents which consume only Kubernetes generated endpoints
// through the Endpoints or EndpointSlice resources can safely assume this behavior.
// +optional
PublishNotReadyAddresses bool

// ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs.
// IPv6). If a specific IP family is requested, the clusterIP field will be allocated from that family, if it is
// available in the cluster. If no IP family is requested, the cluster's primary IP family will be used.
// Other IP fields (loadBalancerIP, loadBalancerSourceRanges, externalIPs) and controllers which
// allocate external load-balancers should use the same IP family. Endpoints for this Service will be of
// this family. This field is immutable after creation. Assigning a ServiceIPFamily not available in the
// cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment.
// +optional
IPFamily *IPFamily

// topologyKeys is a preference-order list of topology keys which
// implementations of services should use to preferentially sort endpoints
// when accessing this Service, it can not be used at the same time as
@ -3539,8 +3705,18 @@ type ServiceSpec struct {
// The special value "*" may be used to mean "any topology". This catch-all
// value, if used, only makes sense as the last value in the list.
// If this is not specified or empty, no topology constraints will be applied.
// This field is alpha-level and is only honored by servers that enable the ServiceTopology feature.
// +optional
TopologyKeys []string

// allocateLoadBalancerNodePorts defines if NodePorts will be automatically
// allocated for services with type LoadBalancer. Default is "true". It may be
// set to "false" if the cluster load-balancer does not rely on NodePorts.
// allocateLoadBalancerNodePorts may only be set for services with type LoadBalancer
// and will be cleared if the type is changed to any other type.
// This field is alpha-level and is only honored by servers that enable the ServiceLBNodePortControl feature.
// +optional
AllocateLoadBalancerNodePorts *bool
}

// ServicePort represents the port on which the service is exposed
@ -3560,7 +3736,6 @@ type ServicePort struct {
// RFC-6335 and http://www.iana.org/assignments/service-names).
// Non-standard protocols should use prefixed names such as
// mycompany.com/my-custom-protocol.
// Field can be enabled with ServiceAppProtocol feature gate.
// +optional
AppProtocol *string

@ -3711,7 +3886,6 @@ type EndpointPort struct {
// RFC-6335 and http://www.iana.org/assignments/service-names).
// Non-standard protocols should use prefixed names such as
// mycompany.com/my-custom-protocol.
// Field can be enabled with ServiceAppProtocol feature gate.
// +optional
AppProtocol *string
}
@ -3816,7 +3990,7 @@ type NodeSystemInfo struct {
MachineID string
// SystemUUID reported by the node. For unique machine identification
// MachineID is preferred. This field is specific to Red Hat hosts
// https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html
// https://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html/rhsm/uuid
SystemUUID string
// Boot ID reported by the node.
BootID string
@ -4037,7 +4211,7 @@ type NodeAddress struct {
}

// NodeResources is an object for conveying resource information about a node.
// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details.
// see https://kubernetes.io/docs/concepts/architecture/nodes/#capacity for more details.
type NodeResources struct {
// Capacity represents the available resources of a node
// +optional
@ -4445,14 +4619,19 @@ const (

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Event is a report of an event somewhere in the cluster.
// Event is a report of an event somewhere in the cluster. Events
// have a limited retention time and triggers and messages may evolve
// with time. Event consumers should not rely on the timing of an event
// with a given Reason reflecting a consistent underlying trigger, or the
// continued existence of events with that Reason. Events should be
// treated as informative, best-effort, supplemental data.
// TODO: Decide whether to store these separately or with the object they apply to.
type Event struct {
metav1.TypeMeta
// +optional

metav1.ObjectMeta

// Required. The object that this event is about. Mapped to events.Event.regarding
// The object that this event is about. Mapped to events.Event.regarding
// +optional
InvolvedObject ObjectReference

@ -4519,21 +4698,8 @@ type EventSeries struct {
Count int32
// Time of the last occurrence observed
LastObservedTime metav1.MicroTime
// State of this Series: Ongoing or Finished
// Deprecated. Planned removal for 1.18
State EventSeriesState
}

// EventSeriesState defines the state of event series
type EventSeriesState string

// These are valid values of event series state
const (
EventSeriesStateOngoing EventSeriesState = "Ongoing"
EventSeriesStateFinished EventSeriesState = "Finished"
EventSeriesStateUnknown EventSeriesState = "Unknown"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// EventList is a list of events.
@ -4778,7 +4944,7 @@ type Secret struct {

// Immutable field, if set, ensures that data stored in the Secret cannot
// be updated (only object metadata can be modified).
// This is an alpha field enabled by ImmutableEphemeralVolumes feature gate.
// This is a beta field enabled by ImmutableEphemeralVolumes feature gate.
// +optional
Immutable *bool

@ -4787,7 +4953,7 @@ type Secret struct {
// base64 encoded string, representing the arbitrary (possibly non-string)
// data value here.
// +optional
Data map[string][]byte
Data map[string][]byte `datapolicy:"password,security-key,token"`

// Used to facilitate programmatic handling of secret data.
// +optional
@ -4906,7 +5072,7 @@ type ConfigMap struct {

// Immutable field, if set, ensures that data stored in the ConfigMap cannot
// be updated (only object metadata can be modified).
// This is an alpha field enabled by ImmutableEphemeralVolumes feature gate.
// This is a beta field enabled by ImmutableEphemeralVolumes feature gate.
// +optional
Immutable *bool

@ -5003,6 +5169,7 @@ type ComponentCondition struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ComponentStatus (and ComponentStatusList) holds the cluster validation info.
// Deprecated: This API is deprecated in v1.19+
type ComponentStatus struct {
metav1.TypeMeta
// +optional
@ -5015,6 +5182,7 @@ type ComponentStatus struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ComponentStatusList represents the list of component statuses
// Deprecated: This API is deprecated in v1.19+
type ComponentStatusList struct {
metav1.TypeMeta
// +optional
@ -5081,6 +5249,11 @@ type SecurityContext struct {
// readonly paths and masked paths.
// +optional
ProcMount *ProcMountType
// The seccomp options to use by this container. If seccomp options are
// provided at both the pod & container level, the container options
// override the pod options.
// +optional
SeccompProfile *SeccompProfile
}

// ProcMountType defines the type of proc mount
@ -5187,8 +5360,8 @@ const (
// TopologySpreadConstraint specifies how to spread matching pods among the given topology.
type TopologySpreadConstraint struct {
// MaxSkew describes the degree to which pods may be unevenly distributed.
// It's the maximum permitted difference between the number of matching pods in
// any two topology domains of a given topology type.
// When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference
// between the number of matching pods in the target topology and the global minimum.
// For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
// labelSelector spread as 1/1/0:
// +-------+-------+-------+
@ -5200,6 +5373,8 @@ type TopologySpreadConstraint struct {
// scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2)
// violate MaxSkew(1).
// - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
// When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
// to topologies that satisfy it.
// It's a required field. Default value is 1 and 0 is not allowed.
MaxSkew int32
// TopologyKey is the key of node labels. Nodes that have a label with this key
@ -5210,10 +5385,13 @@ type TopologySpreadConstraint struct {
TopologyKey string
// WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy
// the spread constraint.
// - DoNotSchedule (default) tells the scheduler not to schedule it
// - ScheduleAnyway tells the scheduler to still schedule it
// It's considered as "Unsatisfiable" if and only if placing incoming pod on any
// topology violates "MaxSkew".
// - DoNotSchedule (default) tells the scheduler not to schedule it.
// - ScheduleAnyway tells the scheduler to schedule the pod in any location,
// but giving higher precedence to topologies that would help reduce the
// skew.
// A constraint is considered "Unsatisfiable" for an incoming pod
// if and only if every possible node assigment for that pod would violate
// "MaxSkew" on some topology.
// For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
// labelSelector spread as 3/1/1:
// +-------+-------+-------+
@ -5233,3 +5411,32 @@ type TopologySpreadConstraint struct {
// +optional
LabelSelector *metav1.LabelSelector
}
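The MaxSkew/WhenUnsatisfiable semantics clarified above are easiest to see in a concrete constraint. A short sketch built with the public k8s.io/api/core/v1 types; the zone topology key is the standard well-known label and the selector labels are illustrative:

package main

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func zoneSpread() v1.TopologySpreadConstraint {
	return v1.TopologySpreadConstraint{
		MaxSkew:           1, // at most one more matching pod in any zone than the global minimum
		TopologyKey:       "topology.kubernetes.io/zone",
		WhenUnsatisfiable: v1.DoNotSchedule,
		LabelSelector:     &metav1.LabelSelector{MatchLabels: map[string]string{"app": "demo"}},
	}
}

func main() { _ = zoneSpread() }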
// These are the built-in errors for PortStatus.
const (
// MixedProtocolNotSupported error in PortStatus means that the cloud provider
// can't ensure the port on the load balancer because mixed values of protocols
// on the same LoadBalancer type of Service are not supported by the cloud provider.
MixedProtocolNotSupported = "MixedProtocolNotSupported"
)

// PortStatus represents the error condition of a service port
type PortStatus struct {
// Port is the port number of the service port of which status is recorded here
Port int32
// Protocol is the protocol of the service port of which status is recorded here
Protocol Protocol
// Error is to record the problem with the service port
// The format of the error shall comply with the following rules:
// - built-in error values shall be specified in this file and those shall use
// CamelCase names
// - cloud provider specific error values must have names that comply with the
// format foo.example.com/CamelCase.
// ---
// The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
// +optional
// +kubebuilder:validation:Required
// +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`
// +kubebuilder:validation:MaxLength=316
Error *string
}
1
vendor/k8s.io/kubernetes/pkg/apis/core/v1/BUILD
generated
vendored
1
vendor/k8s.io/kubernetes/pkg/apis/core/v1/BUILD
generated
vendored
@ -27,7 +27,6 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/utils/net:go_default_library",
"//vendor/k8s.io/utils/pointer:go_default_library",
],
)
2
vendor/k8s.io/kubernetes/pkg/apis/core/v1/OWNERS
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/apis/core/v1/OWNERS
generated
vendored
@ -21,13 +21,11 @@ reviewers:
- sttts
- dchen1107
- saad-ali
- zmerlynn
- luxas
- janetkuo
- justinsb
- ncdc
- tallclair
- eparis
- piosz
- jsafrane
- dims
1
vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go
generated
vendored
1
vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go
generated
vendored
@ -435,6 +435,7 @@ func AddFieldLabelConversionsForEvent(scheme *runtime.Scheme) error {
"involvedObject.resourceVersion",
"involvedObject.fieldPath",
"reason",
"reportingComponent",
"source",
"type",
"metadata.namespace",
74
vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go
generated
vendored
74
vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go
generated
vendored
@ -19,7 +19,7 @@ package v1
import (
"time"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/parsers"
@ -27,7 +27,6 @@ import (

utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
utilnet "k8s.io/utils/net"
)

func addDefaultingFuncs(scheme *runtime.Scheme) error {
@ -71,11 +70,6 @@ func SetDefaults_Volume(obj *v1.Volume) {
}
}
}
func SetDefaults_ContainerPort(obj *v1.ContainerPort) {
if obj.Protocol == "" {
obj.Protocol = v1.ProtocolTCP
}
}
func SetDefaults_Container(obj *v1.Container) {
if obj.ImagePullPolicy == "" {
// Ignore error and assume it has been validated elsewhere
@ -96,6 +90,10 @@ func SetDefaults_Container(obj *v1.Container) {
}
}

func SetDefaults_EphemeralContainer(obj *v1.EphemeralContainer) {
SetDefaults_Container((*v1.Container)(&obj.EphemeralContainerCommon))
}

func SetDefaults_Service(obj *v1.Service) {
if obj.Spec.SessionAffinity == "" {
obj.Spec.SessionAffinity = v1.ServiceAffinityNone
@ -133,32 +131,44 @@ func SetDefaults_Service(obj *v1.Service) {
obj.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster
}

// if dualstack feature gate is on then we need to default
// Spec.IPFamily correctly. This is to cover the case
// when an existing cluster have been converted to dualstack
// i.e. it already contain services with Spec.IPFamily==nil
if utilfeature.DefaultFeatureGate.Enabled(features.IPv6DualStack) &&
obj.Spec.Type != v1.ServiceTypeExternalName &&
obj.Spec.ClusterIP != "" && /*has an ip already set*/
obj.Spec.ClusterIP != "None" && /* not converting from ExternalName to other */
obj.Spec.IPFamily == nil /* family was not previously set */ {
if utilfeature.DefaultFeatureGate.Enabled(features.IPv6DualStack) {
// Default obj.Spec.IPFamilyPolicy if we *know* we can, otherwise it will
// be handled later in allocation.
if obj.Spec.Type != v1.ServiceTypeExternalName {
if obj.Spec.IPFamilyPolicy == nil {
if len(obj.Spec.ClusterIPs) == 2 || len(obj.Spec.IPFamilies) == 2 {
requireDualStack := v1.IPFamilyPolicyRequireDualStack
obj.Spec.IPFamilyPolicy = &requireDualStack
}
}

// there is a change that the ClusterIP (set by user) is unparsable.
// in this case, the family will be set mistakenly to ipv4 (because
// the util function does not parse errors *sigh*). The error
// will be caught in validation which asserts the validity of the
// IP and the service object will not be persisted with the wrong IP
// family
// If the user demanded dual-stack, but only specified one family, we add
// the other.
if obj.Spec.IPFamilyPolicy != nil && *(obj.Spec.IPFamilyPolicy) == v1.IPFamilyPolicyRequireDualStack && len(obj.Spec.IPFamilies) == 1 {
if obj.Spec.IPFamilies[0] == v1.IPv4Protocol {
obj.Spec.IPFamilies = append(obj.Spec.IPFamilies, v1.IPv6Protocol)
} else {
obj.Spec.IPFamilies = append(obj.Spec.IPFamilies, v1.IPv4Protocol)
}

ipv6 := v1.IPv6Protocol
ipv4 := v1.IPv4Protocol
if utilnet.IsIPv6String(obj.Spec.ClusterIP) {
obj.Spec.IPFamily = &ipv6
} else {
obj.Spec.IPFamily = &ipv4
// Any other dual-stack defaulting depends on cluster configuration.
// Further IPFamilies, IPFamilyPolicy defaulting is in ClusterIP alloc/reserve logic
// NOTE: strategy handles cases where ClusterIPs is used (but not ClusterIP).
}
}

// any other defaulting depends on cluster configuration.
// further IPFamilies, IPFamilyPolicy defaulting is in ClusterIP alloc/reserve logic
// note: conversion logic handles cases where ClusterIPs is used (but not ClusterIP).
}

if utilfeature.DefaultFeatureGate.Enabled(features.ServiceLBNodePortControl) {
if obj.Spec.Type == v1.ServiceTypeLoadBalancer {
if obj.Spec.AllocateLoadBalancerNodePorts == nil {
obj.Spec.AllocateLoadBalancerNodePorts = utilpointer.BoolPtr(true)
}
}
}
}
func SetDefaults_Pod(obj *v1.Pod) {
// If limits are specified, but requests are not, default requests to limits
@ -285,9 +295,11 @@ func SetDefaults_PersistentVolumeClaim(obj *v1.PersistentVolumeClaim) {
if obj.Status.Phase == "" {
obj.Status.Phase = v1.ClaimPending
}
if obj.Spec.VolumeMode == nil {
obj.Spec.VolumeMode = new(v1.PersistentVolumeMode)
*obj.Spec.VolumeMode = v1.PersistentVolumeFilesystem
}
func SetDefaults_PersistentVolumeClaimSpec(obj *v1.PersistentVolumeClaimSpec) {
if obj.VolumeMode == nil {
obj.VolumeMode = new(v1.PersistentVolumeMode)
*obj.VolumeMode = v1.PersistentVolumeFilesystem
}
}
func SetDefaults_ISCSIVolumeSource(obj *v1.ISCSIVolumeSource) {
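The rewritten SetDefaults_Service above only sets IPFamilyPolicy when the intent is unambiguous (two ClusterIPs or two IPFamilies) and leaves the rest to the ClusterIP allocator. A simplified sketch of just that rule, with the feature-gate and allocator details omitted and the public v1 types used for illustration:

package main

import v1 "k8s.io/api/core/v1"

func defaultIPFamilyPolicy(spec *v1.ServiceSpec) {
	if spec.Type == v1.ServiceTypeExternalName || spec.IPFamilyPolicy != nil {
		return
	}
	// Two ClusterIPs or two IPFamilies can only mean the user wants dual-stack.
	if len(spec.ClusterIPs) == 2 || len(spec.IPFamilies) == 2 {
		policy := v1.IPFamilyPolicyRequireDualStack
		spec.IPFamilyPolicy = &policy
	}
	// Anything else is decided later, during ClusterIP allocation/reservation.
}

func main() {
	spec := &v1.ServiceSpec{IPFamilies: []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol}}
	defaultIPFamilyPolicy(spec) // spec.IPFamilyPolicy is now RequireDualStack
}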
3
vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/BUILD
generated
vendored
3
vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/BUILD
generated
vendored
@ -12,10 +12,8 @@ go_test(
embed = [":go_default_library"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
],
)
@ -28,7 +26,6 @@ go_library(
"//pkg/apis/core/helper:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/selection:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library",
117
vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go
generated
vendored
117
vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go
generated
vendored
@ -23,7 +23,6 @@ import (

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/util/validation"
@ -131,22 +130,19 @@ func IsOvercommitAllowed(name v1.ResourceName) bool {
!IsHugePageResourceName(name)
}

// IsAttachableVolumeResourceName returns true when the resource name is prefixed in attachable volume
func IsAttachableVolumeResourceName(name v1.ResourceName) bool {
return strings.HasPrefix(string(name), v1.ResourceAttachableVolumesPrefix)
}

// Extended and Hugepages resources
func IsScalarResourceName(name v1.ResourceName) bool {
return IsExtendedResourceName(name) || IsHugePageResourceName(name) ||
IsPrefixedNativeResource(name) || IsAttachableVolumeResourceName(name)
}

// this function aims to check if the service's ClusterIP is set or not
// IsServiceIPSet aims to check if the service's ClusterIP is set or not
// the objective is not to perform validation here
func IsServiceIPSet(service *v1.Service) bool {
return service.Spec.ClusterIP != v1.ClusterIPNone && service.Spec.ClusterIP != ""
}

// LoadBalancerStatusEqual evaluates the given load balancers' ingress IP addresses
// and hostnames and returns true if equal or false if otherwise
// TODO: make method on LoadBalancerStatus?
func LoadBalancerStatusEqual(l, r *v1.LoadBalancerStatus) bool {
return ingressSliceEqual(l.Ingress, r.Ingress)
@ -191,7 +187,7 @@ func GetAccessModesAsString(modes []v1.PersistentVolumeAccessMode) string {
return strings.Join(modesStr, ",")
}

// GetAccessModesAsString returns an array of AccessModes from a string created by GetAccessModesAsString
// GetAccessModesFromString returns an array of AccessModes from a string created by GetAccessModesAsString
func GetAccessModesFromString(modes string) []v1.PersistentVolumeAccessMode {
strmodes := strings.Split(modes, ",")
accessModes := []v1.PersistentVolumeAccessMode{}
@ -229,72 +225,6 @@ func containsAccessMode(modes []v1.PersistentVolumeAccessMode, mode v1.Persisten
return false
}

// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements
// labels.Selector.
func NodeSelectorRequirementsAsSelector(nsm []v1.NodeSelectorRequirement) (labels.Selector, error) {
if len(nsm) == 0 {
return labels.Nothing(), nil
}
selector := labels.NewSelector()
for _, expr := range nsm {
var op selection.Operator
switch expr.Operator {
case v1.NodeSelectorOpIn:
op = selection.In
case v1.NodeSelectorOpNotIn:
op = selection.NotIn
case v1.NodeSelectorOpExists:
op = selection.Exists
case v1.NodeSelectorOpDoesNotExist:
op = selection.DoesNotExist
case v1.NodeSelectorOpGt:
op = selection.GreaterThan
case v1.NodeSelectorOpLt:
op = selection.LessThan
default:
return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator)
}
r, err := labels.NewRequirement(expr.Key, op, expr.Values)
if err != nil {
return nil, err
}
selector = selector.Add(*r)
}
return selector, nil
}

// NodeSelectorRequirementsAsFieldSelector converts the []NodeSelectorRequirement core type into a struct that implements
// fields.Selector.
func NodeSelectorRequirementsAsFieldSelector(nsm []v1.NodeSelectorRequirement) (fields.Selector, error) {
if len(nsm) == 0 {
return fields.Nothing(), nil
}

selectors := []fields.Selector{}
for _, expr := range nsm {
switch expr.Operator {
case v1.NodeSelectorOpIn:
if len(expr.Values) != 1 {
return nil, fmt.Errorf("unexpected number of value (%d) for node field selector operator %q",
len(expr.Values), expr.Operator)
}
selectors = append(selectors, fields.OneTermEqualSelector(expr.Key, expr.Values[0]))

case v1.NodeSelectorOpNotIn:
if len(expr.Values) != 1 {
return nil, fmt.Errorf("unexpected number of value (%d) for node field selector operator %q",
len(expr.Values), expr.Operator)
}
selectors = append(selectors, fields.OneTermNotEqualSelector(expr.Key, expr.Values[0]))

default:
return nil, fmt.Errorf("%q is not a valid node field selector operator", expr.Operator)
}
}

return fields.AndSelectors(selectors...), nil
}

// NodeSelectorRequirementKeysExistInNodeSelectorTerms checks if a NodeSelectorTerm with key is already specified in terms
func NodeSelectorRequirementKeysExistInNodeSelectorTerms(reqs []v1.NodeSelectorRequirement, terms []v1.NodeSelectorTerm) bool {
for _, req := range reqs {
@ -309,39 +239,6 @@ func NodeSelectorRequirementKeysExistInNodeSelectorR
return false
}

// MatchNodeSelectorTerms checks whether the node labels and fields match node selector terms in ORed;
// nil or empty term matches no objects.
func MatchNodeSelectorTerms(
nodeSelectorTerms []v1.NodeSelectorTerm,
nodeLabels labels.Set,
nodeFields fields.Set,
) bool {
for _, req := range nodeSelectorTerms {
// nil or empty term selects no objects
if len(req.MatchExpressions) == 0 && len(req.MatchFields) == 0 {
continue
}

if len(req.MatchExpressions) != 0 {
labelSelector, err := NodeSelectorRequirementsAsSelector(req.MatchExpressions)
if err != nil || !labelSelector.Matches(nodeLabels) {
continue
}
}

if len(req.MatchFields) != 0 {
fieldSelector, err := NodeSelectorRequirementsAsFieldSelector(req.MatchFields)
if err != nil || !fieldSelector.Matches(nodeFields) {
continue
}
}

return true
}

return false
}

// TopologySelectorRequirementsAsSelector converts the []TopologySelectorLabelRequirement api type into a struct
// that implements labels.Selector.
func TopologySelectorRequirementsAsSelector(tsm []v1.TopologySelectorLabelRequirement) (labels.Selector, error) {
@ -467,7 +364,7 @@ func getFilteredTaints(taints []v1.Taint, inclusionFilter taintsFilterFunc) []v1
return filteredTaints
}

// Returns true and list of Tolerations matching all Taints if all are tolerated, or false otherwise.
// GetMatchingTolerations returns true and list of Tolerations matching all Taints if all are tolerated, or false otherwise.
func GetMatchingTolerations(taints []v1.Taint, tolerations []v1.Toleration) (bool, []v1.Toleration) {
if len(taints) == 0 {
return true, []v1.Toleration{}
@ -492,6 +389,8 @@ func GetMatchingTolerations(taints []v1.Taint, tolerations []v1.Toleration) (boo
return true, result
}

// GetAvoidPodsFromNodeAnnotations scans the list of annotations and
// returns the pods that needs to be avoided for this node from scheduling
func GetAvoidPodsFromNodeAnnotations(annotations map[string]string) (v1.AvoidPods, error) {
var avoidPods v1.AvoidPods
if len(annotations) > 0 && annotations[v1.PreferAvoidPodsAnnotationKey] != "" {
158
vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go
generated
vendored
158
vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go
generated
vendored
@ -541,6 +541,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.EphemeralVolumeSource)(nil), (*core.EphemeralVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_EphemeralVolumeSource_To_core_EphemeralVolumeSource(a.(*v1.EphemeralVolumeSource), b.(*core.EphemeralVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.EphemeralVolumeSource)(nil), (*v1.EphemeralVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_EphemeralVolumeSource_To_v1_EphemeralVolumeSource(a.(*core.EphemeralVolumeSource), b.(*v1.EphemeralVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.Event)(nil), (*core.Event)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Event_To_core_Event(a.(*v1.Event), b.(*core.Event), scope)
}); err != nil {
@ -1131,6 +1141,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.PersistentVolumeClaimTemplate)(nil), (*core.PersistentVolumeClaimTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PersistentVolumeClaimTemplate_To_core_PersistentVolumeClaimTemplate(a.(*v1.PersistentVolumeClaimTemplate), b.(*core.PersistentVolumeClaimTemplate), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaimTemplate)(nil), (*v1.PersistentVolumeClaimTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PersistentVolumeClaimTemplate_To_v1_PersistentVolumeClaimTemplate(a.(*core.PersistentVolumeClaimTemplate), b.(*v1.PersistentVolumeClaimTemplate), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.PersistentVolumeClaimVolumeSource)(nil), (*core.PersistentVolumeClaimVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource(a.(*v1.PersistentVolumeClaimVolumeSource), b.(*core.PersistentVolumeClaimVolumeSource), scope)
}); err != nil {
@ -1371,6 +1391,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.PortStatus)(nil), (*core.PortStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PortStatus_To_core_PortStatus(a.(*v1.PortStatus), b.(*core.PortStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PortStatus)(nil), (*v1.PortStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PortStatus_To_v1_PortStatus(a.(*core.PortStatus), b.(*v1.PortStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.PortworxVolumeSource)(nil), (*core.PortworxVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource(a.(*v1.PortworxVolumeSource), b.(*core.PortworxVolumeSource), scope)
}); err != nil {
@ -1621,6 +1651,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.SeccompProfile)(nil), (*core.SeccompProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SeccompProfile_To_core_SeccompProfile(a.(*v1.SeccompProfile), b.(*core.SeccompProfile), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.SeccompProfile)(nil), (*v1.SeccompProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_SeccompProfile_To_v1_SeccompProfile(a.(*core.SeccompProfile), b.(*v1.SeccompProfile), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.Secret)(nil), (*v1.Secret)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_Secret_To_v1_Secret(a.(*core.Secret), b.(*v1.Secret), scope)
}); err != nil {
@ -3504,6 +3544,28 @@ func Convert_core_EphemeralContainers_To_v1_EphemeralContainers(in *core.Ephemer
return autoConvert_core_EphemeralContainers_To_v1_EphemeralContainers(in, out, s)
}

func autoConvert_v1_EphemeralVolumeSource_To_core_EphemeralVolumeSource(in *v1.EphemeralVolumeSource, out *core.EphemeralVolumeSource, s conversion.Scope) error {
out.VolumeClaimTemplate = (*core.PersistentVolumeClaimTemplate)(unsafe.Pointer(in.VolumeClaimTemplate))
out.ReadOnly = in.ReadOnly
return nil
}

// Convert_v1_EphemeralVolumeSource_To_core_EphemeralVolumeSource is an autogenerated conversion function.
func Convert_v1_EphemeralVolumeSource_To_core_EphemeralVolumeSource(in *v1.EphemeralVolumeSource, out *core.EphemeralVolumeSource, s conversion.Scope) error {
return autoConvert_v1_EphemeralVolumeSource_To_core_EphemeralVolumeSource(in, out, s)
}

func autoConvert_core_EphemeralVolumeSource_To_v1_EphemeralVolumeSource(in *core.EphemeralVolumeSource, out *v1.EphemeralVolumeSource, s conversion.Scope) error {
out.VolumeClaimTemplate = (*v1.PersistentVolumeClaimTemplate)(unsafe.Pointer(in.VolumeClaimTemplate))
out.ReadOnly = in.ReadOnly
return nil
}

// Convert_core_EphemeralVolumeSource_To_v1_EphemeralVolumeSource is an autogenerated conversion function.
func Convert_core_EphemeralVolumeSource_To_v1_EphemeralVolumeSource(in *core.EphemeralVolumeSource, out *v1.EphemeralVolumeSource, s conversion.Scope) error {
return autoConvert_core_EphemeralVolumeSource_To_v1_EphemeralVolumeSource(in, out, s)
}

func autoConvert_v1_Event_To_core_Event(in *v1.Event, out *core.Event, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_ObjectReference_To_core_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil {
@ -3585,7 +3647,6 @@ func Convert_core_EventList_To_v1_EventList(in *core.EventList, out *v1.EventLis
func autoConvert_v1_EventSeries_To_core_EventSeries(in *v1.EventSeries, out *core.EventSeries, s conversion.Scope) error {
out.Count = in.Count
out.LastObservedTime = in.LastObservedTime
out.State = core.EventSeriesState(in.State)
return nil
}

@ -3597,7 +3658,6 @@ func Convert_v1_EventSeries_To_core_EventSeries(in *v1.EventSeries, out *core.Ev
func autoConvert_core_EventSeries_To_v1_EventSeries(in *core.EventSeries, out *v1.EventSeries, s conversion.Scope) error {
|
||||
out.Count = in.Count
|
||||
out.LastObservedTime = in.LastObservedTime
|
||||
out.State = v1.EventSeriesState(in.State)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -4241,6 +4301,7 @@ func Convert_core_List_To_v1_List(in *core.List, out *v1.List, s conversion.Scop
|
||||
func autoConvert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(in *v1.LoadBalancerIngress, out *core.LoadBalancerIngress, s conversion.Scope) error {
|
||||
out.IP = in.IP
|
||||
out.Hostname = in.Hostname
|
||||
out.Ports = *(*[]core.PortStatus)(unsafe.Pointer(&in.Ports))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -4252,6 +4313,7 @@ func Convert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(in *v1.LoadBalan
|
||||
func autoConvert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *core.LoadBalancerIngress, out *v1.LoadBalancerIngress, s conversion.Scope) error {
|
||||
out.IP = in.IP
|
||||
out.Hostname = in.Hostname
|
||||
out.Ports = *(*[]v1.PortStatus)(unsafe.Pointer(&in.Ports))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -5141,6 +5203,32 @@ func Convert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(
|
||||
return autoConvert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1_PersistentVolumeClaimTemplate_To_core_PersistentVolumeClaimTemplate(in *v1.PersistentVolumeClaimTemplate, out *core.PersistentVolumeClaimTemplate, s conversion.Scope) error {
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
if err := Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1_PersistentVolumeClaimTemplate_To_core_PersistentVolumeClaimTemplate is an autogenerated conversion function.
|
||||
func Convert_v1_PersistentVolumeClaimTemplate_To_core_PersistentVolumeClaimTemplate(in *v1.PersistentVolumeClaimTemplate, out *core.PersistentVolumeClaimTemplate, s conversion.Scope) error {
|
||||
return autoConvert_v1_PersistentVolumeClaimTemplate_To_core_PersistentVolumeClaimTemplate(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_core_PersistentVolumeClaimTemplate_To_v1_PersistentVolumeClaimTemplate(in *core.PersistentVolumeClaimTemplate, out *v1.PersistentVolumeClaimTemplate, s conversion.Scope) error {
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
if err := Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_core_PersistentVolumeClaimTemplate_To_v1_PersistentVolumeClaimTemplate is an autogenerated conversion function.
|
||||
func Convert_core_PersistentVolumeClaimTemplate_To_v1_PersistentVolumeClaimTemplate(in *core.PersistentVolumeClaimTemplate, out *v1.PersistentVolumeClaimTemplate, s conversion.Scope) error {
|
||||
return autoConvert_core_PersistentVolumeClaimTemplate_To_v1_PersistentVolumeClaimTemplate(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource(in *v1.PersistentVolumeClaimVolumeSource, out *core.PersistentVolumeClaimVolumeSource, s conversion.Scope) error {
|
||||
out.ClaimName = in.ClaimName
|
||||
out.ReadOnly = in.ReadOnly
|
||||
@ -5942,6 +6030,7 @@ func autoConvert_v1_PodSecurityContext_To_core_PodSecurityContext(in *v1.PodSecu
|
||||
out.FSGroup = (*int64)(unsafe.Pointer(in.FSGroup))
|
||||
out.Sysctls = *(*[]core.Sysctl)(unsafe.Pointer(&in.Sysctls))
|
||||
out.FSGroupChangePolicy = (*core.PodFSGroupChangePolicy)(unsafe.Pointer(in.FSGroupChangePolicy))
|
||||
out.SeccompProfile = (*core.SeccompProfile)(unsafe.Pointer(in.SeccompProfile))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -5964,6 +6053,7 @@ func autoConvert_core_PodSecurityContext_To_v1_PodSecurityContext(in *core.PodSe
|
||||
out.FSGroup = (*int64)(unsafe.Pointer(in.FSGroup))
|
||||
out.FSGroupChangePolicy = (*v1.PodFSGroupChangePolicy)(unsafe.Pointer(in.FSGroupChangePolicy))
|
||||
out.Sysctls = *(*[]v1.Sysctl)(unsafe.Pointer(&in.Sysctls))
|
||||
out.SeccompProfile = (*v1.SeccompProfile)(unsafe.Pointer(in.SeccompProfile))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -6045,6 +6135,7 @@ func autoConvert_v1_PodSpec_To_core_PodSpec(in *v1.PodSpec, out *core.PodSpec, s
|
||||
out.PreemptionPolicy = (*core.PreemptionPolicy)(unsafe.Pointer(in.PreemptionPolicy))
|
||||
out.Overhead = *(*core.ResourceList)(unsafe.Pointer(&in.Overhead))
|
||||
out.TopologySpreadConstraints = *(*[]core.TopologySpreadConstraint)(unsafe.Pointer(&in.TopologySpreadConstraints))
|
||||
out.SetHostnameAsFQDN = (*bool)(unsafe.Pointer(in.SetHostnameAsFQDN))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -6083,6 +6174,7 @@ func autoConvert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *v1.PodSpec, s
|
||||
out.ImagePullSecrets = *(*[]v1.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets))
|
||||
out.Hostname = in.Hostname
|
||||
out.Subdomain = in.Subdomain
|
||||
out.SetHostnameAsFQDN = (*bool)(unsafe.Pointer(in.SetHostnameAsFQDN))
|
||||
out.Affinity = (*v1.Affinity)(unsafe.Pointer(in.Affinity))
|
||||
out.SchedulerName = in.SchedulerName
|
||||
out.Tolerations = *(*[]v1.Toleration)(unsafe.Pointer(&in.Tolerations))
|
||||
@ -6242,6 +6334,30 @@ func autoConvert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in *core.PodTemplate
|
||||
return nil
|
||||
}
|
||||
|
||||
func autoConvert_v1_PortStatus_To_core_PortStatus(in *v1.PortStatus, out *core.PortStatus, s conversion.Scope) error {
|
||||
out.Port = in.Port
|
||||
out.Protocol = core.Protocol(in.Protocol)
|
||||
out.Error = (*string)(unsafe.Pointer(in.Error))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1_PortStatus_To_core_PortStatus is an autogenerated conversion function.
|
||||
func Convert_v1_PortStatus_To_core_PortStatus(in *v1.PortStatus, out *core.PortStatus, s conversion.Scope) error {
|
||||
return autoConvert_v1_PortStatus_To_core_PortStatus(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_core_PortStatus_To_v1_PortStatus(in *core.PortStatus, out *v1.PortStatus, s conversion.Scope) error {
|
||||
out.Port = in.Port
|
||||
out.Protocol = v1.Protocol(in.Protocol)
|
||||
out.Error = (*string)(unsafe.Pointer(in.Error))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_core_PortStatus_To_v1_PortStatus is an autogenerated conversion function.
|
||||
func Convert_core_PortStatus_To_v1_PortStatus(in *core.PortStatus, out *v1.PortStatus, s conversion.Scope) error {
|
||||
return autoConvert_core_PortStatus_To_v1_PortStatus(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource(in *v1.PortworxVolumeSource, out *core.PortworxVolumeSource, s conversion.Scope) error {
|
||||
out.VolumeID = in.VolumeID
|
||||
out.FSType = in.FSType
|
||||
@ -7000,6 +7116,28 @@ func Convert_core_ScopedResourceSelectorRequirement_To_v1_ScopedResourceSelector
|
||||
return autoConvert_core_ScopedResourceSelectorRequirement_To_v1_ScopedResourceSelectorRequirement(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1_SeccompProfile_To_core_SeccompProfile(in *v1.SeccompProfile, out *core.SeccompProfile, s conversion.Scope) error {
|
||||
out.Type = core.SeccompProfileType(in.Type)
|
||||
out.LocalhostProfile = (*string)(unsafe.Pointer(in.LocalhostProfile))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1_SeccompProfile_To_core_SeccompProfile is an autogenerated conversion function.
|
||||
func Convert_v1_SeccompProfile_To_core_SeccompProfile(in *v1.SeccompProfile, out *core.SeccompProfile, s conversion.Scope) error {
|
||||
return autoConvert_v1_SeccompProfile_To_core_SeccompProfile(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_core_SeccompProfile_To_v1_SeccompProfile(in *core.SeccompProfile, out *v1.SeccompProfile, s conversion.Scope) error {
|
||||
out.Type = v1.SeccompProfileType(in.Type)
|
||||
out.LocalhostProfile = (*string)(unsafe.Pointer(in.LocalhostProfile))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_core_SeccompProfile_To_v1_SeccompProfile is an autogenerated conversion function.
|
||||
func Convert_core_SeccompProfile_To_v1_SeccompProfile(in *core.SeccompProfile, out *v1.SeccompProfile, s conversion.Scope) error {
|
||||
return autoConvert_core_SeccompProfile_To_v1_SeccompProfile(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1_Secret_To_core_Secret(in *v1.Secret, out *core.Secret, s conversion.Scope) error {
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
out.Immutable = (*bool)(unsafe.Pointer(in.Immutable))
|
||||
@ -7205,6 +7343,7 @@ func autoConvert_v1_SecurityContext_To_core_SecurityContext(in *v1.SecurityConte
|
||||
out.ReadOnlyRootFilesystem = (*bool)(unsafe.Pointer(in.ReadOnlyRootFilesystem))
|
||||
out.AllowPrivilegeEscalation = (*bool)(unsafe.Pointer(in.AllowPrivilegeEscalation))
|
||||
out.ProcMount = (*core.ProcMountType)(unsafe.Pointer(in.ProcMount))
|
||||
out.SeccompProfile = (*core.SeccompProfile)(unsafe.Pointer(in.SeccompProfile))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -7224,6 +7363,7 @@ func autoConvert_core_SecurityContext_To_v1_SecurityContext(in *core.SecurityCon
|
||||
out.ReadOnlyRootFilesystem = (*bool)(unsafe.Pointer(in.ReadOnlyRootFilesystem))
|
||||
out.AllowPrivilegeEscalation = (*bool)(unsafe.Pointer(in.AllowPrivilegeEscalation))
|
||||
out.ProcMount = (*v1.ProcMountType)(unsafe.Pointer(in.ProcMount))
|
||||
out.SeccompProfile = (*v1.SeccompProfile)(unsafe.Pointer(in.SeccompProfile))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -7478,6 +7618,7 @@ func autoConvert_v1_ServiceSpec_To_core_ServiceSpec(in *v1.ServiceSpec, out *cor
|
||||
out.Ports = *(*[]core.ServicePort)(unsafe.Pointer(&in.Ports))
|
||||
out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector))
|
||||
out.ClusterIP = in.ClusterIP
|
||||
out.ClusterIPs = *(*[]string)(unsafe.Pointer(&in.ClusterIPs))
|
||||
out.Type = core.ServiceType(in.Type)
|
||||
out.ExternalIPs = *(*[]string)(unsafe.Pointer(&in.ExternalIPs))
|
||||
out.SessionAffinity = core.ServiceAffinity(in.SessionAffinity)
|
||||
@ -7488,8 +7629,10 @@ func autoConvert_v1_ServiceSpec_To_core_ServiceSpec(in *v1.ServiceSpec, out *cor
|
||||
out.HealthCheckNodePort = in.HealthCheckNodePort
|
||||
out.PublishNotReadyAddresses = in.PublishNotReadyAddresses
|
||||
out.SessionAffinityConfig = (*core.SessionAffinityConfig)(unsafe.Pointer(in.SessionAffinityConfig))
|
||||
out.IPFamily = (*core.IPFamily)(unsafe.Pointer(in.IPFamily))
|
||||
out.TopologyKeys = *(*[]string)(unsafe.Pointer(&in.TopologyKeys))
|
||||
out.IPFamilies = *(*[]core.IPFamily)(unsafe.Pointer(&in.IPFamilies))
|
||||
out.IPFamilyPolicy = (*core.IPFamilyPolicyType)(unsafe.Pointer(in.IPFamilyPolicy))
|
||||
out.AllocateLoadBalancerNodePorts = (*bool)(unsafe.Pointer(in.AllocateLoadBalancerNodePorts))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -7503,6 +7646,9 @@ func autoConvert_core_ServiceSpec_To_v1_ServiceSpec(in *core.ServiceSpec, out *v
|
||||
out.Ports = *(*[]v1.ServicePort)(unsafe.Pointer(&in.Ports))
|
||||
out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector))
|
||||
out.ClusterIP = in.ClusterIP
|
||||
out.ClusterIPs = *(*[]string)(unsafe.Pointer(&in.ClusterIPs))
|
||||
out.IPFamilies = *(*[]v1.IPFamily)(unsafe.Pointer(&in.IPFamilies))
|
||||
out.IPFamilyPolicy = (*v1.IPFamilyPolicyType)(unsafe.Pointer(in.IPFamilyPolicy))
|
||||
out.ExternalName = in.ExternalName
|
||||
out.ExternalIPs = *(*[]string)(unsafe.Pointer(&in.ExternalIPs))
|
||||
out.LoadBalancerIP = in.LoadBalancerIP
|
||||
@ -7512,8 +7658,8 @@ func autoConvert_core_ServiceSpec_To_v1_ServiceSpec(in *core.ServiceSpec, out *v
|
||||
out.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyType(in.ExternalTrafficPolicy)
|
||||
out.HealthCheckNodePort = in.HealthCheckNodePort
|
||||
out.PublishNotReadyAddresses = in.PublishNotReadyAddresses
|
||||
out.IPFamily = (*v1.IPFamily)(unsafe.Pointer(in.IPFamily))
|
||||
out.TopologyKeys = *(*[]string)(unsafe.Pointer(&in.TopologyKeys))
|
||||
out.AllocateLoadBalancerNodePorts = (*bool)(unsafe.Pointer(in.AllocateLoadBalancerNodePorts))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -7526,6 +7672,7 @@ func autoConvert_v1_ServiceStatus_To_core_ServiceStatus(in *v1.ServiceStatus, ou
|
||||
if err := Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.Conditions = *(*[]metav1.Condition)(unsafe.Pointer(&in.Conditions))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -7538,6 +7685,7 @@ func autoConvert_core_ServiceStatus_To_v1_ServiceStatus(in *core.ServiceStatus,
|
||||
if err := Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.Conditions = *(*[]metav1.Condition)(unsafe.Pointer(&in.Conditions))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -7979,6 +8127,7 @@ func autoConvert_v1_VolumeSource_To_core_VolumeSource(in *v1.VolumeSource, out *
|
||||
out.ScaleIO = (*core.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO))
|
||||
out.StorageOS = (*core.StorageOSVolumeSource)(unsafe.Pointer(in.StorageOS))
|
||||
out.CSI = (*core.CSIVolumeSource)(unsafe.Pointer(in.CSI))
|
||||
out.Ephemeral = (*core.EphemeralVolumeSource)(unsafe.Pointer(in.Ephemeral))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -8024,6 +8173,7 @@ func autoConvert_core_VolumeSource_To_v1_VolumeSource(in *core.VolumeSource, out
|
||||
out.ScaleIO = (*v1.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO))
|
||||
out.StorageOS = (*v1.StorageOSVolumeSource)(unsafe.Pointer(in.StorageOS))
|
||||
out.CSI = (*v1.CSIVolumeSource)(unsafe.Pointer(in.CSI))
|
||||
out.Ephemeral = (*v1.EphemeralVolumeSource)(unsafe.Pointer(in.Ephemeral))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
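Not part of the commit — a minimal Go sketch of how the generated conversion pair above is exercised, assuming this vendored tree is importable as k8s.io/kubernetes and that k8s.io/api/core/v1 provides the matching external type. A nil conversion.Scope is enough here because the generated body only copies fields.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	core "k8s.io/kubernetes/pkg/apis/core"
	corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
)

func main() {
	in := &v1.EphemeralVolumeSource{ReadOnly: true}
	out := &core.EphemeralVolumeSource{}
	// Generated helper from the hunk above; its body ignores the scope.
	if err := corev1.Convert_v1_EphemeralVolumeSource_To_core_EphemeralVolumeSource(in, out, nil); err != nil {
		panic(err)
	}
	fmt.Println(out.ReadOnly) // true
}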
68
vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.defaults.go
generated
vendored
@ -21,6 +21,8 @@ limitations under the License.
|
||||
package v1
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
@ -88,9 +90,12 @@ func SetObjectDefaults_EndpointsList(in *v1.EndpointsList) {
|
||||
func SetObjectDefaults_EphemeralContainers(in *v1.EphemeralContainers) {
|
||||
for i := range in.EphemeralContainers {
|
||||
a := &in.EphemeralContainers[i]
|
||||
SetDefaults_EphemeralContainer(a)
|
||||
for j := range a.EphemeralContainerCommon.Ports {
|
||||
b := &a.EphemeralContainerCommon.Ports[j]
|
||||
SetDefaults_ContainerPort(b)
|
||||
if reflect.ValueOf(b.Protocol).IsZero() {
|
||||
b.Protocol = "TCP"
|
||||
}
|
||||
}
|
||||
for j := range a.EphemeralContainerCommon.Env {
|
||||
b := &a.EphemeralContainerCommon.Env[j]
|
||||
@ -200,6 +205,7 @@ func SetObjectDefaults_PersistentVolume(in *v1.PersistentVolume) {
|
||||
|
||||
func SetObjectDefaults_PersistentVolumeClaim(in *v1.PersistentVolumeClaim) {
|
||||
SetDefaults_PersistentVolumeClaim(in)
|
||||
SetDefaults_PersistentVolumeClaimSpec(&in.Spec)
|
||||
SetDefaults_ResourceList(&in.Spec.Resources.Limits)
|
||||
SetDefaults_ResourceList(&in.Spec.Resources.Requests)
|
||||
SetDefaults_ResourceList(&in.Status.Capacity)
|
||||
@ -272,13 +278,22 @@ func SetObjectDefaults_Pod(in *v1.Pod) {
|
||||
if a.VolumeSource.ScaleIO != nil {
|
||||
SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
|
||||
}
|
||||
if a.VolumeSource.Ephemeral != nil {
|
||||
if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
|
||||
SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
|
||||
SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
|
||||
SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
|
||||
}
|
||||
}
|
||||
}
|
||||
for i := range in.Spec.InitContainers {
|
||||
a := &in.Spec.InitContainers[i]
|
||||
SetDefaults_Container(a)
|
||||
for j := range a.Ports {
|
||||
b := &a.Ports[j]
|
||||
SetDefaults_ContainerPort(b)
|
||||
if reflect.ValueOf(b.Protocol).IsZero() {
|
||||
b.Protocol = "TCP"
|
||||
}
|
||||
}
|
||||
for j := range a.Env {
|
||||
b := &a.Env[j]
|
||||
@ -326,7 +341,9 @@ func SetObjectDefaults_Pod(in *v1.Pod) {
|
||||
SetDefaults_Container(a)
|
||||
for j := range a.Ports {
|
||||
b := &a.Ports[j]
|
||||
SetDefaults_ContainerPort(b)
|
||||
if reflect.ValueOf(b.Protocol).IsZero() {
|
||||
b.Protocol = "TCP"
|
||||
}
|
||||
}
|
||||
for j := range a.Env {
|
||||
b := &a.Env[j]
|
||||
@ -371,9 +388,12 @@ func SetObjectDefaults_Pod(in *v1.Pod) {
|
||||
}
|
||||
for i := range in.Spec.EphemeralContainers {
|
||||
a := &in.Spec.EphemeralContainers[i]
|
||||
SetDefaults_EphemeralContainer(a)
|
||||
for j := range a.EphemeralContainerCommon.Ports {
|
||||
b := &a.EphemeralContainerCommon.Ports[j]
|
||||
SetDefaults_ContainerPort(b)
|
||||
if reflect.ValueOf(b.Protocol).IsZero() {
|
||||
b.Protocol = "TCP"
|
||||
}
|
||||
}
|
||||
for j := range a.EphemeralContainerCommon.Env {
|
||||
b := &a.EphemeralContainerCommon.Env[j]
|
||||
@ -478,13 +498,22 @@ func SetObjectDefaults_PodTemplate(in *v1.PodTemplate) {
|
||||
if a.VolumeSource.ScaleIO != nil {
|
||||
SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
|
||||
}
|
||||
if a.VolumeSource.Ephemeral != nil {
|
||||
if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
|
||||
SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
|
||||
SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
|
||||
SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
|
||||
}
|
||||
}
|
||||
}
|
||||
for i := range in.Template.Spec.InitContainers {
|
||||
a := &in.Template.Spec.InitContainers[i]
|
||||
SetDefaults_Container(a)
|
||||
for j := range a.Ports {
|
||||
b := &a.Ports[j]
|
||||
SetDefaults_ContainerPort(b)
|
||||
if reflect.ValueOf(b.Protocol).IsZero() {
|
||||
b.Protocol = "TCP"
|
||||
}
|
||||
}
|
||||
for j := range a.Env {
|
||||
b := &a.Env[j]
|
||||
@ -532,7 +561,9 @@ func SetObjectDefaults_PodTemplate(in *v1.PodTemplate) {
|
||||
SetDefaults_Container(a)
|
||||
for j := range a.Ports {
|
||||
b := &a.Ports[j]
|
||||
SetDefaults_ContainerPort(b)
|
||||
if reflect.ValueOf(b.Protocol).IsZero() {
|
||||
b.Protocol = "TCP"
|
||||
}
|
||||
}
|
||||
for j := range a.Env {
|
||||
b := &a.Env[j]
|
||||
@ -577,9 +608,12 @@ func SetObjectDefaults_PodTemplate(in *v1.PodTemplate) {
|
||||
}
|
||||
for i := range in.Template.Spec.EphemeralContainers {
|
||||
a := &in.Template.Spec.EphemeralContainers[i]
|
||||
SetDefaults_EphemeralContainer(a)
|
||||
for j := range a.EphemeralContainerCommon.Ports {
|
||||
b := &a.EphemeralContainerCommon.Ports[j]
|
||||
SetDefaults_ContainerPort(b)
|
||||
if reflect.ValueOf(b.Protocol).IsZero() {
|
||||
b.Protocol = "TCP"
|
||||
}
|
||||
}
|
||||
for j := range a.EphemeralContainerCommon.Env {
|
||||
b := &a.EphemeralContainerCommon.Env[j]
|
||||
@ -686,13 +720,22 @@ func SetObjectDefaults_ReplicationController(in *v1.ReplicationController) {
|
||||
if a.VolumeSource.ScaleIO != nil {
|
||||
SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
|
||||
}
|
||||
if a.VolumeSource.Ephemeral != nil {
|
||||
if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
|
||||
SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
|
||||
SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
|
||||
SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
|
||||
}
|
||||
}
|
||||
}
|
||||
for i := range in.Spec.Template.Spec.InitContainers {
|
||||
a := &in.Spec.Template.Spec.InitContainers[i]
|
||||
SetDefaults_Container(a)
|
||||
for j := range a.Ports {
|
||||
b := &a.Ports[j]
|
||||
SetDefaults_ContainerPort(b)
|
||||
if reflect.ValueOf(b.Protocol).IsZero() {
|
||||
b.Protocol = "TCP"
|
||||
}
|
||||
}
|
||||
for j := range a.Env {
|
||||
b := &a.Env[j]
|
||||
@ -740,7 +783,9 @@ func SetObjectDefaults_ReplicationController(in *v1.ReplicationController) {
|
||||
SetDefaults_Container(a)
|
||||
for j := range a.Ports {
|
||||
b := &a.Ports[j]
|
||||
SetDefaults_ContainerPort(b)
|
||||
if reflect.ValueOf(b.Protocol).IsZero() {
|
||||
b.Protocol = "TCP"
|
||||
}
|
||||
}
|
||||
for j := range a.Env {
|
||||
b := &a.Env[j]
|
||||
@ -785,9 +830,12 @@ func SetObjectDefaults_ReplicationController(in *v1.ReplicationController) {
|
||||
}
|
||||
for i := range in.Spec.Template.Spec.EphemeralContainers {
|
||||
a := &in.Spec.Template.Spec.EphemeralContainers[i]
|
||||
SetDefaults_EphemeralContainer(a)
|
||||
for j := range a.EphemeralContainerCommon.Ports {
|
||||
b := &a.EphemeralContainerCommon.Ports[j]
|
||||
SetDefaults_ContainerPort(b)
|
||||
if reflect.ValueOf(b.Protocol).IsZero() {
|
||||
b.Protocol = "TCP"
|
||||
}
|
||||
}
|
||||
for j := range a.EphemeralContainerCommon.Env {
|
||||
b := &a.EphemeralContainerCommon.Env[j]
|
||||
|
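Not part of the commit — a hedged sketch of what the new defaulting hunks above mean in practice: SetObjectDefaults_Pod now also descends into an ephemeral volume's claim template and runs SetDefaults_PersistentVolumeClaimSpec and the resource-list defaults on it. Package paths are the vendored ones and are assumed to be importable; the Pod below is illustrative.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
)

func main() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{{Name: "app", Image: "busybox"}},
			Volumes: []v1.Volume{{
				Name: "scratch",
				VolumeSource: v1.VolumeSource{
					Ephemeral: &v1.EphemeralVolumeSource{
						VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{},
					},
				},
			}},
		},
	}
	corev1.SetObjectDefaults_Pod(pod)
	// The embedded claim template spec has now been defaulted the same way a
	// standalone PVC spec would be, per the added lines above.
	fmt.Printf("%+v\n", pod.Spec.Volumes[0].VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
}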
12
vendor/k8s.io/kubernetes/pkg/apis/core/validation/BUILD
generated
vendored
@ -22,24 +22,26 @@ go_library(
|
||||
"//pkg/apis/core/v1:go_default_library",
|
||||
"//pkg/apis/core/v1/helper:go_default_library",
|
||||
"//pkg/capabilities:go_default_library",
|
||||
"//pkg/cluster/ports:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/fieldpath:go_default_library",
|
||||
"//pkg/master/ports:go_default_library",
|
||||
"//pkg/security/apparmor:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/api/events/v1beta1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/validation:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//vendor/k8s.io/klog:go_default_library",
|
||||
"//vendor/k8s.io/klog/v2:go_default_library",
|
||||
"//vendor/k8s.io/utils/net:go_default_library",
|
||||
],
|
||||
)
|
||||
@ -56,16 +58,18 @@ go_test(
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/capabilities:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/security/apparmor:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/api/events/v1:go_default_library",
|
||||
"//staging/src/k8s.io/api/events/v1beta1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/require:go_default_library",
|
||||
"//vendor/k8s.io/utils/pointer:go_default_library",
|
||||
],
|
||||
)
|
||||
|
2
vendor/k8s.io/kubernetes/pkg/apis/core/validation/OWNERS
generated
vendored
@ -21,12 +21,10 @@ reviewers:
|
||||
- sttts
|
||||
- quinton-hoole
|
||||
- dchen1107
|
||||
- zmerlynn
|
||||
- janetkuo
|
||||
- justinsb
|
||||
- pwittrock
|
||||
- tallclair
|
||||
- eparis
|
||||
- soltysh
|
||||
- piosz
|
||||
- jsafrane
|
||||
|
139
vendor/k8s.io/kubernetes/pkg/apis/core/validation/conditional_validation.go
generated
vendored
@ -26,123 +26,36 @@ import (
|
||||
// ValidateConditionalService validates conditionally valid fields.
|
||||
func ValidateConditionalService(service, oldService *api.Service) field.ErrorList {
|
||||
var errs field.ErrorList
|
||||
// If the SCTPSupport feature is disabled, and the old object isn't using the SCTP feature, prevent the new object from using it
|
||||
if !utilfeature.DefaultFeatureGate.Enabled(features.SCTPSupport) && len(serviceSCTPFields(oldService)) == 0 {
|
||||
for _, f := range serviceSCTPFields(service) {
|
||||
errs = append(errs, field.NotSupported(f, api.ProtocolSCTP, []string{string(api.ProtocolTCP), string(api.ProtocolUDP)}))
|
||||
}
|
||||
}
|
||||
|
||||
errs = append(errs, validateMixedProtocolLBService(service, oldService)...)
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
func serviceSCTPFields(service *api.Service) []*field.Path {
|
||||
// validateMixedProtocolLBService checks if the old Service has type=LoadBalancer and whether the Service has different Protocols
|
||||
// on its ports. If the MixedProtocolLBService feature flag is disabled the usage of different Protocols in the new Service is
|
||||
// valid only if the old Service has different Protocols, too.
|
||||
func validateMixedProtocolLBService(service, oldService *api.Service) (errs field.ErrorList) {
|
||||
if service.Spec.Type != api.ServiceTypeLoadBalancer {
|
||||
return
|
||||
}
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.MixedProtocolLBService) {
|
||||
return
|
||||
}
|
||||
|
||||
if serviceHasMixedProtocols(service) && !serviceHasMixedProtocols(oldService) {
|
||||
errs = append(errs, field.Invalid(field.NewPath("spec", "ports"), service.Spec.Ports, "may not contain more than 1 protocol when type is 'LoadBalancer'"))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func serviceHasMixedProtocols(service *api.Service) bool {
|
||||
if service == nil {
|
||||
return nil
|
||||
return false
|
||||
}
|
||||
fields := []*field.Path{}
|
||||
for pIndex, p := range service.Spec.Ports {
|
||||
if p.Protocol == api.ProtocolSCTP {
|
||||
fields = append(fields, field.NewPath("spec.ports").Index(pIndex).Child("protocol"))
|
||||
}
|
||||
protos := map[string]bool{}
|
||||
for _, port := range service.Spec.Ports {
|
||||
protos[string(port.Protocol)] = true
|
||||
}
|
||||
return fields
|
||||
}
|
||||
|
||||
// ValidateConditionalEndpoints validates conditionally valid fields.
|
||||
func ValidateConditionalEndpoints(endpoints, oldEndpoints *api.Endpoints) field.ErrorList {
|
||||
var errs field.ErrorList
|
||||
// If the SCTPSupport feature is disabled, and the old object isn't using the SCTP feature, prevent the new object from using it
|
||||
if !utilfeature.DefaultFeatureGate.Enabled(features.SCTPSupport) && len(endpointsSCTPFields(oldEndpoints)) == 0 {
|
||||
for _, f := range endpointsSCTPFields(endpoints) {
|
||||
errs = append(errs, field.NotSupported(f, api.ProtocolSCTP, []string{string(api.ProtocolTCP), string(api.ProtocolUDP)}))
|
||||
}
|
||||
}
|
||||
return errs
|
||||
}
|
||||
|
||||
func endpointsSCTPFields(endpoints *api.Endpoints) []*field.Path {
|
||||
if endpoints == nil {
|
||||
return nil
|
||||
}
|
||||
fields := []*field.Path{}
|
||||
for sIndex, s := range endpoints.Subsets {
|
||||
for pIndex, p := range s.Ports {
|
||||
if p.Protocol == api.ProtocolSCTP {
|
||||
fields = append(fields, field.NewPath("subsets").Index(sIndex).Child("ports").Index(pIndex).Child("protocol"))
|
||||
}
|
||||
}
|
||||
}
|
||||
return fields
|
||||
}
|
||||
|
||||
// ValidateConditionalPodTemplate validates conditionally valid fields.
|
||||
// This should be called from Validate/ValidateUpdate for all resources containing a PodTemplateSpec
|
||||
func ValidateConditionalPodTemplate(podTemplate, oldPodTemplate *api.PodTemplateSpec, fldPath *field.Path) field.ErrorList {
|
||||
var (
|
||||
podSpec *api.PodSpec
|
||||
oldPodSpec *api.PodSpec
|
||||
)
|
||||
if podTemplate != nil {
|
||||
podSpec = &podTemplate.Spec
|
||||
}
|
||||
if oldPodTemplate != nil {
|
||||
oldPodSpec = &oldPodTemplate.Spec
|
||||
}
|
||||
return validateConditionalPodSpec(podSpec, oldPodSpec, fldPath.Child("spec"))
|
||||
}
|
||||
|
||||
// ValidateConditionalPod validates conditionally valid fields.
|
||||
// This should be called from Validate/ValidateUpdate for all resources containing a Pod
|
||||
func ValidateConditionalPod(pod, oldPod *api.Pod, fldPath *field.Path) field.ErrorList {
|
||||
var (
|
||||
podSpec *api.PodSpec
|
||||
oldPodSpec *api.PodSpec
|
||||
)
|
||||
if pod != nil {
|
||||
podSpec = &pod.Spec
|
||||
}
|
||||
if oldPod != nil {
|
||||
oldPodSpec = &oldPod.Spec
|
||||
}
|
||||
return validateConditionalPodSpec(podSpec, oldPodSpec, fldPath.Child("spec"))
|
||||
}
|
||||
|
||||
func validateConditionalPodSpec(podSpec, oldPodSpec *api.PodSpec, fldPath *field.Path) field.ErrorList {
|
||||
// Always make sure we have a non-nil current pod spec
|
||||
if podSpec == nil {
|
||||
podSpec = &api.PodSpec{}
|
||||
}
|
||||
|
||||
errs := field.ErrorList{}
|
||||
|
||||
// If the SCTPSupport feature is disabled, and the old object isn't using the SCTP feature, prevent the new object from using it
|
||||
if !utilfeature.DefaultFeatureGate.Enabled(features.SCTPSupport) && len(podSCTPFields(oldPodSpec, nil)) == 0 {
|
||||
for _, f := range podSCTPFields(podSpec, fldPath) {
|
||||
errs = append(errs, field.NotSupported(f, api.ProtocolSCTP, []string{string(api.ProtocolTCP), string(api.ProtocolUDP)}))
|
||||
}
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
func podSCTPFields(podSpec *api.PodSpec, fldPath *field.Path) []*field.Path {
|
||||
if podSpec == nil {
|
||||
return nil
|
||||
}
|
||||
fields := []*field.Path{}
|
||||
for cIndex, c := range podSpec.InitContainers {
|
||||
for pIndex, p := range c.Ports {
|
||||
if p.Protocol == api.ProtocolSCTP {
|
||||
fields = append(fields, fldPath.Child("initContainers").Index(cIndex).Child("ports").Index(pIndex).Child("protocol"))
|
||||
}
|
||||
}
|
||||
}
|
||||
for cIndex, c := range podSpec.Containers {
|
||||
for pIndex, p := range c.Ports {
|
||||
if p.Protocol == api.ProtocolSCTP {
|
||||
fields = append(fields, fldPath.Child("containers").Index(cIndex).Child("ports").Index(pIndex).Child("protocol"))
|
||||
}
|
||||
}
|
||||
}
|
||||
return fields
|
||||
return len(protos) > 1
|
||||
}
|
||||
|
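Not part of the commit — a sketch of the new mixed-protocol check above, assuming the vendored validation package is importable and the MixedProtocolLBService feature gate is left at its default (disabled) setting. With no old object that already mixed protocols, a LoadBalancer Service exposing both TCP and UDP is flagged on spec.ports.

package main

import (
	"fmt"

	core "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/core/validation"
)

func main() {
	svc := &core.Service{
		Spec: core.ServiceSpec{
			Type: core.ServiceTypeLoadBalancer,
			Ports: []core.ServicePort{
				{Name: "dns-tcp", Protocol: core.ProtocolTCP, Port: 53},
				{Name: "dns-udp", Protocol: core.ProtocolUDP, Port: 53},
			},
		},
	}
	// No old object, gate disabled: validateMixedProtocolLBService rejects the mix.
	errs := validation.ValidateConditionalService(svc, nil)
	fmt.Println(errs.ToAggregate())
}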
89
vendor/k8s.io/kubernetes/pkg/apis/core/validation/events.go
generated
vendored
@ -18,9 +18,14 @@ package validation
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
eventsv1beta1 "k8s.io/api/events/v1beta1"
|
||||
apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/validation"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
"k8s.io/kubernetes/pkg/apis/core"
|
||||
@ -33,8 +38,88 @@ const (
|
||||
NoteLengthLimit = 1024
|
||||
)
|
||||
|
||||
// ValidateEvent makes sure that the event makes sense.
|
||||
func ValidateEvent(event *core.Event) field.ErrorList {
|
||||
func ValidateEventCreate(event *core.Event, requestVersion schema.GroupVersion) field.ErrorList {
|
||||
// Make sure events always pass legacy validation.
|
||||
allErrs := legacyValidateEvent(event)
|
||||
if requestVersion == v1.SchemeGroupVersion || requestVersion == eventsv1beta1.SchemeGroupVersion {
|
||||
// No further validation for backwards compatibility.
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// Strict validation applies to creation via events.k8s.io/v1 API and newer.
|
||||
allErrs = append(allErrs, ValidateObjectMeta(&event.ObjectMeta, true, apimachineryvalidation.NameIsDNSSubdomain, field.NewPath("metadata"))...)
|
||||
allErrs = append(allErrs, validateV1EventSeries(event)...)
|
||||
zeroTime := time.Time{}
|
||||
if event.EventTime.Time == zeroTime {
|
||||
allErrs = append(allErrs, field.Required(field.NewPath("eventTime"), ""))
|
||||
}
|
||||
if event.Type != v1.EventTypeNormal && event.Type != v1.EventTypeWarning {
|
||||
allErrs = append(allErrs, field.Invalid(field.NewPath("type"), "", fmt.Sprintf("has invalid value: %v", event.Type)))
|
||||
}
|
||||
if event.FirstTimestamp.Time != zeroTime {
|
||||
allErrs = append(allErrs, field.Invalid(field.NewPath("firstTimestamp"), "", "needs to be unset"))
|
||||
}
|
||||
if event.LastTimestamp.Time != zeroTime {
|
||||
allErrs = append(allErrs, field.Invalid(field.NewPath("lastTimestamp"), "", "needs to be unset"))
|
||||
}
|
||||
if event.Count != 0 {
|
||||
allErrs = append(allErrs, field.Invalid(field.NewPath("count"), "", "needs to be unset"))
|
||||
}
|
||||
if event.Source.Component != "" || event.Source.Host != "" {
|
||||
allErrs = append(allErrs, field.Invalid(field.NewPath("source"), "", "needs to be unset"))
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func ValidateEventUpdate(newEvent, oldEvent *core.Event, requestVersion schema.GroupVersion) field.ErrorList {
|
||||
// Make sure the new event always passes legacy validation.
|
||||
allErrs := legacyValidateEvent(newEvent)
|
||||
if requestVersion == v1.SchemeGroupVersion || requestVersion == eventsv1beta1.SchemeGroupVersion {
|
||||
// No further validation for backwards compatibility.
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// Strict validation applies to update via events.k8s.io/v1 API and newer.
|
||||
allErrs = append(allErrs, ValidateObjectMetaUpdate(&newEvent.ObjectMeta, &oldEvent.ObjectMeta, field.NewPath("metadata"))...)
|
||||
// if the series was modified, validate the new data
|
||||
if !reflect.DeepEqual(newEvent.Series, oldEvent.Series) {
|
||||
allErrs = append(allErrs, validateV1EventSeries(newEvent)...)
|
||||
}
|
||||
|
||||
allErrs = append(allErrs, ValidateImmutableField(newEvent.InvolvedObject, oldEvent.InvolvedObject, field.NewPath("involvedObject"))...)
|
||||
allErrs = append(allErrs, ValidateImmutableField(newEvent.Reason, oldEvent.Reason, field.NewPath("reason"))...)
|
||||
allErrs = append(allErrs, ValidateImmutableField(newEvent.Message, oldEvent.Message, field.NewPath("message"))...)
|
||||
allErrs = append(allErrs, ValidateImmutableField(newEvent.Source, oldEvent.Source, field.NewPath("source"))...)
|
||||
allErrs = append(allErrs, ValidateImmutableField(newEvent.FirstTimestamp, oldEvent.FirstTimestamp, field.NewPath("firstTimestamp"))...)
|
||||
allErrs = append(allErrs, ValidateImmutableField(newEvent.LastTimestamp, oldEvent.LastTimestamp, field.NewPath("lastTimestamp"))...)
|
||||
allErrs = append(allErrs, ValidateImmutableField(newEvent.Count, oldEvent.Count, field.NewPath("count"))...)
|
||||
allErrs = append(allErrs, ValidateImmutableField(newEvent.Reason, oldEvent.Reason, field.NewPath("reason"))...)
|
||||
allErrs = append(allErrs, ValidateImmutableField(newEvent.Type, oldEvent.Type, field.NewPath("type"))...)
|
||||
allErrs = append(allErrs, ValidateImmutableField(newEvent.EventTime, oldEvent.EventTime, field.NewPath("eventTime"))...)
|
||||
allErrs = append(allErrs, ValidateImmutableField(newEvent.Action, oldEvent.Action, field.NewPath("action"))...)
|
||||
allErrs = append(allErrs, ValidateImmutableField(newEvent.Related, oldEvent.Related, field.NewPath("related"))...)
|
||||
allErrs = append(allErrs, ValidateImmutableField(newEvent.ReportingController, oldEvent.ReportingController, field.NewPath("reportingController"))...)
|
||||
allErrs = append(allErrs, ValidateImmutableField(newEvent.ReportingInstance, oldEvent.ReportingInstance, field.NewPath("reportingInstance"))...)
|
||||
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func validateV1EventSeries(event *core.Event) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
zeroTime := time.Time{}
|
||||
if event.Series != nil {
|
||||
if event.Series.Count < 2 {
|
||||
allErrs = append(allErrs, field.Invalid(field.NewPath("series.count"), "", fmt.Sprintf("should be at least 2")))
|
||||
}
|
||||
if event.Series.LastObservedTime.Time == zeroTime {
|
||||
allErrs = append(allErrs, field.Required(field.NewPath("series.lastObservedTime"), ""))
|
||||
}
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// legacyValidateEvent makes sure that the event makes sense.
|
||||
func legacyValidateEvent(event *core.Event) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
// Because go
|
||||
zeroTime := time.Time{}
|
||||
|
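Not part of the commit — a sketch of the version-dependent strictness introduced above: the same core.Event only goes through the legacy checks when the request arrives via core/v1 or events.k8s.io/v1beta1, while creation via events.k8s.io/v1 additionally requires eventTime and rejects the legacy count/source/timestamp fields. The object below is illustrative, not taken from the repository.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	eventsv1 "k8s.io/api/events/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	core "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/core/validation"
)

func main() {
	evt := &core.Event{
		ObjectMeta:     metav1.ObjectMeta{Name: "demo.1", Namespace: "default"},
		InvolvedObject: core.ObjectReference{Kind: "Pod", Name: "pod-a", Namespace: "default"},
		Reason:         "Started",
		Message:        "container started",
		Type:           "Normal",
	}
	// Legacy-only path (core/v1 and events.k8s.io/v1beta1 requests).
	fmt.Println(len(validation.ValidateEventCreate(evt, v1.SchemeGroupVersion)))
	// Strict path (events.k8s.io/v1 and newer): reports the missing eventTime, among others.
	fmt.Println(len(validation.ValidateEventCreate(evt, eventsv1.SchemeGroupVersion)))
}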
894
vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go
generated
vendored
File diff suppressed because it is too large
144
vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go
generated
vendored
@ -1433,6 +1433,27 @@ func (in *EphemeralContainers) DeepCopyObject() runtime.Object {
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EphemeralVolumeSource) DeepCopyInto(out *EphemeralVolumeSource) {
|
||||
*out = *in
|
||||
if in.VolumeClaimTemplate != nil {
|
||||
in, out := &in.VolumeClaimTemplate, &out.VolumeClaimTemplate
|
||||
*out = new(PersistentVolumeClaimTemplate)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralVolumeSource.
|
||||
func (in *EphemeralVolumeSource) DeepCopy() *EphemeralVolumeSource {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(EphemeralVolumeSource)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Event) DeepCopyInto(out *Event) {
|
||||
*out = *in
|
||||
@ -2125,6 +2146,13 @@ func (in *List) DeepCopyObject() runtime.Object {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) {
|
||||
*out = *in
|
||||
if in.Ports != nil {
|
||||
in, out := &in.Ports, &out.Ports
|
||||
*out = make([]PortStatus, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@ -2144,7 +2172,9 @@ func (in *LoadBalancerStatus) DeepCopyInto(out *LoadBalancerStatus) {
|
||||
if in.Ingress != nil {
|
||||
in, out := &in.Ingress, &out.Ingress
|
||||
*out = make([]LoadBalancerIngress, len(*in))
|
||||
copy(*out, *in)
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
@ -2987,6 +3017,24 @@ func (in *PersistentVolumeClaimStatus) DeepCopy() *PersistentVolumeClaimStatus {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PersistentVolumeClaimTemplate) DeepCopyInto(out *PersistentVolumeClaimTemplate) {
|
||||
*out = *in
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimTemplate.
|
||||
func (in *PersistentVolumeClaimTemplate) DeepCopy() *PersistentVolumeClaimTemplate {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PersistentVolumeClaimTemplate)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PersistentVolumeClaimVolumeSource) DeepCopyInto(out *PersistentVolumeClaimVolumeSource) {
|
||||
*out = *in
|
||||
@ -3701,6 +3749,11 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) {
|
||||
*out = make([]Sysctl, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.SeccompProfile != nil {
|
||||
in, out := &in.SeccompProfile, &out.SeccompProfile
|
||||
*out = new(SeccompProfile)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@ -3798,6 +3851,11 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) {
|
||||
*out = make([]LocalObjectReference, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.SetHostnameAsFQDN != nil {
|
||||
in, out := &in.SetHostnameAsFQDN, &out.SetHostnameAsFQDN
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.Affinity != nil {
|
||||
in, out := &in.Affinity, &out.Affinity
|
||||
*out = new(Affinity)
|
||||
@ -4032,6 +4090,27 @@ func (in *PodTemplateSpec) DeepCopy() *PodTemplateSpec {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PortStatus) DeepCopyInto(out *PortStatus) {
|
||||
*out = *in
|
||||
if in.Error != nil {
|
||||
in, out := &in.Error, &out.Error
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortStatus.
|
||||
func (in *PortStatus) DeepCopy() *PortStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PortStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PortworxVolumeSource) DeepCopyInto(out *PortworxVolumeSource) {
|
||||
*out = *in
|
||||
@ -4672,6 +4751,27 @@ func (in *ScopedResourceSelectorRequirement) DeepCopy() *ScopedResourceSelectorR
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *SeccompProfile) DeepCopyInto(out *SeccompProfile) {
|
||||
*out = *in
|
||||
if in.LocalhostProfile != nil {
|
||||
in, out := &in.LocalhostProfile, &out.LocalhostProfile
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeccompProfile.
|
||||
func (in *SeccompProfile) DeepCopy() *SeccompProfile {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(SeccompProfile)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Secret) DeepCopyInto(out *Secret) {
|
||||
*out = *in
|
||||
@ -4926,6 +5026,11 @@ func (in *SecurityContext) DeepCopyInto(out *SecurityContext) {
|
||||
*out = new(ProcMountType)
|
||||
**out = **in
|
||||
}
|
||||
if in.SeccompProfile != nil {
|
||||
in, out := &in.SeccompProfile, &out.SeccompProfile
|
||||
*out = new(SeccompProfile)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@ -5180,6 +5285,21 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.ClusterIPs != nil {
|
||||
in, out := &in.ClusterIPs, &out.ClusterIPs
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.IPFamilies != nil {
|
||||
in, out := &in.IPFamilies, &out.IPFamilies
|
||||
*out = make([]IPFamily, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.IPFamilyPolicy != nil {
|
||||
in, out := &in.IPFamilyPolicy, &out.IPFamilyPolicy
|
||||
*out = new(IPFamilyPolicyType)
|
||||
**out = **in
|
||||
}
|
||||
if in.ExternalIPs != nil {
|
||||
in, out := &in.ExternalIPs, &out.ExternalIPs
|
||||
*out = make([]string, len(*in))
|
||||
@ -5195,16 +5315,16 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) {
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.IPFamily != nil {
|
||||
in, out := &in.IPFamily, &out.IPFamily
|
||||
*out = new(IPFamily)
|
||||
**out = **in
|
||||
}
|
||||
if in.TopologyKeys != nil {
|
||||
in, out := &in.TopologyKeys, &out.TopologyKeys
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.AllocateLoadBalancerNodePorts != nil {
|
||||
in, out := &in.AllocateLoadBalancerNodePorts, &out.AllocateLoadBalancerNodePorts
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@ -5222,6 +5342,13 @@ func (in *ServiceSpec) DeepCopy() *ServiceSpec {
|
||||
func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) {
|
||||
*out = *in
|
||||
in.LoadBalancer.DeepCopyInto(&out.LoadBalancer)
|
||||
if in.Conditions != nil {
|
||||
in, out := &in.Conditions, &out.Conditions
|
||||
*out = make([]v1.Condition, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@ -5712,6 +5839,11 @@ func (in *VolumeSource) DeepCopyInto(out *VolumeSource) {
|
||||
*out = new(CSIVolumeSource)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Ephemeral != nil {
|
||||
in, out := &in.Ephemeral, &out.Ephemeral
|
||||
*out = new(EphemeralVolumeSource)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
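Not part of the commit — a sketch showing the effect of the regenerated deepcopy above: the new Ephemeral field is copied deeply, so mutating the copy's claim template does not touch the original. Assumes the vendored core package is importable.

package main

import (
	"fmt"

	core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	orig := &core.VolumeSource{
		Ephemeral: &core.EphemeralVolumeSource{
			VolumeClaimTemplate: &core.PersistentVolumeClaimTemplate{},
		},
	}
	copied := orig.DeepCopy()
	copied.Ephemeral.VolumeClaimTemplate.ObjectMeta.Name = "changed"
	// The copy owns its own template, so the original stays untouched.
	fmt.Println(orig.Ephemeral.VolumeClaimTemplate.ObjectMeta.Name == "") // true
}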
2
vendor/k8s.io/kubernetes/pkg/apis/extensions/OWNERS
generated
vendored
@ -30,10 +30,8 @@ reviewers:
|
||||
- mml
|
||||
- resouer
|
||||
- mbohlool
|
||||
- david-mcmahon
|
||||
- therc
|
||||
- pweil-
|
||||
- mqliang
|
||||
- lukaszo
|
||||
- jianhuiz
|
||||
labels:
|
||||
|
56
vendor/k8s.io/kubernetes/pkg/apis/networking/types.go
generated
vendored
@ -158,7 +158,7 @@ type IPBlock struct {
|
||||
Except []string
|
||||
}
|
||||
|
||||
// NetworkPolicyPeer describes a peer to allow traffic from.
|
||||
// NetworkPolicyPeer describes a peer to allow traffic to/from.
|
||||
type NetworkPolicyPeer struct {
|
||||
// This is a label selector which selects Pods. This field follows standard label
|
||||
// selector semantics; if present but empty, it selects all pods.
|
||||
@ -248,26 +248,27 @@ type IngressSpec struct {
|
||||
// +optional
|
||||
IngressClassName *string
|
||||
|
||||
// Backend is a default backend capable of servicing requests that don't
|
||||
// match any rule. At least one of 'backend' or 'rules' must be specified.
|
||||
// This field is optional to allow the loadbalancer controller or defaulting
|
||||
// logic to specify a global default.
|
||||
// DefaultBackend is the backend that should handle requests that don't
|
||||
// match any rule. If Rules are not specified, DefaultBackend must be specified.
|
||||
// If DefaultBackend is not set, the handling of requests that do not match any
|
||||
// of the rules will be up to the Ingress controller.
|
||||
// +optional
|
||||
Backend *IngressBackend
|
||||
DefaultBackend *IngressBackend
|
||||
|
||||
// TLS configuration. Currently the Ingress only supports a single TLS
|
||||
// port, 443. If multiple members of this list specify different hosts, they
|
||||
// will be multiplexed on the same port according to the hostname specified
|
||||
// through the SNI TLS extension, if the ingress controller fulfilling the
|
||||
// ingress supports SNI.
|
||||
// +listType=atomic
|
||||
// +optional
|
||||
TLS []IngressTLS
|
||||
|
||||
// A list of host rules used to configure the Ingress. If unspecified, or
|
||||
// no rule matches, all traffic is sent to the default backend.
|
||||
// +listType=atomic
|
||||
// +optional
|
||||
Rules []IngressRule
|
||||
// TODO: Add the ability to specify load-balancer IP through claims
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
@ -314,7 +315,6 @@ type IngressClassList struct {
|
||||
metav1.ListMeta
|
||||
|
||||
// Items is the list of IngressClasses.
|
||||
// +listType=set
|
||||
Items []IngressClass
|
||||
}
|
||||
|
||||
@ -324,6 +324,7 @@ type IngressTLS struct {
|
||||
// this list must match the name/s used in the tlsSecret. Defaults to the
|
||||
// wildcard host setting for the loadbalancer controller fulfilling this
|
||||
// Ingress, if left unspecified.
|
||||
// +listType=atomic
|
||||
// +optional
|
||||
Hosts []string
|
||||
// SecretName is the name of the secret used to terminate TLS traffic on
|
||||
@ -404,6 +405,7 @@ type IngressRuleValue struct {
|
||||
// or '#'.
|
||||
type HTTPIngressRuleValue struct {
|
||||
// A collection of paths that map requests to backends.
|
||||
// +listType=atomic
|
||||
Paths []HTTPIngressPath
|
||||
// TODO: Consider adding fields for ingress-type specific global
|
||||
// options usable by a loadbalancer, like http keep-alive.
|
||||
@ -462,17 +464,39 @@ type HTTPIngressPath struct {
|
||||
|
||||
// IngressBackend describes all endpoints for a given service and port.
|
||||
type IngressBackend struct {
|
||||
// Specifies the name of the referenced service.
|
||||
// Service references a Service as a Backend.
|
||||
// This is a mutually exclusive setting with "Resource".
|
||||
// +optional
|
||||
ServiceName string
|
||||
|
||||
// Specifies the port of the referenced service.
|
||||
// +optional
|
||||
ServicePort intstr.IntOrString
|
||||
Service *IngressServiceBackend
|
||||
|
||||
// Resource is an ObjectRef to another Kubernetes resource in the namespace
|
||||
// of the Ingress object. If resource is specified, serviceName and servicePort
|
||||
// must not be specified.
|
||||
// of the Ingress object. If resource is specified, a service.Name and
|
||||
// service.Port must not be specified.
|
||||
// This is a mutually exclusive setting with "Service".
|
||||
// +optional
|
||||
Resource *api.TypedLocalObjectReference
|
||||
}
|
||||
|
||||
// IngressServiceBackend references a Kubernetes Service as a Backend.
|
||||
type IngressServiceBackend struct {
|
||||
// Name is the referenced service. The service must exist in
|
||||
// the same namespace as the Ingress object.
|
||||
Name string
|
||||
|
||||
// Port of the referenced service. A port name or port number
|
||||
// is required for a IngressServiceBackend.
|
||||
Port ServiceBackendPort
|
||||
}
|
||||
|
||||
// ServiceBackendPort is the service port being referenced.
|
||||
type ServiceBackendPort struct {
|
||||
// Name is the name of the port on the Service.
|
||||
// This is a mutually exclusive setting with "Number".
|
||||
// +optional
|
||||
Name string
|
||||
|
||||
// Number is the numerical port number (e.g. 80) on the Service.
|
||||
// This is a mutually exclusive setting with "Name".
|
||||
// +optional
|
||||
Number int32
|
||||
}
|
||||
|
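Not part of the commit — a sketch of the reworked internal IngressBackend shown above: the ServiceName/ServicePort pair becomes a typed Service reference, and ServiceBackendPort takes either a Name or a Number (mutually exclusive). Field values are illustrative.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/networking"
)

func main() {
	backend := networking.IngressBackend{
		Service: &networking.IngressServiceBackend{
			Name: "web",
			Port: networking.ServiceBackendPort{Number: 8080},
		},
	}
	// Backend on IngressSpec is likewise renamed to DefaultBackend.
	spec := networking.IngressSpec{DefaultBackend: &backend}
	fmt.Printf("%+v\n", spec)
}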
43
vendor/k8s.io/kubernetes/pkg/apis/networking/zz_generated.deepcopy.go
generated
vendored
@ -124,7 +124,11 @@ func (in *Ingress) DeepCopyObject() runtime.Object {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *IngressBackend) DeepCopyInto(out *IngressBackend) {
|
||||
*out = *in
|
||||
out.ServicePort = in.ServicePort
|
||||
if in.Service != nil {
|
||||
in, out := &in.Service, &out.Service
|
||||
*out = new(IngressServiceBackend)
|
||||
**out = **in
|
||||
}
|
||||
if in.Resource != nil {
|
||||
in, out := &in.Resource, &out.Resource
|
||||
*out = new(core.TypedLocalObjectReference)
|
||||
@ -295,6 +299,23 @@ func (in *IngressRuleValue) DeepCopy() *IngressRuleValue {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *IngressServiceBackend) DeepCopyInto(out *IngressServiceBackend) {
|
||||
*out = *in
|
||||
out.Port = in.Port
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressServiceBackend.
|
||||
func (in *IngressServiceBackend) DeepCopy() *IngressServiceBackend {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(IngressServiceBackend)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *IngressSpec) DeepCopyInto(out *IngressSpec) {
|
||||
*out = *in
|
||||
@ -303,8 +324,8 @@ func (in *IngressSpec) DeepCopyInto(out *IngressSpec) {
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
if in.Backend != nil {
|
||||
in, out := &in.Backend, &out.Backend
|
||||
if in.DefaultBackend != nil {
|
||||
in, out := &in.DefaultBackend, &out.DefaultBackend
|
||||
*out = new(IngressBackend)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
@ -585,3 +606,19 @@ func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec {
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ServiceBackendPort) DeepCopyInto(out *ServiceBackendPort) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceBackendPort.
|
||||
func (in *ServiceBackendPort) DeepCopy() *ServiceBackendPort {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ServiceBackendPort)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
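The generated helpers above give value-style copies of the new backend types. A minimal sketch of what that buys a caller, again using the versioned networking/v1 types as an assumption and made-up values:

package main

import (
    "fmt"

    networkingv1 "k8s.io/api/networking/v1"
)

func main() {
    orig := &networkingv1.IngressBackend{
        Service: &networkingv1.IngressServiceBackend{
            Name: "web", // hypothetical Service name
            Port: networkingv1.ServiceBackendPort{Number: 80},
        },
    }

    // DeepCopy also duplicates the nested *IngressServiceBackend, so
    // mutating the copy does not touch the original.
    cp := orig.DeepCopy()
    cp.Service.Port.Number = 8080

    fmt.Println(orig.Service.Port.Number, cp.Service.Port.Number) // prints: 80 8080
}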
16 vendor/k8s.io/kubernetes/pkg/apis/policy/types.go generated vendored

@ -157,7 +157,7 @@ type PodSecurityPolicySpec struct {
    // To allow all capabilities you may use '*'.
    // +optional
    AllowedCapabilities []api.Capability
    // Volumes is a white list of allowed volume plugins. Empty indicates that
    // Volumes is an allowlist of volume plugins. Empty indicates that
    // no volumes may be used. To allow all volumes you may use '*'.
    // +optional
    Volumes []FSType

@ -200,23 +200,23 @@ type PodSecurityPolicySpec struct {
    // privilege escalation. If unspecified, defaults to true.
    // +optional
    AllowPrivilegeEscalation bool
    // AllowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used.
    // AllowedHostPaths is an allowlist of host paths. Empty indicates that all host paths may be used.
    // +optional
    AllowedHostPaths []AllowedHostPath
    // AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all
    // AllowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all
    // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes
    // is allowed in the "Volumes" field.
    // +optional
    AllowedFlexVolumes []AllowedFlexVolume
    // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec.
    // AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec.
    // An empty value indicates that any CSI driver can be used for inline ephemeral volumes.
    // This is an alpha field, and is only honored if the API server enables the CSIInlineVolume feature gate.
    // This is a beta field, and is only honored if the API server enables the CSIInlineVolume feature gate.
    // +optional
    AllowedCSIDrivers []AllowedCSIDriver
    // AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none.
    // Each entry is either a plain sysctl name or ends in "*" in which case it is considered
    // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed.
    // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.
    // Kubelet has to allowlist all unsafe sysctls explicitly to avoid rejection.
    //
    // Examples:
    // e.g. "foo/*" allows "foo/bar", "foo/baz", etc.

@ -232,7 +232,7 @@ type PodSecurityPolicySpec struct {
    // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc.
    // +optional
    ForbiddenSysctls []string
    // AllowedProcMountTypes is a whitelist of allowed ProcMountTypes.
    // AllowedProcMountTypes is an allowlist of ProcMountTypes.
    // Empty or nil indicates that only the DefaultProcMountType may be used.
    // +optional
    AllowedProcMountTypes []api.ProcMountType

@ -449,7 +449,7 @@ const (
// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses
// for a pod.
type RuntimeClassStrategyOptions struct {
    // allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod.
    // allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod.
    // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the
    // list. An empty list requires the RuntimeClassName field to be unset.
    AllowedRuntimeClassNames []string
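As a rough illustration of how these allowlist fields are populated by an administrator, here is a sketch using the published policy/v1beta1 API (an assumption; the diff shows the internal types). The path and driver values are examples only, with rbd.csi.ceph.com standing in for a CSI driver such as the one this repository ships.

package main

import (
    "fmt"

    policyv1beta1 "k8s.io/api/policy/v1beta1"
)

func main() {
    spec := policyv1beta1.PodSecurityPolicySpec{
        // Allowlist-style fields: only the listed volume plugins, host path
        // prefixes, FlexVolume drivers, and inline CSI drivers are permitted.
        Volumes: []policyv1beta1.FSType{
            policyv1beta1.ConfigMap,
            policyv1beta1.Secret,
            policyv1beta1.CSI,
        },
        AllowedHostPaths:     []policyv1beta1.AllowedHostPath{{PathPrefix: "/var/log", ReadOnly: true}},
        AllowedFlexVolumes:   []policyv1beta1.AllowedFlexVolume{{Driver: "example/lvm"}}, // hypothetical driver
        AllowedCSIDrivers:    []policyv1beta1.AllowedCSIDriver{{Name: "rbd.csi.ceph.com"}},
        AllowedUnsafeSysctls: []string{"net.ipv4.*"},
        ForbiddenSysctls:     []string{"kernel.*"},
    }
    fmt.Printf("%+v\n", spec)
}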
57
vendor/k8s.io/kubernetes/pkg/apis/policy/validation/BUILD
generated
vendored
57
vendor/k8s.io/kubernetes/pkg/apis/policy/validation/BUILD
generated
vendored
@ -1,57 +0,0 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["validation.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/apis/policy/validation",
|
||||
deps = [
|
||||
"//pkg/apis/apps/validation:go_default_library",
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/apis/core/validation:go_default_library",
|
||||
"//pkg/apis/policy:go_default_library",
|
||||
"//pkg/security/apparmor:go_default_library",
|
||||
"//pkg/security/podsecuritypolicy/seccomp:go_default_library",
|
||||
"//pkg/security/podsecuritypolicy/util:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/validation:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["validation_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/apis/policy:go_default_library",
|
||||
"//pkg/security/apparmor:go_default_library",
|
||||
"//pkg/security/podsecuritypolicy/seccomp:go_default_library",
|
||||
"//pkg/security/podsecuritypolicy/util:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
"//vendor/k8s.io/utils/pointer:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
530
vendor/k8s.io/kubernetes/pkg/apis/policy/validation/validation.go
generated
vendored
530
vendor/k8s.io/kubernetes/pkg/apis/policy/validation/validation.go
generated
vendored
@ -1,530 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package validation
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
|
||||
unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
appsvalidation "k8s.io/kubernetes/pkg/apis/apps/validation"
|
||||
core "k8s.io/kubernetes/pkg/apis/core"
|
||||
apivalidation "k8s.io/kubernetes/pkg/apis/core/validation"
|
||||
"k8s.io/kubernetes/pkg/apis/policy"
|
||||
"k8s.io/kubernetes/pkg/security/apparmor"
|
||||
"k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp"
|
||||
psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util"
|
||||
)
|
||||
|
||||
// ValidatePodDisruptionBudget validates a PodDisruptionBudget and returns an ErrorList
|
||||
// with any errors.
|
||||
func ValidatePodDisruptionBudget(pdb *policy.PodDisruptionBudget) field.ErrorList {
|
||||
allErrs := ValidatePodDisruptionBudgetSpec(pdb.Spec, field.NewPath("spec"))
|
||||
allErrs = append(allErrs, ValidatePodDisruptionBudgetStatus(pdb.Status, field.NewPath("status"))...)
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// ValidatePodDisruptionBudgetSpec validates a PodDisruptionBudgetSpec and returns an ErrorList
|
||||
// with any errors.
|
||||
func ValidatePodDisruptionBudgetSpec(spec policy.PodDisruptionBudgetSpec, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
||||
if spec.MinAvailable != nil && spec.MaxUnavailable != nil {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath, spec, "minAvailable and maxUnavailable cannot be both set"))
|
||||
}
|
||||
|
||||
if spec.MinAvailable != nil {
|
||||
allErrs = append(allErrs, appsvalidation.ValidatePositiveIntOrPercent(*spec.MinAvailable, fldPath.Child("minAvailable"))...)
|
||||
allErrs = append(allErrs, appsvalidation.IsNotMoreThan100Percent(*spec.MinAvailable, fldPath.Child("minAvailable"))...)
|
||||
}
|
||||
|
||||
if spec.MaxUnavailable != nil {
|
||||
allErrs = append(allErrs, appsvalidation.ValidatePositiveIntOrPercent(*spec.MaxUnavailable, fldPath.Child("maxUnavailable"))...)
|
||||
allErrs = append(allErrs, appsvalidation.IsNotMoreThan100Percent(*spec.MaxUnavailable, fldPath.Child("maxUnavailable"))...)
|
||||
}
|
||||
|
||||
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...)
|
||||
|
||||
return allErrs
|
||||
}
|
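The core rule in the spec validation above is that minAvailable and maxUnavailable are mutually exclusive, and each must be a non-negative integer or a percentage no greater than 100%. A standalone restatement of just the mutual-exclusion check (not the upstream code, and omitting the percent bounds) could look like:

package main

import (
    "errors"
    "fmt"

    "k8s.io/apimachinery/pkg/util/intstr"
)

// checkPDBBounds is an illustrative restatement of the rule above:
// at most one of minAvailable and maxUnavailable may be set.
func checkPDBBounds(minAvailable, maxUnavailable *intstr.IntOrString) error {
    if minAvailable != nil && maxUnavailable != nil {
        return errors.New("minAvailable and maxUnavailable cannot be both set")
    }
    return nil
}

func main() {
    min := intstr.FromString("50%")
    max := intstr.FromInt(1)
    fmt.Println(checkPDBBounds(&min, nil))  // <nil>
    fmt.Println(checkPDBBounds(&min, &max)) // error
}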
||||
|
||||
// ValidatePodDisruptionBudgetStatus validates a PodDisruptionBudgetStatus and returns an ErrorList
|
||||
// with any errors.
|
||||
func ValidatePodDisruptionBudgetStatus(status policy.PodDisruptionBudgetStatus, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.DisruptionsAllowed), fldPath.Child("disruptionsAllowed"))...)
|
||||
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.CurrentHealthy), fldPath.Child("currentHealthy"))...)
|
||||
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.DesiredHealthy), fldPath.Child("desiredHealthy"))...)
|
||||
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.ExpectedPods), fldPath.Child("expectedPods"))...)
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// ValidatePodSecurityPolicyName can be used to check whether the given
|
||||
// pod security policy name is valid.
|
||||
// Prefix indicates this name will be used as part of generation, in which case
|
||||
// trailing dashes are allowed.
|
||||
var ValidatePodSecurityPolicyName = apimachineryvalidation.NameIsDNSSubdomain
|
||||
|
||||
// ValidatePodSecurityPolicy validates a PodSecurityPolicy and returns an ErrorList
|
||||
// with any errors.
|
||||
func ValidatePodSecurityPolicy(psp *policy.PodSecurityPolicy) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&psp.ObjectMeta, false, ValidatePodSecurityPolicyName, field.NewPath("metadata"))...)
|
||||
allErrs = append(allErrs, ValidatePodSecurityPolicySpecificAnnotations(psp.Annotations, field.NewPath("metadata").Child("annotations"))...)
|
||||
allErrs = append(allErrs, ValidatePodSecurityPolicySpec(&psp.Spec, field.NewPath("spec"))...)
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// ValidatePodSecurityPolicySpec validates a PodSecurityPolicySpec and returns an ErrorList
|
||||
// with any errors.
|
||||
func ValidatePodSecurityPolicySpec(spec *policy.PodSecurityPolicySpec, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
||||
allErrs = append(allErrs, validatePSPRunAsUser(fldPath.Child("runAsUser"), &spec.RunAsUser)...)
|
||||
allErrs = append(allErrs, validatePSPRunAsGroup(fldPath.Child("runAsGroup"), spec.RunAsGroup)...)
|
||||
allErrs = append(allErrs, validatePSPSELinux(fldPath.Child("seLinux"), &spec.SELinux)...)
|
||||
allErrs = append(allErrs, validatePSPSupplementalGroup(fldPath.Child("supplementalGroups"), &spec.SupplementalGroups)...)
|
||||
allErrs = append(allErrs, validatePSPFSGroup(fldPath.Child("fsGroup"), &spec.FSGroup)...)
|
||||
allErrs = append(allErrs, validatePodSecurityPolicyVolumes(fldPath, spec.Volumes)...)
|
||||
if len(spec.RequiredDropCapabilities) > 0 && hasCap(policy.AllowAllCapabilities, spec.AllowedCapabilities) {
|
||||
allErrs = append(allErrs, field.Invalid(field.NewPath("requiredDropCapabilities"), spec.RequiredDropCapabilities,
|
||||
"must be empty when all capabilities are allowed by a wildcard"))
|
||||
}
|
||||
allErrs = append(allErrs, validatePSPCapsAgainstDrops(spec.RequiredDropCapabilities, spec.DefaultAddCapabilities, field.NewPath("defaultAddCapabilities"))...)
|
||||
allErrs = append(allErrs, validatePSPCapsAgainstDrops(spec.RequiredDropCapabilities, spec.AllowedCapabilities, field.NewPath("allowedCapabilities"))...)
|
||||
allErrs = append(allErrs, validatePSPDefaultAllowPrivilegeEscalation(fldPath.Child("defaultAllowPrivilegeEscalation"), spec.DefaultAllowPrivilegeEscalation, spec.AllowPrivilegeEscalation)...)
|
||||
allErrs = append(allErrs, validatePSPAllowedProcMountTypes(fldPath.Child("allowedProcMountTypes"), spec.AllowedProcMountTypes)...)
|
||||
allErrs = append(allErrs, validatePSPAllowedHostPaths(fldPath.Child("allowedHostPaths"), spec.AllowedHostPaths)...)
|
||||
allErrs = append(allErrs, validatePSPAllowedFlexVolumes(fldPath.Child("allowedFlexVolumes"), spec.AllowedFlexVolumes)...)
|
||||
allErrs = append(allErrs, validatePSPAllowedCSIDrivers(fldPath.Child("allowedCSIDrivers"), spec.AllowedCSIDrivers)...)
|
||||
allErrs = append(allErrs, validatePodSecurityPolicySysctls(fldPath.Child("allowedUnsafeSysctls"), spec.AllowedUnsafeSysctls)...)
|
||||
allErrs = append(allErrs, validatePodSecurityPolicySysctls(fldPath.Child("forbiddenSysctls"), spec.ForbiddenSysctls)...)
|
||||
allErrs = append(allErrs, validatePodSecurityPolicySysctlListsDoNotOverlap(fldPath.Child("allowedUnsafeSysctls"), fldPath.Child("forbiddenSysctls"), spec.AllowedUnsafeSysctls, spec.ForbiddenSysctls)...)
|
||||
allErrs = append(allErrs, validateRuntimeClassStrategy(fldPath.Child("runtimeClass"), spec.RuntimeClass)...)
|
||||
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// ValidatePodSecurityPolicySpecificAnnotations validates annotations and returns an ErrorList
|
||||
// with any errors.
|
||||
func ValidatePodSecurityPolicySpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
||||
if p := annotations[apparmor.DefaultProfileAnnotationKey]; p != "" {
|
||||
if err := apparmor.ValidateProfileFormat(p); err != nil {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Key(apparmor.DefaultProfileAnnotationKey), p, err.Error()))
|
||||
}
|
||||
}
|
||||
if allowed := annotations[apparmor.AllowedProfilesAnnotationKey]; allowed != "" {
|
||||
for _, p := range strings.Split(allowed, ",") {
|
||||
if err := apparmor.ValidateProfileFormat(p); err != nil {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Key(apparmor.AllowedProfilesAnnotationKey), allowed, err.Error()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if p := annotations[seccomp.DefaultProfileAnnotationKey]; p != "" {
|
||||
allErrs = append(allErrs, apivalidation.ValidateSeccompProfile(p, fldPath.Key(seccomp.DefaultProfileAnnotationKey))...)
|
||||
}
|
||||
if allowed := annotations[seccomp.AllowedProfilesAnnotationKey]; allowed != "" {
|
||||
for _, p := range strings.Split(allowed, ",") {
|
||||
if p == seccomp.AllowAny {
|
||||
continue
|
||||
}
|
||||
allErrs = append(allErrs, apivalidation.ValidateSeccompProfile(p, fldPath.Key(seccomp.AllowedProfilesAnnotationKey))...)
|
||||
}
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// validatePSPAllowedHostPaths makes sure all allowed host paths follow:
|
||||
// 1. path prefix is required
|
||||
// 2. path prefix does not have any element which is ".."
|
||||
func validatePSPAllowedHostPaths(fldPath *field.Path, allowedHostPaths []policy.AllowedHostPath) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
||||
for i, target := range allowedHostPaths {
|
||||
if target.PathPrefix == "" {
|
||||
allErrs = append(allErrs, field.Required(fldPath.Index(i), "is required"))
|
||||
break
|
||||
}
|
||||
parts := strings.Split(filepath.ToSlash(target.PathPrefix), "/")
|
||||
for _, item := range parts {
|
||||
if item == ".." {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Index(i), target.PathPrefix, "must not contain '..'"))
|
||||
break // even for `../../..`, one error is sufficient to make the point
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func validatePSPAllowedFlexVolumes(fldPath *field.Path, flexVolumes []policy.AllowedFlexVolume) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
if len(flexVolumes) > 0 {
|
||||
for idx, fv := range flexVolumes {
|
||||
if len(fv.Driver) == 0 {
|
||||
allErrs = append(allErrs, field.Required(fldPath.Child("allowedFlexVolumes").Index(idx).Child("driver"),
|
||||
"must specify a driver"))
|
||||
}
|
||||
}
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func validatePSPAllowedCSIDrivers(fldPath *field.Path, csiDrivers []policy.AllowedCSIDriver) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
if len(csiDrivers) > 0 {
|
||||
for idx, csiDriver := range csiDrivers {
|
||||
fieldPath := fldPath.Child("allowedCSIDriver").Index(idx).Child("name")
|
||||
allErrs = append(allErrs, apivalidation.ValidateCSIDriverName(csiDriver.Name, fieldPath)...)
|
||||
}
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// validatePSPSELinux validates the SELinux fields of PodSecurityPolicy.
|
||||
func validatePSPSELinux(fldPath *field.Path, seLinux *policy.SELinuxStrategyOptions) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
||||
// ensure the selinux strategy has a valid rule
|
||||
supportedSELinuxRules := sets.NewString(
|
||||
string(policy.SELinuxStrategyMustRunAs),
|
||||
string(policy.SELinuxStrategyRunAsAny),
|
||||
)
|
||||
if !supportedSELinuxRules.Has(string(seLinux.Rule)) {
|
||||
allErrs = append(allErrs, field.NotSupported(fldPath.Child("rule"), seLinux.Rule, supportedSELinuxRules.List()))
|
||||
}
|
||||
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// validatePSPRunAsUser validates the RunAsUser fields of PodSecurityPolicy.
|
||||
func validatePSPRunAsUser(fldPath *field.Path, runAsUser *policy.RunAsUserStrategyOptions) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
||||
// ensure the user strategy has a valid rule
|
||||
supportedRunAsUserRules := sets.NewString(
|
||||
string(policy.RunAsUserStrategyMustRunAs),
|
||||
string(policy.RunAsUserStrategyMustRunAsNonRoot),
|
||||
string(policy.RunAsUserStrategyRunAsAny),
|
||||
)
|
||||
if !supportedRunAsUserRules.Has(string(runAsUser.Rule)) {
|
||||
allErrs = append(allErrs, field.NotSupported(fldPath.Child("rule"), runAsUser.Rule, supportedRunAsUserRules.List()))
|
||||
}
|
||||
|
||||
// validate range settings
|
||||
for idx, rng := range runAsUser.Ranges {
|
||||
allErrs = append(allErrs, validateUserIDRange(fldPath.Child("ranges").Index(idx), rng)...)
|
||||
}
|
||||
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// validatePSPRunAsGroup validates the RunAsGroup fields of PodSecurityPolicy.
|
||||
func validatePSPRunAsGroup(fldPath *field.Path, runAsGroup *policy.RunAsGroupStrategyOptions) field.ErrorList {
|
||||
var allErrs field.ErrorList
|
||||
|
||||
if runAsGroup == nil {
|
||||
return allErrs
|
||||
}
|
||||
|
||||
switch runAsGroup.Rule {
|
||||
case policy.RunAsGroupStrategyRunAsAny:
|
||||
if len(runAsGroup.Ranges) != 0 {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("ranges"), runAsGroup.Ranges, "Ranges must be empty"))
|
||||
}
|
||||
case policy.RunAsGroupStrategyMustRunAs, policy.RunAsGroupStrategyMayRunAs:
|
||||
if len(runAsGroup.Ranges) == 0 {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("ranges"), runAsGroup.Ranges, "must provide at least one range"))
|
||||
}
|
||||
// validate range settings
|
||||
for idx, rng := range runAsGroup.Ranges {
|
||||
allErrs = append(allErrs, validateGroupIDRange(fldPath.Child("ranges").Index(idx), rng)...)
|
||||
}
|
||||
default:
|
||||
supportedRunAsGroupRules := []string{
|
||||
string(policy.RunAsGroupStrategyMustRunAs),
|
||||
string(policy.RunAsGroupStrategyRunAsAny),
|
||||
string(policy.RunAsGroupStrategyMayRunAs),
|
||||
}
|
||||
allErrs = append(allErrs, field.NotSupported(fldPath.Child("rule"), runAsGroup.Rule, supportedRunAsGroupRules))
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// validatePSPFSGroup validates the FSGroupStrategyOptions fields of the PodSecurityPolicy.
|
||||
func validatePSPFSGroup(fldPath *field.Path, groupOptions *policy.FSGroupStrategyOptions) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
||||
supportedRules := sets.NewString(
|
||||
string(policy.FSGroupStrategyMustRunAs),
|
||||
string(policy.FSGroupStrategyMayRunAs),
|
||||
string(policy.FSGroupStrategyRunAsAny),
|
||||
)
|
||||
if !supportedRules.Has(string(groupOptions.Rule)) {
|
||||
allErrs = append(allErrs, field.NotSupported(fldPath.Child("rule"), groupOptions.Rule, supportedRules.List()))
|
||||
}
|
||||
|
||||
for idx, rng := range groupOptions.Ranges {
|
||||
allErrs = append(allErrs, validateGroupIDRange(fldPath.Child("ranges").Index(idx), rng)...)
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// validatePSPSupplementalGroup validates the SupplementalGroupsStrategyOptions fields of the PodSecurityPolicy.
|
||||
func validatePSPSupplementalGroup(fldPath *field.Path, groupOptions *policy.SupplementalGroupsStrategyOptions) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
||||
supportedRules := sets.NewString(
|
||||
string(policy.SupplementalGroupsStrategyRunAsAny),
|
||||
string(policy.SupplementalGroupsStrategyMayRunAs),
|
||||
string(policy.SupplementalGroupsStrategyMustRunAs),
|
||||
)
|
||||
if !supportedRules.Has(string(groupOptions.Rule)) {
|
||||
allErrs = append(allErrs, field.NotSupported(fldPath.Child("rule"), groupOptions.Rule, supportedRules.List()))
|
||||
}
|
||||
|
||||
for idx, rng := range groupOptions.Ranges {
|
||||
allErrs = append(allErrs, validateGroupIDRange(fldPath.Child("ranges").Index(idx), rng)...)
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// validatePodSecurityPolicyVolumes validates the volume fields of PodSecurityPolicy.
|
||||
func validatePodSecurityPolicyVolumes(fldPath *field.Path, volumes []policy.FSType) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
allowed := psputil.GetAllFSTypesAsSet()
|
||||
// add in the * value since that is a pseudo type that is not included by default
|
||||
allowed.Insert(string(policy.All))
|
||||
for _, v := range volumes {
|
||||
if !allowed.Has(string(v)) {
|
||||
allErrs = append(allErrs, field.NotSupported(fldPath.Child("volumes"), v, allowed.List()))
|
||||
}
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// validatePSPDefaultAllowPrivilegeEscalation validates the DefaultAllowPrivilegeEscalation field against the AllowPrivilegeEscalation field of a PodSecurityPolicy.
|
||||
func validatePSPDefaultAllowPrivilegeEscalation(fldPath *field.Path, defaultAllowPrivilegeEscalation *bool, allowPrivilegeEscalation bool) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
if defaultAllowPrivilegeEscalation != nil && *defaultAllowPrivilegeEscalation && !allowPrivilegeEscalation {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath, defaultAllowPrivilegeEscalation, "Cannot set DefaultAllowPrivilegeEscalation to true without also setting AllowPrivilegeEscalation to true"))
|
||||
}
|
||||
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// validatePSPAllowedProcMountTypes validates the DefaultAllowPrivilegeEscalation field against the AllowPrivilegeEscalation field of a PodSecurityPolicy.
|
||||
func validatePSPAllowedProcMountTypes(fldPath *field.Path, allowedProcMountTypes []core.ProcMountType) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
for i, procMountType := range allowedProcMountTypes {
|
||||
if err := apivalidation.ValidateProcMountType(fldPath.Index(i), procMountType); err != nil {
|
||||
allErrs = append(allErrs, err)
|
||||
}
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
const sysctlPatternSegmentFmt string = "([a-z0-9][-_a-z0-9]*)?[a-z0-9*]"
|
||||
|
||||
// SysctlPatternFmt is a regex used for matching valid sysctl patterns.
|
||||
const SysctlPatternFmt string = "(" + apivalidation.SysctlSegmentFmt + "\\.)*" + sysctlPatternSegmentFmt
|
||||
|
||||
var sysctlPatternRegexp = regexp.MustCompile("^" + SysctlPatternFmt + "$")
|
||||
|
||||
// IsValidSysctlPattern checks if name is a valid sysctl pattern.
|
||||
func IsValidSysctlPattern(name string) bool {
|
||||
if len(name) > apivalidation.SysctlMaxLength {
|
||||
return false
|
||||
}
|
||||
return sysctlPatternRegexp.MatchString(name)
|
||||
}
|
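IsValidSysctlPattern accepts plain sysctl names plus patterns whose final segment ends in (or is) "*", subject to the usual sysctl name length cap. A rough standalone approximation follows; the per-segment regex is assumed here, since apivalidation.SysctlSegmentFmt is defined outside this diff, and the length check is omitted.

package main

import (
    "fmt"
    "regexp"
)

// Assumed stand-in for apivalidation.SysctlSegmentFmt; illustrative only.
const sysctlSegmentFmt = "[a-z0-9]([-_a-z0-9]*[a-z0-9])?"

// From the deleted file: the final segment may additionally end in '*'.
const sysctlPatternSegmentFmt = "([a-z0-9][-_a-z0-9]*)?[a-z0-9*]"

var sysctlPatternRegexp = regexp.MustCompile("^(" + sysctlSegmentFmt + "\\.)*" + sysctlPatternSegmentFmt + "$")

func main() {
    for _, s := range []string{"net.ipv4.ip_forward", "net.ipv4.*", "Not.A.Sysctl!"} {
        fmt.Println(s, sysctlPatternRegexp.MatchString(s))
    }
}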
||||
|
||||
func validatePodSecurityPolicySysctlListsDoNotOverlap(allowedSysctlsFldPath, forbiddenSysctlsFldPath *field.Path, allowedUnsafeSysctls, forbiddenSysctls []string) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
for i, allowedSysctl := range allowedUnsafeSysctls {
|
||||
isAllowedSysctlPattern := false
|
||||
allowedSysctlPrefix := ""
|
||||
if strings.HasSuffix(allowedSysctl, "*") {
|
||||
isAllowedSysctlPattern = true
|
||||
allowedSysctlPrefix = strings.TrimSuffix(allowedSysctl, "*")
|
||||
}
|
||||
for j, forbiddenSysctl := range forbiddenSysctls {
|
||||
isForbiddenSysctlPattern := false
|
||||
forbiddenSysctlPrefix := ""
|
||||
if strings.HasSuffix(forbiddenSysctl, "*") {
|
||||
isForbiddenSysctlPattern = true
|
||||
forbiddenSysctlPrefix = strings.TrimSuffix(forbiddenSysctl, "*")
|
||||
}
|
||||
switch {
|
||||
case isAllowedSysctlPattern && isForbiddenSysctlPattern:
|
||||
if strings.HasPrefix(allowedSysctlPrefix, forbiddenSysctlPrefix) {
|
||||
allErrs = append(allErrs, field.Invalid(allowedSysctlsFldPath.Index(i), allowedUnsafeSysctls[i], fmt.Sprintf("sysctl overlaps with %v", forbiddenSysctl)))
|
||||
} else if strings.HasPrefix(forbiddenSysctlPrefix, allowedSysctlPrefix) {
|
||||
allErrs = append(allErrs, field.Invalid(forbiddenSysctlsFldPath.Index(j), forbiddenSysctls[j], fmt.Sprintf("sysctl overlaps with %v", allowedSysctl)))
|
||||
}
|
||||
case isAllowedSysctlPattern:
|
||||
if strings.HasPrefix(forbiddenSysctl, allowedSysctlPrefix) {
|
||||
allErrs = append(allErrs, field.Invalid(forbiddenSysctlsFldPath.Index(j), forbiddenSysctls[j], fmt.Sprintf("sysctl overlaps with %v", allowedSysctl)))
|
||||
}
|
||||
case isForbiddenSysctlPattern:
|
||||
if strings.HasPrefix(allowedSysctl, forbiddenSysctlPrefix) {
|
||||
allErrs = append(allErrs, field.Invalid(allowedSysctlsFldPath.Index(i), allowedUnsafeSysctls[i], fmt.Sprintf("sysctl overlaps with %v", forbiddenSysctl)))
|
||||
}
|
||||
default:
|
||||
if allowedSysctl == forbiddenSysctl {
|
||||
allErrs = append(allErrs, field.Invalid(allowedSysctlsFldPath.Index(i), allowedUnsafeSysctls[i], fmt.Sprintf("sysctl overlaps with %v", forbiddenSysctl)))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// validatePodSecurityPolicySysctls validates the sysctls fields of PodSecurityPolicy.
|
||||
func validatePodSecurityPolicySysctls(fldPath *field.Path, sysctls []string) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
||||
if len(sysctls) == 0 {
|
||||
return allErrs
|
||||
}
|
||||
|
||||
coversAll := false
|
||||
for i, s := range sysctls {
|
||||
if len(s) == 0 {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Index(i), sysctls[i], fmt.Sprintf("empty sysctl not allowed")))
|
||||
} else if !IsValidSysctlPattern(string(s)) {
|
||||
allErrs = append(
|
||||
allErrs,
|
||||
field.Invalid(fldPath.Index(i), sysctls[i], fmt.Sprintf("must have at most %d characters and match regex %s",
|
||||
apivalidation.SysctlMaxLength,
|
||||
SysctlPatternFmt,
|
||||
)),
|
||||
)
|
||||
} else if s[0] == '*' {
|
||||
coversAll = true
|
||||
}
|
||||
}
|
||||
|
||||
if coversAll && len(sysctls) > 1 {
|
||||
allErrs = append(allErrs, field.Forbidden(fldPath.Child("items"), fmt.Sprintf("if '*' is present, must not specify other sysctls")))
|
||||
}
|
||||
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func validateUserIDRange(fldPath *field.Path, rng policy.IDRange) field.ErrorList {
|
||||
return validateIDRanges(fldPath, rng.Min, rng.Max)
|
||||
}
|
||||
|
||||
func validateGroupIDRange(fldPath *field.Path, rng policy.IDRange) field.ErrorList {
|
||||
return validateIDRanges(fldPath, rng.Min, rng.Max)
|
||||
}
|
||||
|
||||
// validateIDRanges ensures the range is valid.
|
||||
func validateIDRanges(fldPath *field.Path, min, max int64) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
||||
// if 0 <= Min <= Max then we do not need to validate max. It is always greater than or
|
||||
// equal to 0 and Min.
|
||||
if min < 0 {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("min"), min, "min cannot be negative"))
|
||||
}
|
||||
if max < 0 {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("max"), max, "max cannot be negative"))
|
||||
}
|
||||
if min > max {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("min"), min, "min cannot be greater than max"))
|
||||
}
|
||||
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// validatePSPCapsAgainstDrops ensures an allowed cap is not listed in the required drops.
|
||||
func validatePSPCapsAgainstDrops(requiredDrops []core.Capability, capsToCheck []core.Capability, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
if requiredDrops == nil {
|
||||
return allErrs
|
||||
}
|
||||
for _, cap := range capsToCheck {
|
||||
if hasCap(cap, requiredDrops) {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath, cap,
|
||||
fmt.Sprintf("capability is listed in %s and requiredDropCapabilities", fldPath.String())))
|
||||
}
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// validateRuntimeClassStrategy ensures all the RuntimeClass restrictions are valid.
|
||||
func validateRuntimeClassStrategy(fldPath *field.Path, rc *policy.RuntimeClassStrategyOptions) field.ErrorList {
|
||||
if rc == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var allErrs field.ErrorList
|
||||
|
||||
allowed := map[string]bool{}
|
||||
for i, name := range rc.AllowedRuntimeClassNames {
|
||||
if name != policy.AllowAllRuntimeClassNames {
|
||||
allErrs = append(allErrs, apivalidation.ValidateRuntimeClassName(name, fldPath.Child("allowedRuntimeClassNames").Index(i))...)
|
||||
}
|
||||
if allowed[name] {
|
||||
allErrs = append(allErrs, field.Duplicate(fldPath.Child("allowedRuntimeClassNames").Index(i), name))
|
||||
}
|
||||
allowed[name] = true
|
||||
}
|
||||
|
||||
if rc.DefaultRuntimeClassName != nil {
|
||||
allErrs = append(allErrs, apivalidation.ValidateRuntimeClassName(*rc.DefaultRuntimeClassName, fldPath.Child("defaultRuntimeClassName"))...)
|
||||
if !allowed[*rc.DefaultRuntimeClassName] && !allowed[policy.AllowAllRuntimeClassNames] {
|
||||
allErrs = append(allErrs, field.Required(fldPath.Child("allowedRuntimeClassNames"),
|
||||
fmt.Sprintf("default %q must be allowed", *rc.DefaultRuntimeClassName)))
|
||||
}
|
||||
}
|
||||
|
||||
if allowed[policy.AllowAllRuntimeClassNames] && len(rc.AllowedRuntimeClassNames) > 1 {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("allowedRuntimeClassNames"), rc.AllowedRuntimeClassNames, "if '*' is present, must not specify other RuntimeClass names"))
|
||||
}
|
||||
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// ValidatePodSecurityPolicyUpdate validates a PSP for updates.
|
||||
func ValidatePodSecurityPolicyUpdate(old *policy.PodSecurityPolicy, new *policy.PodSecurityPolicy) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&new.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...)
|
||||
allErrs = append(allErrs, ValidatePodSecurityPolicySpecificAnnotations(new.Annotations, field.NewPath("metadata").Child("annotations"))...)
|
||||
allErrs = append(allErrs, ValidatePodSecurityPolicySpec(&new.Spec, field.NewPath("spec"))...)
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// hasCap checks for needle in haystack.
|
||||
func hasCap(needle core.Capability, haystack []core.Capability) bool {
|
||||
for _, c := range haystack {
|
||||
if needle == c {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
2 vendor/k8s.io/kubernetes/pkg/apis/scheduling/types.go generated vendored

@ -69,7 +69,7 @@ type PriorityClass struct {
    Description string

    // PreemptionPolicy is the Policy for preempting pods with lower priority.
    // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.
    // This field is beta-level.
    // +optional
    PreemptionPolicy *core.PreemptionPolicy
}
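A short sketch of setting the now beta-level field from client code, using the published scheduling/v1 and core/v1 APIs (an assumption; the diff shows the internal type). The class name and value are made up.

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    schedulingv1 "k8s.io/api/scheduling/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    never := corev1.PreemptionNever // pods at this priority queue up instead of evicting others
    pc := schedulingv1.PriorityClass{
        ObjectMeta:       metav1.ObjectMeta{Name: "batch-low"}, // hypothetical PriorityClass name
        Value:            1000,
        PreemptionPolicy: &never,
    }
    fmt.Printf("%s preemptionPolicy=%s\n", pc.Name, *pc.PreemptionPolicy)
}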
26
vendor/k8s.io/kubernetes/pkg/apis/storage/v1/util/BUILD
generated
vendored
26
vendor/k8s.io/kubernetes/pkg/apis/storage/v1/util/BUILD
generated
vendored
@ -1,26 +0,0 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["helpers.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/apis/storage/v1/util",
|
||||
deps = ["//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
57 vendor/k8s.io/kubernetes/pkg/apis/storage/v1/util/helpers.go generated vendored

@ -1,57 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// IsDefaultStorageClassAnnotation represents a StorageClass annotation that
// marks a class as the default StorageClass
const IsDefaultStorageClassAnnotation = "storageclass.kubernetes.io/is-default-class"

// BetaIsDefaultStorageClassAnnotation is the beta version of IsDefaultStorageClassAnnotation.
// TODO: remove Beta when no longer used
const BetaIsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class"

// IsDefaultAnnotationText returns a pretty Yes/No String if
// the annotation is set
// TODO: remove Beta when no longer needed
func IsDefaultAnnotationText(obj metav1.ObjectMeta) string {
    if obj.Annotations[IsDefaultStorageClassAnnotation] == "true" {
        return "Yes"
    }
    if obj.Annotations[BetaIsDefaultStorageClassAnnotation] == "true" {
        return "Yes"
    }

    return "No"
}

// IsDefaultAnnotation returns a boolean if
// the annotation is set
// TODO: remove Beta when no longer needed
func IsDefaultAnnotation(obj metav1.ObjectMeta) bool {
    if obj.Annotations[IsDefaultStorageClassAnnotation] == "true" {
        return true
    }
    if obj.Annotations[BetaIsDefaultStorageClassAnnotation] == "true" {
        return true
    }

    return false
}
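The removed helpers only read the default-class annotations shown above. A minimal sketch of marking a StorageClass as the cluster default and checking it with the same GA annotation key, using the published storage/v1 API; the class name is made up and rbd.csi.ceph.com is used as an example provisioner.

package main

import (
    "fmt"

    storagev1 "k8s.io/api/storage/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const isDefaultStorageClassAnnotation = "storageclass.kubernetes.io/is-default-class"

func main() {
    sc := storagev1.StorageClass{
        ObjectMeta: metav1.ObjectMeta{
            Name:        "ceph-rbd", // hypothetical StorageClass name
            Annotations: map[string]string{isDefaultStorageClassAnnotation: "true"},
        },
        Provisioner: "rbd.csi.ceph.com",
    }

    // Equivalent of the removed IsDefaultAnnotation helper, GA key only.
    isDefault := sc.Annotations[isDefaultStorageClassAnnotation] == "true"
    fmt.Println(sc.Name, "default:", isDefault)
}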