mirror of
https://github.com/ceph/ceph-csi.git
synced 2025-06-14 10:53:34 +00:00
rebase: update K8s packages to v0.32.1
Update K8s packages in go.mod to v0.32.1 Signed-off-by: Praveen M <m.praveen@ibm.com>
This commit is contained in:
202
vendor/k8s.io/dynamic-resource-allocation/LICENSE
generated
vendored
Normal file
202
vendor/k8s.io/dynamic-resource-allocation/LICENSE
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
47
vendor/k8s.io/dynamic-resource-allocation/api/conversion.go
generated
vendored
Normal file
47
vendor/k8s.io/dynamic-resource-allocation/api/conversion.go
generated
vendored
Normal file
@ -0,0 +1,47 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"unique"
|
||||
|
||||
conversion "k8s.io/apimachinery/pkg/conversion"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
var (
	// localSchemeBuilder accumulates this package's conversion
	// registration functions; init() in the generated code registers
	// RegisterConversions with it.
	localSchemeBuilder runtime.SchemeBuilder

	// AddToScheme applies every registered conversion function to the
	// given runtime.Scheme.
	AddToScheme = localSchemeBuilder.AddToScheme
)
|
||||
|
||||
func Convert_api_UniqueString_To_string(in *UniqueString, out *string, s conversion.Scope) error {
|
||||
if *in == NullUniqueString {
|
||||
*out = ""
|
||||
return nil
|
||||
}
|
||||
*out = in.String()
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_string_To_api_UniqueString(in *string, out *UniqueString, s conversion.Scope) error {
|
||||
if *in == "" {
|
||||
*out = NullUniqueString
|
||||
return nil
|
||||
}
|
||||
*out = UniqueString(unique.Make(*in))
|
||||
return nil
|
||||
}
|
22
vendor/k8s.io/dynamic-resource-allocation/api/doc.go
generated
vendored
Normal file
22
vendor/k8s.io/dynamic-resource-allocation/api/doc.go
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package api contains a variant of the API where strings are unique. These
|
||||
// unique strings are faster to compare and more efficient when used as key in
|
||||
// a map.
|
||||
//
|
||||
// +k8s:conversion-gen=k8s.io/api/resource/v1beta1
|
||||
package api
|
68
vendor/k8s.io/dynamic-resource-allocation/api/types.go
generated
vendored
Normal file
68
vendor/k8s.io/dynamic-resource-allocation/api/types.go
generated
vendored
Normal file
@ -0,0 +1,68 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// ResourceSlice is the internal variant of v1beta1.ResourceSlice in which
// strings are interned (UniqueString), making them cheaper to compare and
// to use as map keys.
type ResourceSlice struct {
	metav1.TypeMeta
	metav1.ObjectMeta
	Spec ResourceSliceSpec
}

// ResourceSliceSpec is the internal variant of v1beta1.ResourceSliceSpec.
type ResourceSliceSpec struct {
	Driver       UniqueString
	Pool         ResourcePool
	NodeName     UniqueString
	NodeSelector *v1.NodeSelector
	AllNodes     bool
	Devices      []Device
}

// ResourcePool is the internal variant of v1beta1.ResourcePool.
type ResourcePool struct {
	Name               UniqueString
	Generation         int64
	ResourceSliceCount int64
}

// Device is the internal variant of v1beta1.Device.
type Device struct {
	Name  UniqueString
	Basic *BasicDevice
}

// BasicDevice is the internal variant of v1beta1.BasicDevice.
type BasicDevice struct {
	Attributes map[QualifiedName]DeviceAttribute
	Capacity   map[QualifiedName]DeviceCapacity
}

// QualifiedName is the internal variant of v1beta1.QualifiedName.
type QualifiedName string

// FullyQualifiedName is the internal variant of v1beta1.FullyQualifiedName.
type FullyQualifiedName string

// DeviceAttribute is the internal variant of v1beta1.DeviceAttribute.
// At most one of the value pointers is expected to be set —
// NOTE(review): inferred from the v1beta1 API shape; confirm upstream.
type DeviceAttribute struct {
	IntValue     *int64
	BoolValue    *bool
	StringValue  *string
	VersionValue *string
}

// DeviceCapacity is the internal variant of v1beta1.DeviceCapacity.
type DeviceCapacity struct {
	Value resource.Quantity
}
|
41
vendor/k8s.io/dynamic-resource-allocation/api/uniquestring.go
generated
vendored
Normal file
41
vendor/k8s.io/dynamic-resource-allocation/api/uniquestring.go
generated
vendored
Normal file
@ -0,0 +1,41 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"unique"
|
||||
)
|
||||
|
||||
// NullUniqueString is a UniqueString which contains no string.
// It is the zero value of UniqueString.
var NullUniqueString UniqueString

// UniqueString is a wrapper around [unique.Handle[string]].
// Interned strings compare with == in constant time.
type UniqueString unique.Handle[string]
|
||||
|
||||
// String returns the string that is stored in the UniqueString.
// If the UniqueString is null, the empty string is returned.
func (us UniqueString) String() string {
	if us == NullUniqueString {
		return ""
	}
	return unique.Handle[string](us).Value()
}
|
||||
|
||||
// MakeUniqueString constructs a new unique string.
|
||||
func MakeUniqueString(str string) UniqueString {
|
||||
return UniqueString(unique.Make(str))
|
||||
}
|
331
vendor/k8s.io/dynamic-resource-allocation/api/zz_generated.conversion.go
generated
vendored
Normal file
331
vendor/k8s.io/dynamic-resource-allocation/api/zz_generated.conversion.go
generated
vendored
Normal file
@ -0,0 +1,331 @@
|
||||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by conversion-gen. DO NOT EDIT.
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
unsafe "unsafe"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
v1beta1 "k8s.io/api/resource/v1beta1"
|
||||
conversion "k8s.io/apimachinery/pkg/conversion"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// init hooks the generated conversion registrations into the package's
// scheme builder so AddToScheme installs them.
func init() {
	localSchemeBuilder.Register(RegisterConversions)
}
|
||||
|
||||
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
	// Generated conversions between the internal api types and v1beta1.
	if err := s.AddGeneratedConversionFunc((*BasicDevice)(nil), (*v1beta1.BasicDevice)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_api_BasicDevice_To_v1beta1_BasicDevice(a.(*BasicDevice), b.(*v1beta1.BasicDevice), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*v1beta1.BasicDevice)(nil), (*BasicDevice)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1beta1_BasicDevice_To_api_BasicDevice(a.(*v1beta1.BasicDevice), b.(*BasicDevice), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*Device)(nil), (*v1beta1.Device)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_api_Device_To_v1beta1_Device(a.(*Device), b.(*v1beta1.Device), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*v1beta1.Device)(nil), (*Device)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1beta1_Device_To_api_Device(a.(*v1beta1.Device), b.(*Device), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*DeviceAttribute)(nil), (*v1beta1.DeviceAttribute)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_api_DeviceAttribute_To_v1beta1_DeviceAttribute(a.(*DeviceAttribute), b.(*v1beta1.DeviceAttribute), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*v1beta1.DeviceAttribute)(nil), (*DeviceAttribute)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1beta1_DeviceAttribute_To_api_DeviceAttribute(a.(*v1beta1.DeviceAttribute), b.(*DeviceAttribute), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*DeviceCapacity)(nil), (*v1beta1.DeviceCapacity)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_api_DeviceCapacity_To_v1beta1_DeviceCapacity(a.(*DeviceCapacity), b.(*v1beta1.DeviceCapacity), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*v1beta1.DeviceCapacity)(nil), (*DeviceCapacity)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1beta1_DeviceCapacity_To_api_DeviceCapacity(a.(*v1beta1.DeviceCapacity), b.(*DeviceCapacity), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ResourcePool)(nil), (*v1beta1.ResourcePool)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_api_ResourcePool_To_v1beta1_ResourcePool(a.(*ResourcePool), b.(*v1beta1.ResourcePool), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*v1beta1.ResourcePool)(nil), (*ResourcePool)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1beta1_ResourcePool_To_api_ResourcePool(a.(*v1beta1.ResourcePool), b.(*ResourcePool), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ResourceSlice)(nil), (*v1beta1.ResourceSlice)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_api_ResourceSlice_To_v1beta1_ResourceSlice(a.(*ResourceSlice), b.(*v1beta1.ResourceSlice), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*v1beta1.ResourceSlice)(nil), (*ResourceSlice)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1beta1_ResourceSlice_To_api_ResourceSlice(a.(*v1beta1.ResourceSlice), b.(*ResourceSlice), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ResourceSliceSpec)(nil), (*v1beta1.ResourceSliceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_api_ResourceSliceSpec_To_v1beta1_ResourceSliceSpec(a.(*ResourceSliceSpec), b.(*v1beta1.ResourceSliceSpec), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*v1beta1.ResourceSliceSpec)(nil), (*ResourceSliceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1beta1_ResourceSliceSpec_To_api_ResourceSliceSpec(a.(*v1beta1.ResourceSliceSpec), b.(*ResourceSliceSpec), scope)
	}); err != nil {
		return err
	}
	// Manually written conversions (conversion.go) for UniqueString<->string,
	// registered with AddConversionFunc rather than AddGeneratedConversionFunc.
	if err := s.AddConversionFunc((*UniqueString)(nil), (*string)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_api_UniqueString_To_string(a.(*UniqueString), b.(*string), scope)
	}); err != nil {
		return err
	}
	if err := s.AddConversionFunc((*string)(nil), (*UniqueString)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_string_To_api_UniqueString(a.(*string), b.(*UniqueString), scope)
	}); err != nil {
		return err
	}
	return nil
}
|
||||
|
||||
func autoConvert_api_BasicDevice_To_v1beta1_BasicDevice(in *BasicDevice, out *v1beta1.BasicDevice, s conversion.Scope) error {
|
||||
out.Attributes = *(*map[v1beta1.QualifiedName]v1beta1.DeviceAttribute)(unsafe.Pointer(&in.Attributes))
|
||||
out.Capacity = *(*map[v1beta1.QualifiedName]v1beta1.DeviceCapacity)(unsafe.Pointer(&in.Capacity))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_api_BasicDevice_To_v1beta1_BasicDevice is an autogenerated conversion function.
|
||||
func Convert_api_BasicDevice_To_v1beta1_BasicDevice(in *BasicDevice, out *v1beta1.BasicDevice, s conversion.Scope) error {
|
||||
return autoConvert_api_BasicDevice_To_v1beta1_BasicDevice(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1beta1_BasicDevice_To_api_BasicDevice(in *v1beta1.BasicDevice, out *BasicDevice, s conversion.Scope) error {
|
||||
out.Attributes = *(*map[QualifiedName]DeviceAttribute)(unsafe.Pointer(&in.Attributes))
|
||||
out.Capacity = *(*map[QualifiedName]DeviceCapacity)(unsafe.Pointer(&in.Capacity))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1beta1_BasicDevice_To_api_BasicDevice is an autogenerated conversion function.
|
||||
func Convert_v1beta1_BasicDevice_To_api_BasicDevice(in *v1beta1.BasicDevice, out *BasicDevice, s conversion.Scope) error {
|
||||
return autoConvert_v1beta1_BasicDevice_To_api_BasicDevice(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_Device_To_v1beta1_Device(in *Device, out *v1beta1.Device, s conversion.Scope) error {
|
||||
if err := Convert_api_UniqueString_To_string(&in.Name, &out.Name, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.Basic = (*v1beta1.BasicDevice)(unsafe.Pointer(in.Basic))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_api_Device_To_v1beta1_Device is an autogenerated conversion function.
|
||||
func Convert_api_Device_To_v1beta1_Device(in *Device, out *v1beta1.Device, s conversion.Scope) error {
|
||||
return autoConvert_api_Device_To_v1beta1_Device(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1beta1_Device_To_api_Device(in *v1beta1.Device, out *Device, s conversion.Scope) error {
|
||||
if err := Convert_string_To_api_UniqueString(&in.Name, &out.Name, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.Basic = (*BasicDevice)(unsafe.Pointer(in.Basic))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1beta1_Device_To_api_Device is an autogenerated conversion function.
|
||||
func Convert_v1beta1_Device_To_api_Device(in *v1beta1.Device, out *Device, s conversion.Scope) error {
|
||||
return autoConvert_v1beta1_Device_To_api_Device(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_DeviceAttribute_To_v1beta1_DeviceAttribute(in *DeviceAttribute, out *v1beta1.DeviceAttribute, s conversion.Scope) error {
|
||||
out.IntValue = (*int64)(unsafe.Pointer(in.IntValue))
|
||||
out.BoolValue = (*bool)(unsafe.Pointer(in.BoolValue))
|
||||
out.StringValue = (*string)(unsafe.Pointer(in.StringValue))
|
||||
out.VersionValue = (*string)(unsafe.Pointer(in.VersionValue))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_api_DeviceAttribute_To_v1beta1_DeviceAttribute is an autogenerated conversion function.
|
||||
func Convert_api_DeviceAttribute_To_v1beta1_DeviceAttribute(in *DeviceAttribute, out *v1beta1.DeviceAttribute, s conversion.Scope) error {
|
||||
return autoConvert_api_DeviceAttribute_To_v1beta1_DeviceAttribute(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1beta1_DeviceAttribute_To_api_DeviceAttribute(in *v1beta1.DeviceAttribute, out *DeviceAttribute, s conversion.Scope) error {
|
||||
out.IntValue = (*int64)(unsafe.Pointer(in.IntValue))
|
||||
out.BoolValue = (*bool)(unsafe.Pointer(in.BoolValue))
|
||||
out.StringValue = (*string)(unsafe.Pointer(in.StringValue))
|
||||
out.VersionValue = (*string)(unsafe.Pointer(in.VersionValue))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1beta1_DeviceAttribute_To_api_DeviceAttribute is an autogenerated conversion function.
|
||||
func Convert_v1beta1_DeviceAttribute_To_api_DeviceAttribute(in *v1beta1.DeviceAttribute, out *DeviceAttribute, s conversion.Scope) error {
|
||||
return autoConvert_v1beta1_DeviceAttribute_To_api_DeviceAttribute(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_DeviceCapacity_To_v1beta1_DeviceCapacity(in *DeviceCapacity, out *v1beta1.DeviceCapacity, s conversion.Scope) error {
|
||||
out.Value = in.Value
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_api_DeviceCapacity_To_v1beta1_DeviceCapacity is an autogenerated conversion function.
|
||||
func Convert_api_DeviceCapacity_To_v1beta1_DeviceCapacity(in *DeviceCapacity, out *v1beta1.DeviceCapacity, s conversion.Scope) error {
|
||||
return autoConvert_api_DeviceCapacity_To_v1beta1_DeviceCapacity(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1beta1_DeviceCapacity_To_api_DeviceCapacity(in *v1beta1.DeviceCapacity, out *DeviceCapacity, s conversion.Scope) error {
|
||||
out.Value = in.Value
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1beta1_DeviceCapacity_To_api_DeviceCapacity is an autogenerated conversion function.
|
||||
func Convert_v1beta1_DeviceCapacity_To_api_DeviceCapacity(in *v1beta1.DeviceCapacity, out *DeviceCapacity, s conversion.Scope) error {
|
||||
return autoConvert_v1beta1_DeviceCapacity_To_api_DeviceCapacity(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_ResourcePool_To_v1beta1_ResourcePool(in *ResourcePool, out *v1beta1.ResourcePool, s conversion.Scope) error {
|
||||
if err := Convert_api_UniqueString_To_string(&in.Name, &out.Name, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.Generation = in.Generation
|
||||
out.ResourceSliceCount = in.ResourceSliceCount
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_api_ResourcePool_To_v1beta1_ResourcePool is an autogenerated conversion function.
|
||||
func Convert_api_ResourcePool_To_v1beta1_ResourcePool(in *ResourcePool, out *v1beta1.ResourcePool, s conversion.Scope) error {
|
||||
return autoConvert_api_ResourcePool_To_v1beta1_ResourcePool(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1beta1_ResourcePool_To_api_ResourcePool(in *v1beta1.ResourcePool, out *ResourcePool, s conversion.Scope) error {
|
||||
if err := Convert_string_To_api_UniqueString(&in.Name, &out.Name, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.Generation = in.Generation
|
||||
out.ResourceSliceCount = in.ResourceSliceCount
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1beta1_ResourcePool_To_api_ResourcePool is an autogenerated conversion function.
// It delegates to the autoConvert helper; no manual conversion logic is needed for this type.
func Convert_v1beta1_ResourcePool_To_api_ResourcePool(in *v1beta1.ResourcePool, out *ResourcePool, s conversion.Scope) error {
	return autoConvert_v1beta1_ResourcePool_To_api_ResourcePool(in, out, s)
}
|
||||
|
||||
// autoConvert_api_ResourceSlice_To_v1beta1_ResourceSlice copies the shared
// ObjectMeta directly and converts the Spec via its typed conversion function.
func autoConvert_api_ResourceSlice_To_v1beta1_ResourceSlice(in *ResourceSlice, out *v1beta1.ResourceSlice, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_api_ResourceSliceSpec_To_v1beta1_ResourceSliceSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	return nil
}
|
||||
|
||||
// Convert_api_ResourceSlice_To_v1beta1_ResourceSlice is an autogenerated conversion function.
// It delegates to the autoConvert helper; no manual conversion logic is needed for this type.
func Convert_api_ResourceSlice_To_v1beta1_ResourceSlice(in *ResourceSlice, out *v1beta1.ResourceSlice, s conversion.Scope) error {
	return autoConvert_api_ResourceSlice_To_v1beta1_ResourceSlice(in, out, s)
}
|
||||
|
||||
// autoConvert_v1beta1_ResourceSlice_To_api_ResourceSlice copies the shared
// ObjectMeta directly and converts the Spec via its typed conversion function.
func autoConvert_v1beta1_ResourceSlice_To_api_ResourceSlice(in *v1beta1.ResourceSlice, out *ResourceSlice, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_v1beta1_ResourceSliceSpec_To_api_ResourceSliceSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	return nil
}
|
||||
|
||||
// Convert_v1beta1_ResourceSlice_To_api_ResourceSlice is an autogenerated conversion function.
// It delegates to the autoConvert helper; no manual conversion logic is needed for this type.
func Convert_v1beta1_ResourceSlice_To_api_ResourceSlice(in *v1beta1.ResourceSlice, out *ResourceSlice, s conversion.Scope) error {
	return autoConvert_v1beta1_ResourceSlice_To_api_ResourceSlice(in, out, s)
}
|
||||
|
||||
// autoConvert_api_ResourceSliceSpec_To_v1beta1_ResourceSliceSpec converts the
// internal ResourceSliceSpec field by field. String-like fields go through the
// UniqueString helpers, nested structs through their typed converters.
func autoConvert_api_ResourceSliceSpec_To_v1beta1_ResourceSliceSpec(in *ResourceSliceSpec, out *v1beta1.ResourceSliceSpec, s conversion.Scope) error {
	if err := Convert_api_UniqueString_To_string(&in.Driver, &out.Driver, s); err != nil {
		return err
	}
	if err := Convert_api_ResourcePool_To_v1beta1_ResourcePool(&in.Pool, &out.Pool, s); err != nil {
		return err
	}
	if err := Convert_api_UniqueString_To_string(&in.NodeName, &out.NodeName, s); err != nil {
		return err
	}
	// The generator emits an unsafe pointer cast here: both NodeSelector types
	// are treated as layout-identical, so the output aliases the input's memory
	// instead of deep-copying it.
	out.NodeSelector = (*v1.NodeSelector)(unsafe.Pointer(in.NodeSelector))
	out.AllNodes = in.AllNodes
	if in.Devices != nil {
		// Element-wise conversion; the slice is rebuilt because Device
		// requires a typed conversion rather than a pointer cast.
		in, out := &in.Devices, &out.Devices
		*out = make([]v1beta1.Device, len(*in))
		for i := range *in {
			if err := Convert_api_Device_To_v1beta1_Device(&(*in)[i], &(*out)[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Devices = nil
	}
	return nil
}
|
||||
|
||||
// Convert_api_ResourceSliceSpec_To_v1beta1_ResourceSliceSpec is an autogenerated conversion function.
// It delegates to the autoConvert helper; no manual conversion logic is needed for this type.
func Convert_api_ResourceSliceSpec_To_v1beta1_ResourceSliceSpec(in *ResourceSliceSpec, out *v1beta1.ResourceSliceSpec, s conversion.Scope) error {
	return autoConvert_api_ResourceSliceSpec_To_v1beta1_ResourceSliceSpec(in, out, s)
}
|
||||
|
||||
// autoConvert_v1beta1_ResourceSliceSpec_To_api_ResourceSliceSpec converts the
// v1beta1 ResourceSliceSpec field by field, mirroring the opposite direction.
func autoConvert_v1beta1_ResourceSliceSpec_To_api_ResourceSliceSpec(in *v1beta1.ResourceSliceSpec, out *ResourceSliceSpec, s conversion.Scope) error {
	if err := Convert_string_To_api_UniqueString(&in.Driver, &out.Driver, s); err != nil {
		return err
	}
	if err := Convert_v1beta1_ResourcePool_To_api_ResourcePool(&in.Pool, &out.Pool, s); err != nil {
		return err
	}
	if err := Convert_string_To_api_UniqueString(&in.NodeName, &out.NodeName, s); err != nil {
		return err
	}
	// Unsafe cast: the output NodeSelector aliases the input's memory
	// (no deep copy), relying on identical struct layout.
	out.NodeSelector = (*v1.NodeSelector)(unsafe.Pointer(in.NodeSelector))
	out.AllNodes = in.AllNodes
	if in.Devices != nil {
		// Element-wise conversion; Device needs a typed converter, so the
		// slice cannot be pointer-cast.
		in, out := &in.Devices, &out.Devices
		*out = make([]Device, len(*in))
		for i := range *in {
			if err := Convert_v1beta1_Device_To_api_Device(&(*in)[i], &(*out)[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Devices = nil
	}
	return nil
}
|
||||
|
||||
// Convert_v1beta1_ResourceSliceSpec_To_api_ResourceSliceSpec is an autogenerated conversion function.
// It delegates to the autoConvert helper; no manual conversion logic is needed for this type.
func Convert_v1beta1_ResourceSliceSpec_To_api_ResourceSliceSpec(in *v1beta1.ResourceSliceSpec, out *ResourceSliceSpec, s conversion.Scope) error {
	return autoConvert_v1beta1_ResourceSliceSpec_To_api_ResourceSliceSpec(in, out, s)
}
|
79
vendor/k8s.io/dynamic-resource-allocation/cel/cache.go
generated
vendored
Normal file
79
vendor/k8s.io/dynamic-resource-allocation/cel/cache.go
generated
vendored
Normal file
@ -0,0 +1,79 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cel
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"k8s.io/utils/keymutex"
|
||||
"k8s.io/utils/lru"
|
||||
)
|
||||
|
||||
// Cache is a thread-safe LRU cache for a compiled CEL expression.
type Cache struct {
	// compileMutex serializes compilation per expression key so that the
	// same expression is never compiled concurrently by multiple callers.
	compileMutex keymutex.KeyMutex
	// cacheMutex guards cache; the underlying lru.Cache is not safe for
	// concurrent use on its own.
	cacheMutex sync.RWMutex
	cache      *lru.Cache
}
|
||||
|
||||
// NewCache creates a cache. The maximum number of entries determines
|
||||
// how many entries are cached at most before dropping the oldest
|
||||
// entry.
|
||||
func NewCache(maxCacheEntries int) *Cache {
|
||||
return &Cache{
|
||||
compileMutex: keymutex.NewHashed(0),
|
||||
cache: lru.New(maxCacheEntries),
|
||||
}
|
||||
}
|
||||
|
||||
// GetOrCompile checks whether the cache already has a compilation result
// and returns that if available. Otherwise it compiles, stores successful
// results and returns the new result.
//
// Failed compilations are NOT cached: only results without an Error are
// added, so a bad expression is recompiled on every call.
func (c *Cache) GetOrCompile(expression string) CompilationResult {
	// Compiling a CEL expression is expensive enough that it is cheaper
	// to lock a mutex than doing it several times in parallel.
	// Locking per key still lets different expressions compile concurrently.
	c.compileMutex.LockKey(expression)
	//nolint:errcheck // Only returns an error for unknown keys, which isn't the case here.
	defer c.compileMutex.UnlockKey(expression)

	// Double-check under the key lock: another goroutine may have
	// compiled and stored the result while we waited.
	cached := c.get(expression)
	if cached != nil {
		return *cached
	}

	expr := GetCompiler().CompileCELExpression(expression, Options{})
	if expr.Error == nil {
		c.add(expression, &expr)
	}
	return expr
}
|
||||
|
||||
// add stores a compilation result under its expression, taking the write
// lock because lru.Cache is not safe for concurrent use.
func (c *Cache) add(expression string, expr *CompilationResult) {
	c.cacheMutex.Lock()
	defer c.cacheMutex.Unlock()
	c.cache.Add(expression, expr)
}
|
||||
|
||||
func (c *Cache) get(expression string) *CompilationResult {
|
||||
c.cacheMutex.RLock()
|
||||
defer c.cacheMutex.RUnlock()
|
||||
expr, found := c.cache.Get(expression)
|
||||
if !found {
|
||||
return nil
|
||||
}
|
||||
return expr.(*CompilationResult)
|
||||
}
|
324
vendor/k8s.io/dynamic-resource-allocation/cel/compile.go
generated
vendored
Normal file
324
vendor/k8s.io/dynamic-resource-allocation/cel/compile.go
generated
vendored
Normal file
@ -0,0 +1,324 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cel
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/blang/semver/v4"
|
||||
"github.com/google/cel-go/cel"
|
||||
"github.com/google/cel-go/common/types"
|
||||
"github.com/google/cel-go/common/types/ref"
|
||||
"github.com/google/cel-go/common/types/traits"
|
||||
"github.com/google/cel-go/ext"
|
||||
|
||||
resourceapi "k8s.io/api/resource/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/util/version"
|
||||
celconfig "k8s.io/apiserver/pkg/apis/cel"
|
||||
apiservercel "k8s.io/apiserver/pkg/cel"
|
||||
"k8s.io/apiserver/pkg/cel/environment"
|
||||
"k8s.io/apiserver/pkg/cel/library"
|
||||
"k8s.io/utils/ptr"
|
||||
)
|
||||
|
||||
const (
	// Names of the variables that a CEL selector expression can reference:
	// the top-level "device" object and its "driver", "attributes" and
	// "capacity" fields.
	deviceVar     = "device"
	driverVar     = "driver"
	attributesVar = "attributes"
	capacityVar   = "capacity"
)
|
||||
|
||||
var (
	// lazyCompilerInit guards the one-time construction of lazyCompiler
	// in GetCompiler.
	lazyCompilerInit sync.Once
	lazyCompiler     *compiler
)
|
||||
|
||||
// GetCompiler returns the process-wide compiler instance, constructing it
// on first use. All callers share the same instance.
func GetCompiler() *compiler {
	lazyCompilerInit.Do(func() {
		lazyCompiler = newCompiler()
	})
	return lazyCompiler
}
|
||||
|
||||
// CompilationResult represents a compiled expression.
type CompilationResult struct {
	// Program is the evaluable form of the expression; nil when Error is set.
	Program cel.Program
	// Error describes why compilation (or cost estimation) failed.
	Error *apiservercel.Error
	// Expression is the original source text that was compiled.
	Expression string
	// OutputType is the declared result type of the expression.
	OutputType *cel.Type
	// Environment is the CEL environment the expression was compiled in.
	Environment *cel.Env

	// MaxCost represents the worst-case cost of the compiled MessageExpression in terms of CEL's cost units,
	// as used by cel.EstimateCost.
	MaxCost uint64

	// emptyMapVal is the value returned for unknown domain lookups, see
	// newStringInterfaceMapWithDefault.
	emptyMapVal ref.Val
}
|
||||
|
||||
// Device defines the input values for a CEL selector expression.
type Device struct {
	// Driver gets used as domain for any attribute which does not already
	// have a domain prefix. If set, then it is also made available as a
	// string attribute.
	Driver string
	// Attributes are exposed to the expression as device.attributes,
	// grouped by domain.
	Attributes map[resourceapi.QualifiedName]resourceapi.DeviceAttribute
	// Capacity is exposed to the expression as device.capacity,
	// grouped by domain.
	Capacity map[resourceapi.QualifiedName]resourceapi.DeviceCapacity
}
|
||||
|
||||
// compiler bundles the versioned CEL environments used for compilation.
type compiler struct {
	envset *environment.EnvSet
}
|
||||
|
||||
func newCompiler() *compiler {
|
||||
return &compiler{envset: mustBuildEnv()}
|
||||
}
|
||||
|
||||
// Options contains several additional parameters
// for [CompileCELExpression]. All of them have reasonable
// defaults.
type Options struct {
	// EnvType allows to override the default environment type [environment.StoredExpressions].
	EnvType *environment.Type

	// CostLimit allows overriding the default runtime cost limit [resourceapi.CELSelectorExpressionMaxCost].
	CostLimit *uint64
}
|
||||
|
||||
// CompileCELExpression returns a compiled CEL expression. It evaluates to bool.
//
// Failures are reported through CompilationResult.Error rather than a
// separate error return; a result with a nil Error is ready for evaluation
// via DeviceMatches.
//
// TODO (https://github.com/kubernetes/kubernetes/issues/125826): validate AST to detect invalid attribute names.
func (c compiler) CompileCELExpression(expression string, options Options) CompilationResult {
	// resultError builds a failed result that still carries the original
	// expression for error reporting by callers.
	resultError := func(errorString string, errType apiservercel.ErrorType) CompilationResult {
		return CompilationResult{
			Error: &apiservercel.Error{
				Type:   errType,
				Detail: errorString,
			},
			Expression: expression,
		}
	}

	env, err := c.envset.Env(ptr.Deref(options.EnvType, environment.StoredExpressions))
	if err != nil {
		return resultError(fmt.Sprintf("unexpected error loading CEL environment: %v", err), apiservercel.ErrorTypeInternal)
	}

	// We don't have a SizeEstimator. The potential size of the input (= a
	// device) is already declared in the definition of the environment.
	estimator := &library.CostEstimator{}

	ast, issues := env.Compile(expression)
	if issues != nil {
		return resultError("compilation failed: "+issues.String(), apiservercel.ErrorTypeInvalid)
	}
	// The expression must produce a bool; AnyType is tolerated because the
	// actual type may only be known at evaluation time.
	expectedReturnType := cel.BoolType
	if ast.OutputType() != expectedReturnType &&
		ast.OutputType() != cel.AnyType {
		return resultError(fmt.Sprintf("must evaluate to %v or the unknown type, not %v", expectedReturnType.String(), ast.OutputType().String()), apiservercel.ErrorTypeInvalid)
	}
	_, err = cel.AstToCheckedExpr(ast)
	if err != nil {
		// should be impossible since env.Compile returned no issues
		return resultError("unexpected compilation error: "+err.Error(), apiservercel.ErrorTypeInternal)
	}
	prog, err := env.Program(ast,
		// The Kubernetes CEL base environment sets the VAP limit as runtime cost limit.
		// DRA has its own default cost limit and also allows the caller to change that
		// limit.
		cel.CostLimit(ptr.Deref(options.CostLimit, resourceapi.CELSelectorExpressionMaxCost)),
		cel.InterruptCheckFrequency(celconfig.CheckFrequency),
	)
	if err != nil {
		return resultError("program instantiation failed: "+err.Error(), apiservercel.ErrorTypeInternal)
	}

	compilationResult := CompilationResult{
		Program:     prog,
		Expression:  expression,
		OutputType:  ast.OutputType(),
		Environment: env,
		// Pre-computed empty map, shared by all evaluations as the default
		// for unknown attribute/capacity domains.
		emptyMapVal: env.CELTypeAdapter().NativeToValue(map[string]any{}),
	}

	// A failed cost estimate marks the result as errored but still returns
	// the partially populated result (Program etc. remain set).
	costEst, err := env.EstimateCost(ast, estimator)
	if err != nil {
		compilationResult.Error = &apiservercel.Error{Type: apiservercel.ErrorTypeInternal, Detail: "cost estimation failed: " + err.Error()}
		return compilationResult
	}

	compilationResult.MaxCost = costEst.Max
	return compilationResult
}
|
||||
|
||||
// getAttributeValue returns the native representation of the one value that
|
||||
// should be stored in the attribute, otherwise an error. An error is
|
||||
// also returned when there is no supported value.
|
||||
func getAttributeValue(attr resourceapi.DeviceAttribute) (any, error) {
|
||||
switch {
|
||||
case attr.IntValue != nil:
|
||||
return *attr.IntValue, nil
|
||||
case attr.BoolValue != nil:
|
||||
return *attr.BoolValue, nil
|
||||
case attr.StringValue != nil:
|
||||
return *attr.StringValue, nil
|
||||
case attr.VersionValue != nil:
|
||||
v, err := semver.Parse(*attr.VersionValue)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parse semantic version: %w", err)
|
||||
}
|
||||
return apiservercel.Semver{Version: v}, nil
|
||||
default:
|
||||
return nil, errors.New("unsupported attribute value")
|
||||
}
|
||||
}
|
||||
|
||||
// boolType caches the reflect.Type of bool, used by DeviceMatches to convert
// the CEL result into a native Go value.
var boolType = reflect.TypeOf(true)
|
||||
|
||||
// DeviceMatches evaluates the compiled expression against one device and
// reports whether the device satisfies the selector. The returned
// EvalDetails, when non-nil, carry evaluation metadata (also on error).
func (c CompilationResult) DeviceMatches(ctx context.Context, input Device) (bool, *cel.EvalDetails, error) {
	// TODO (future): avoid building these maps and instead use a proxy
	// which wraps the underlying maps and directly looks up values.
	attributes := make(map[string]any)
	for name, attr := range input.Attributes {
		value, err := getAttributeValue(attr)
		if err != nil {
			return false, nil, fmt.Errorf("attribute %s: %w", name, err)
		}
		// Qualified names are grouped into nested maps keyed first by
		// domain, then by the bare identifier.
		domain, id := parseQualifiedName(name, input.Driver)
		if attributes[domain] == nil {
			attributes[domain] = make(map[string]any)
		}
		attributes[domain].(map[string]any)[id] = value
	}

	capacity := make(map[string]any)
	for name, cap := range input.Capacity {
		domain, id := parseQualifiedName(name, input.Driver)
		if capacity[domain] == nil {
			capacity[domain] = make(map[string]apiservercel.Quantity)
		}
		// NOTE(review): &cap.Value takes the address of the loop variable;
		// safe under per-iteration loop variables (Go >= 1.22) — confirm the
		// module's minimum Go version.
		capacity[domain].(map[string]apiservercel.Quantity)[id] = apiservercel.Quantity{Quantity: &cap.Value}
	}

	// Unknown domains resolve to the shared empty map instead of an error,
	// see newStringInterfaceMapWithDefault.
	variables := map[string]any{
		deviceVar: map[string]any{
			driverVar:     input.Driver,
			attributesVar: newStringInterfaceMapWithDefault(c.Environment.CELTypeAdapter(), attributes, c.emptyMapVal),
			capacityVar:   newStringInterfaceMapWithDefault(c.Environment.CELTypeAdapter(), capacity, c.emptyMapVal),
		},
	}

	result, details, err := c.Program.ContextEval(ctx, variables)
	if err != nil {
		return false, details, err
	}
	resultAny, err := result.ConvertToNative(boolType)
	if err != nil {
		return false, details, fmt.Errorf("CEL result of type %s could not be converted to bool: %w", result.Type().TypeName(), err)
	}
	resultBool, ok := resultAny.(bool)
	if !ok {
		return false, details, fmt.Errorf("CEL native result value should have been a bool, got instead: %T", resultAny)
	}
	return resultBool, details, nil
}
|
||||
|
||||
// mustBuildEnv constructs the versioned CEL environment set for DRA selector
// expressions, declaring the "device" variable with its attributes/capacity
// maps. It panics on construction errors, which indicate a programming bug.
func mustBuildEnv() *environment.EnvSet {
	envset := environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true /* strictCost */)
	// Small helpers to cut down on the boilerplate of declaring object fields.
	field := func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField {
		return apiservercel.NewDeclField(name, declType, required, nil, nil)
	}
	fields := func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField {
		result := make(map[string]*apiservercel.DeclField, len(fields))
		for _, f := range fields {
			result[f.Name] = f
		}
		return result
	}
	// The "device" object: driver string plus two nested maps
	// (domain -> name -> value), bounded by the API's per-device limit.
	deviceType := apiservercel.NewObjectType("kubernetes.DRADevice", fields(
		field(driverVar, apiservercel.StringType, true),
		field(attributesVar, apiservercel.NewMapType(apiservercel.StringType, apiservercel.NewMapType(apiservercel.StringType, apiservercel.AnyType, resourceapi.ResourceSliceMaxAttributesAndCapacitiesPerDevice), resourceapi.ResourceSliceMaxAttributesAndCapacitiesPerDevice), true),
		field(capacityVar, apiservercel.NewMapType(apiservercel.StringType, apiservercel.NewMapType(apiservercel.StringType, apiservercel.QuantityDeclType, resourceapi.ResourceSliceMaxAttributesAndCapacitiesPerDevice), resourceapi.ResourceSliceMaxAttributesAndCapacitiesPerDevice), true),
	))

	versioned := []environment.VersionedOptions{
		{
			IntroducedVersion: version.MajorMinor(1, 31),
			EnvOptions: []cel.EnvOption{
				cel.Variable(deviceVar, deviceType.CelType()),

				environment.UnversionedLib(library.SemverLib),

				// https://pkg.go.dev/github.com/google/cel-go/ext#Bindings
				//
				// This is useful to simplify attribute lookups because the
				// domain only needs to be given once:
				//
				// cel.bind(dra, device.attributes["dra.example.com"], dra.oneBool && dra.anotherBool)
				ext.Bindings(ext.BindingsVersion(0)),
			},
			DeclTypes: []*apiservercel.DeclType{
				deviceType,
			},
		},
	}
	envset, err := envset.Extend(versioned...)
	if err != nil {
		panic(fmt.Errorf("internal error building CEL environment: %w", err))
	}
	return envset
}
|
||||
|
||||
// parseQualifiedName splits into domain and identified, using the default domain
|
||||
// if the name does not contain one.
|
||||
func parseQualifiedName(name resourceapi.QualifiedName, defaultDomain string) (string, string) {
|
||||
sep := strings.Index(string(name), "/")
|
||||
if sep == -1 {
|
||||
return defaultDomain, string(name)
|
||||
}
|
||||
return string(name[0:sep]), string(name[sep+1:])
|
||||
}
|
||||
|
||||
// newStringInterfaceMapWithDefault is like
// https://pkg.go.dev/github.com/google/cel-go@v0.20.1/common/types#NewStringInterfaceMap,
// except that looking up an unknown key returns a default value.
// This keeps selector expressions from failing on unknown domains.
func newStringInterfaceMapWithDefault(adapter types.Adapter, value map[string]any, defaultValue ref.Val) traits.Mapper {
	return mapper{
		Mapper:       types.NewStringInterfaceMap(adapter, value),
		defaultValue: defaultValue,
	}
}
|
||||
|
||||
// mapper wraps a CEL Mapper so that lookups of unknown keys yield
// defaultValue instead of "not found".
type mapper struct {
	traits.Mapper
	defaultValue ref.Val
}
|
||||
|
||||
// Find wraps the mapper's Find so that a default empty map is returned when
|
||||
// the lookup did not find the entry.
|
||||
func (m mapper) Find(key ref.Val) (ref.Val, bool) {
|
||||
value, found := m.Mapper.Find(key)
|
||||
if found {
|
||||
return value, true
|
||||
}
|
||||
|
||||
return m.defaultValue, true
|
||||
}
|
48
vendor/k8s.io/dynamic-resource-allocation/resourceclaim/pod.go
generated
vendored
Normal file
48
vendor/k8s.io/dynamic-resource-allocation/resourceclaim/pod.go
generated
vendored
Normal file
@ -0,0 +1,48 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package resourceclaim
|
||||
|
||||
import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/utils/ptr"
|
||||
)
|
||||
|
||||
// PodStatusEqual checks that both slices have the same number
|
||||
// of entries and that the pairs of entries are semantically
|
||||
// equivalent.
|
||||
//
|
||||
// The order of the entries matters: two slices with semantically
|
||||
// equivalent entries in different order are not equal. This is
|
||||
// done for the sake of performance because typically the
|
||||
// order of entries doesn't change.
|
||||
func PodStatusEqual(statusA, statusB []corev1.PodResourceClaimStatus) bool {
|
||||
if len(statusA) != len(statusB) {
|
||||
return false
|
||||
}
|
||||
// In most cases, status entries only get added once and not modified.
|
||||
// But this cannot be guaranteed, so for the sake of correctness in all
|
||||
// cases this code here has to check.
|
||||
for i := range statusA {
|
||||
if statusA[i].Name != statusB[i].Name {
|
||||
return false
|
||||
}
|
||||
if !ptr.Equal(statusA[i].ResourceClaimName, statusB[i].ResourceClaimName) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
108
vendor/k8s.io/dynamic-resource-allocation/resourceclaim/resourceclaim.go
generated
vendored
Normal file
108
vendor/k8s.io/dynamic-resource-allocation/resourceclaim/resourceclaim.go
generated
vendored
Normal file
@ -0,0 +1,108 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package resourceclaim provides code that supports the usual pattern
|
||||
// for accessing the ResourceClaim that is referenced by a PodResourceClaim:
|
||||
//
|
||||
// - determine the ResourceClaim name that corresponds to the PodResourceClaim
|
||||
// - retrieve the ResourceClaim
|
||||
// - verify that the ResourceClaim is owned by the pod if generated from a template
|
||||
// - use the ResourceClaim
|
||||
package resourceclaim
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
resourceapi "k8s.io/api/resource/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
var (
	// ErrAPIUnsupported is wrapped by the actual errors returned by Name and
	// indicates that none of the required fields are set.
	// Callers detect it with errors.Is.
	ErrAPIUnsupported = errors.New("none of the supported fields are set")

	// ErrClaimNotFound is wrapped by the actual errors returned by Name and
	// indicates that the claim has not been created yet.
	// Callers detect it with errors.Is.
	ErrClaimNotFound = errors.New("ResourceClaim not created yet")
)
|
||||
|
||||
// Name returns the name of the ResourceClaim object that gets referenced by or
// created for the PodResourceClaim. Three different results are possible:
//
//   - An error is returned when some field is not set as expected (either the
//     input is invalid or the API got extended and the library and the client
//     using it need to be updated) or the claim hasn't been created yet.
//
//     The error includes pod and pod claim name and the unexpected field and
//     is derived from one of the pre-defined errors in this package.
//
//   - A nil string pointer and no error when the ResourceClaim intentionally
//     didn't get created and the PodResourceClaim can be ignored.
//
//   - A pointer to the name and no error when the ResourceClaim got created.
//     In this case the boolean determines whether IsForPod must be called
//     after retrieving the ResourceClaim and before using it.
//
// Determining the name depends on Kubernetes >= 1.28.
func Name(pod *v1.Pod, podClaim *v1.PodResourceClaim) (name *string, mustCheckOwner bool, err error) {
	switch {
	case podClaim.ResourceClaimName != nil:
		// Direct reference to an existing claim: no ownership check needed.
		return podClaim.ResourceClaimName, false, nil
	case podClaim.ResourceClaimTemplateName != nil:
		// Generated from a template: the actual claim name is recorded in
		// the pod status once the claim has been created.
		for _, status := range pod.Status.ResourceClaimStatuses {
			if status.Name == podClaim.Name {
				return status.ResourceClaimName, true, nil
			}
		}
		return nil, false, fmt.Errorf(`pod "%s/%s": %w`, pod.Namespace, pod.Name, ErrClaimNotFound)
	default:
		return nil, false, fmt.Errorf(`pod "%s/%s", spec.resourceClaim %q: %w`, pod.Namespace, pod.Name, podClaim.Name, ErrAPIUnsupported)
	}
}
|
||||
|
||||
// IsForPod checks that the ResourceClaim is the one that
|
||||
// was created for the Pod. It returns an error that is informative
|
||||
// enough to be returned by the caller without adding further details
|
||||
// about the Pod or ResourceClaim.
|
||||
func IsForPod(pod *v1.Pod, claim *resourceapi.ResourceClaim) error {
|
||||
// Checking the namespaces is just a precaution. The caller should
|
||||
// never pass in a ResourceClaim that isn't from the same namespace as the
|
||||
// Pod.
|
||||
if claim.Namespace != pod.Namespace || !metav1.IsControlledBy(claim, pod) {
|
||||
return fmt.Errorf("ResourceClaim %s/%s was not created for pod %s/%s (pod is not owner)", claim.Namespace, claim.Name, pod.Namespace, pod.Name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsReservedForPod checks whether a claim lists the pod as one of the objects
|
||||
// that the claim was reserved for.
|
||||
func IsReservedForPod(pod *v1.Pod, claim *resourceapi.ResourceClaim) bool {
|
||||
for _, reserved := range claim.Status.ReservedFor {
|
||||
if reserved.UID == pod.UID {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// CanBeReserved checks whether the claim could be reserved for another object.
// The claim parameter is currently unused because the answer does not depend
// on the claim yet.
func CanBeReserved(claim *resourceapi.ResourceClaim) bool {
	// Currently no restrictions on sharing...
	return true
}
|
877
vendor/k8s.io/dynamic-resource-allocation/structured/allocator.go
generated
vendored
Normal file
877
vendor/k8s.io/dynamic-resource-allocation/structured/allocator.go
generated
vendored
Normal file
@ -0,0 +1,877 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package structured
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"strings"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
resourceapi "k8s.io/api/resource/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
draapi "k8s.io/dynamic-resource-allocation/api"
|
||||
"k8s.io/dynamic-resource-allocation/cel"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/utils/ptr"
|
||||
)
|
||||
|
||||
// deviceClassLister abstracts read access to DeviceClass objects so that the
// allocator can be used with different informer/cache implementations.
type deviceClassLister interface {
	// List returns a list of all DeviceClasses.
	List() ([]*resourceapi.DeviceClass, error)
	// Get returns the DeviceClass with the given className.
	Get(className string) (*resourceapi.DeviceClass, error)
}
|
||||
|
||||
// Allocator calculates how to allocate a set of unallocated claims which use
// structured parameters.
//
// It needs as input the node where the allocated claims are meant to be
// available and the current state of the cluster (claims, classes, resource
// slices).
type Allocator struct {
	// adminAccessEnabled toggles support for the AdminAccess feature.
	adminAccessEnabled bool
	// claimsToAllocate are the claims handled by this allocator instance.
	claimsToAllocate []*resourceapi.ResourceClaim
	// allocatedDevices are devices that are already in use and thus
	// presumably unavailable to new allocations — confirm against Allocate.
	allocatedDevices sets.Set[DeviceID]
	classLister      deviceClassLister
	// slices is the current set of ResourceSlices describing devices.
	slices []*resourceapi.ResourceSlice
	// celCache caches compiled CEL selector expressions across calls.
	celCache *cel.Cache
}
|
||||
|
||||
// NewAllocator returns an allocator for a certain set of claims or an error if
|
||||
// some problem was detected which makes it impossible to allocate claims.
|
||||
//
|
||||
// The returned Allocator can be used multiple times and is thread-safe.
|
||||
func NewAllocator(ctx context.Context,
|
||||
adminAccessEnabled bool,
|
||||
claimsToAllocate []*resourceapi.ResourceClaim,
|
||||
allocatedDevices sets.Set[DeviceID],
|
||||
classLister deviceClassLister,
|
||||
slices []*resourceapi.ResourceSlice,
|
||||
celCache *cel.Cache,
|
||||
) (*Allocator, error) {
|
||||
return &Allocator{
|
||||
adminAccessEnabled: adminAccessEnabled,
|
||||
claimsToAllocate: claimsToAllocate,
|
||||
allocatedDevices: allocatedDevices,
|
||||
classLister: classLister,
|
||||
slices: slices,
|
||||
celCache: celCache,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ClaimsToAllocate returns the claims that the allocator was created for.
// The returned slice is the allocator's own; callers must not modify it.
func (a *Allocator) ClaimsToAllocate() []*resourceapi.ResourceClaim {
	return a.claimsToAllocate
}
|
||||
|
||||
// Allocate calculates the allocation(s) for one particular node.
//
// It returns an error only if some fatal problem occurred. These are errors
// caused by invalid input data, like for example errors in CEL selectors, so a
// scheduler should abort and report that problem instead of trying to find
// other nodes where the error doesn't occur.
//
// In the future, special errors will be defined which enable the caller to
// identify which object (like claim or class) caused the problem. This will
// enable reporting the problem as event for those objects.
//
// If the claims cannot be allocated, it returns nil. This includes the
// situation where the resource slices are incomplete at the moment.
//
// If the claims can be allocated, then it prepares one allocation result for
// each unallocated claim. It is the responsibility of the caller to persist
// those allocations, if desired.
//
// Allocate is thread-safe. If the caller wants to get the node name included
// in log output, it can use contextual logging and add the node as an
// additional value. A name can also be useful because log messages do not
// have a common prefix. V(5) is used for one-time log entries, V(6) for important
// progress reports, and V(7) for detailed debug output.
func (a *Allocator) Allocate(ctx context.Context, node *v1.Node) (finalResult []resourceapi.AllocationResult, finalErr error) {
	// Per-call scratch state; the shared Allocator fields stay read-only.
	alloc := &allocator{
		Allocator:            a,
		ctx:                  ctx, // all methods share the same a and thus ctx
		logger:               klog.FromContext(ctx),
		deviceMatchesRequest: make(map[matchKey]bool),
		constraints:          make([][]constraint, len(a.claimsToAllocate)),
		requestData:          make(map[requestIndices]requestData),
		result:               make([]internalAllocationResult, len(a.claimsToAllocate)),
	}
	alloc.logger.V(5).Info("Starting allocation", "numClaims", len(alloc.claimsToAllocate))
	// "success" is derived from the named result: a full solution fills one
	// entry per claim.
	defer alloc.logger.V(5).Info("Done with allocation", "success", len(finalResult) == len(alloc.claimsToAllocate), "err", finalErr)

	// First determine all eligible pools.
	pools, err := GatherPools(ctx, alloc.slices, node)
	if err != nil {
		return nil, fmt.Errorf("gather pool information: %w", err)
	}
	alloc.pools = pools
	if loggerV := alloc.logger.V(7); loggerV.Enabled() {
		loggerV.Info("Gathered pool information", "numPools", len(pools), "pools", pools)
	} else {
		alloc.logger.V(5).Info("Gathered pool information", "numPools", len(pools))
	}

	// We allocate one claim after the other and for each claim, all of
	// its requests. For each individual device we pick one possible
	// candidate after the other, checking constraints as we go.
	// Each chosen candidate is marked as "in use" and the process
	// continues, recursively. This way, all requests get matched against
	// all candidates in all possible orders.
	//
	// The first full solution is chosen.
	//
	// In other words, this is an exhaustive search. This is okay because
	// it aborts early. Once scoring gets added, more intelligence may be
	// needed to avoid trying "equivalent" solutions (two identical
	// requests, two identical devices, two solutions that are the same in
	// practice).

	// This is where we sanity check that we can actually handle the claims
	// and their requests. For each claim we determine how many devices
	// need to be allocated. If not all can be stored in the result, the
	// claim cannot be allocated.
	numDevicesTotal := 0
	for claimIndex, claim := range alloc.claimsToAllocate {
		numDevicesPerClaim := 0

		// If we have any request that wants "all" devices, we need to
		// figure out how much "all" is. If some pool is incomplete, we stop
		// here because allocation cannot succeed. Once we do scoring, we should
		// stop in all cases, not just when "all" devices are needed, because
		// pulling from an incomplete pool might not pick the best solution and it's
		// better to wait. This does not matter yet as long as the incomplete pool
		// has some matching device.
		for requestIndex := range claim.Spec.Devices.Requests {
			request := &claim.Spec.Devices.Requests[requestIndex]
			for i, selector := range request.Selectors {
				if selector.CEL == nil {
					// Unknown future selector type!
					return nil, fmt.Errorf("claim %s, request %s, selector #%d: CEL expression empty (unsupported selector type?)", klog.KObj(claim), request.Name, i)
				}
			}

			if !a.adminAccessEnabled && request.AdminAccess != nil {
				return nil, fmt.Errorf("claim %s, request %s: admin access is requested, but the feature is disabled", klog.KObj(claim), request.Name)
			}

			// Should be set. If it isn't, something changed and we should refuse to proceed.
			if request.DeviceClassName == "" {
				return nil, fmt.Errorf("claim %s, request %s: missing device class name (unsupported request type?)", klog.KObj(claim), request.Name)
			}
			class, err := alloc.classLister.Get(request.DeviceClassName)
			if err != nil {
				return nil, fmt.Errorf("claim %s, request %s: could not retrieve device class %s: %w", klog.KObj(claim), request.Name, request.DeviceClassName, err)
			}

			// Start collecting information about the request.
			// The class must be set and stored before calling isSelectable.
			requestData := requestData{
				class: class,
			}
			requestKey := requestIndices{claimIndex: claimIndex, requestIndex: requestIndex}
			alloc.requestData[requestKey] = requestData

			switch request.AllocationMode {
			case resourceapi.DeviceAllocationModeExactCount:
				numDevices := request.Count
				// Only relevant on 32-bit platforms where int is smaller
				// than the int64 Count field.
				if numDevices > math.MaxInt {
					// Allowed by API validation, but doesn't make sense.
					return nil, fmt.Errorf("claim %s, request %s: exact count %d is too large", klog.KObj(claim), request.Name, numDevices)
				}
				requestData.numDevices = int(numDevices)
			case resourceapi.DeviceAllocationModeAll:
				// Pre-compute the full list of matching devices so the
				// recursive search only has to verify availability.
				requestData.allDevices = make([]deviceWithID, 0, resourceapi.AllocationResultsMaxSize)
				for _, pool := range pools {
					if pool.IsIncomplete {
						return nil, fmt.Errorf("claim %s, request %s: asks for all devices, but resource pool %s is currently being updated", klog.KObj(claim), request.Name, pool.PoolID)
					}
					if pool.IsInvalid {
						return nil, fmt.Errorf("claim %s, request %s: asks for all devices, but resource pool %s is currently invalid", klog.KObj(claim), request.Name, pool.PoolID)
					}

					for _, slice := range pool.Slices {
						for deviceIndex := range slice.Spec.Devices {
							selectable, err := alloc.isSelectable(requestKey, slice, deviceIndex)
							if err != nil {
								return nil, err
							}
							if selectable {
								device := deviceWithID{
									id:    DeviceID{Driver: slice.Spec.Driver, Pool: slice.Spec.Pool.Name, Device: slice.Spec.Devices[deviceIndex].Name},
									basic: slice.Spec.Devices[deviceIndex].Basic,
									slice: slice,
								}
								requestData.allDevices = append(requestData.allDevices, device)
							}
						}
					}
				}
				requestData.numDevices = len(requestData.allDevices)
				alloc.logger.V(6).Info("Request for 'all' devices", "claim", klog.KObj(claim), "request", request.Name, "numDevicesPerRequest", requestData.numDevices)
			default:
				return nil, fmt.Errorf("claim %s, request %s: unsupported count mode %s", klog.KObj(claim), request.Name, request.AllocationMode)
			}
			// Store again: requestData is a value, so the earlier map entry
			// does not reflect the numDevices/allDevices updates.
			alloc.requestData[requestKey] = requestData
			numDevicesPerClaim += requestData.numDevices
		}
		alloc.logger.V(6).Info("Checked claim", "claim", klog.KObj(claim), "numDevices", numDevicesPerClaim)

		// Check that we don't end up with too many results.
		if numDevicesPerClaim > resourceapi.AllocationResultsMaxSize {
			return nil, fmt.Errorf("claim %s: number of requested devices %d exceeds the claim limit of %d", klog.KObj(claim), numDevicesPerClaim, resourceapi.AllocationResultsMaxSize)
		}

		// If we don't, then we can pre-allocate the result slices for
		// appending the actual results later.
		alloc.result[claimIndex].devices = make([]internalDeviceResult, 0, numDevicesPerClaim)

		// Constraints are assumed to be monotonic: once a constraint returns
		// false, adding more devices will not cause it to return true. This
		// allows the search to stop early once a constraint returns false.
		constraints := make([]constraint, len(claim.Spec.Devices.Constraints))
		for i, constraint := range claim.Spec.Devices.Constraints {
			switch {
			case constraint.MatchAttribute != nil:
				matchAttribute := draapi.FullyQualifiedName(*constraint.MatchAttribute)
				logger := alloc.logger
				if loggerV := alloc.logger.V(6); loggerV.Enabled() {
					logger = klog.LoggerWithName(logger, "matchAttributeConstraint")
					logger = klog.LoggerWithValues(logger, "matchAttribute", matchAttribute)
				}
				m := &matchAttributeConstraint{
					logger:        logger,
					requestNames:  sets.New(constraint.Requests...),
					attributeName: matchAttribute,
				}
				constraints[i] = m
			default:
				// Unknown constraint type!
				return nil, fmt.Errorf("claim %s, constraint #%d: empty constraint (unsupported constraint type?)", klog.KObj(claim), i)
			}
		}
		alloc.constraints[claimIndex] = constraints
		numDevicesTotal += numDevicesPerClaim
	}

	// Selecting a device for a request is independent of what has been
	// allocated already. Therefore the result of checking a request against
	// a device instance in the pool can be cached. The pointer to both
	// can serve as key because they are static for the duration of
	// the Allocate call and can be compared in Go.
	alloc.deviceMatchesRequest = make(map[matchKey]bool)

	// We can estimate the size based on what we need to allocate.
	alloc.allocatingDevices = make(map[DeviceID]bool, numDevicesTotal)

	alloc.logger.V(6).Info("Gathered information about devices", "numAllocated", len(alloc.allocatedDevices), "toBeAllocated", numDevicesTotal)

	// In practice, there aren't going to be many different CEL
	// expressions. Most likely, there is going to be handful of different
	// device classes that get used repeatedly. Different requests may all
	// use the same selector. Therefore compiling CEL expressions on demand
	// could be a useful performance enhancement. It's not implemented yet
	// because the key is more complex (just the string?) and the memory
	// for both key and cached content is larger than for device matches.
	//
	// We may also want to cache this in the shared [Allocator] instance,
	// which implies adding locking.

	// All errors get created such that they can be returned by Allocate
	// without further wrapping.
	done, err := alloc.allocateOne(deviceIndices{})
	if errors.Is(err, errStop) {
		// Not a fatal error, just "no allocation possible".
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	if !done {
		return nil, nil
	}

	// Convert the internal result into the API types that callers persist.
	result := make([]resourceapi.AllocationResult, len(alloc.result))
	for claimIndex, internalResult := range alloc.result {
		claim := alloc.claimsToAllocate[claimIndex]
		allocationResult := &result[claimIndex]
		allocationResult.Devices.Results = make([]resourceapi.DeviceRequestAllocationResult, len(internalResult.devices))
		for i, internal := range internalResult.devices {
			allocationResult.Devices.Results[i] = resourceapi.DeviceRequestAllocationResult{
				Request:     internal.request,
				Driver:      internal.id.Driver.String(),
				Pool:        internal.id.Pool.String(),
				Device:      internal.id.Device.String(),
				AdminAccess: internal.adminAccess,
			}
		}

		// Populate configs.
		for requestIndex := range claim.Spec.Devices.Requests {
			class := alloc.requestData[requestIndices{claimIndex: claimIndex, requestIndex: requestIndex}].class
			if class != nil {
				for _, config := range class.Spec.Config {
					allocationResult.Devices.Config = append(allocationResult.Devices.Config, resourceapi.DeviceAllocationConfiguration{
						Source:              resourceapi.AllocationConfigSourceClass,
						Requests:            nil, // All of them...
						DeviceConfiguration: config.DeviceConfiguration,
					})
				}
			}
		}
		for _, config := range claim.Spec.Devices.Config {
			allocationResult.Devices.Config = append(allocationResult.Devices.Config, resourceapi.DeviceAllocationConfiguration{
				Source:              resourceapi.AllocationConfigSourceClaim,
				Requests:            config.Requests,
				DeviceConfiguration: config.DeviceConfiguration,
			})
		}

		// Determine node selector.
		nodeSelector, err := alloc.createNodeSelector(internalResult.devices)
		if err != nil {
			return nil, fmt.Errorf("create NodeSelector for claim %s: %w", claim.Name, err)
		}
		allocationResult.NodeSelector = nodeSelector
	}

	return result, nil
}
|
||||
|
||||
// errStop is a special error that gets returned by allocateOne if it detects
// that allocation cannot succeed. Allocate translates it into a "no
// allocation possible" result (nil, nil) instead of reporting it as fatal.
var errStop = errors.New("stop allocation")
|
||||
|
||||
// allocator is used while an [Allocator.Allocate] is running. Only a single
// goroutine works with it, so there is no need for locking.
type allocator struct {
	*Allocator // shared, read-only input from NewAllocator

	ctx                  context.Context
	logger               klog.Logger
	pools                []*Pool                        // eligible pools for the node, from GatherPools
	deviceMatchesRequest map[matchKey]bool              // cache of isSelectable results
	constraints          [][]constraint                 // one list of constraints per claim
	requestData          map[requestIndices]requestData // one entry per request
	allocatingDevices    map[DeviceID]bool              // devices tentatively chosen by the current search
	result               []internalAllocationResult     // one entry per claim, filled during the search
}
|
||||
|
||||
// matchKey identifies a device/request pair. It is the key of the
// allocator.deviceMatchesRequest cache.
type matchKey struct {
	DeviceID
	requestIndices
}
|
||||
|
||||
// requestIndices identifies one specific request by its
// claim and request index. The indices refer to
// Allocator.claimsToAllocate and claim.Spec.Devices.Requests.
type requestIndices struct {
	claimIndex, requestIndex int
}
|
||||
|
||||
// deviceIndices identifies one specific required device inside
// a request of a certain claim. deviceIndex counts from zero up to
// the number of devices needed by that request.
type deviceIndices struct {
	claimIndex, requestIndex, deviceIndex int
}
|
||||
|
||||
// requestData holds the per-request information that Allocate computes once
// up-front and that the recursive search then reuses.
type requestData struct {
	class      *resourceapi.DeviceClass // resolved from request.DeviceClassName
	numDevices int                      // how many devices this request needs

	// pre-determined set of devices for allocating "all" devices
	allDevices []deviceWithID
}
|
||||
|
||||
// deviceWithID bundles a device with its ID and the resource slice
// it was found in.
type deviceWithID struct {
	id    DeviceID
	basic *draapi.BasicDevice
	slice *draapi.ResourceSlice
}
|
||||
|
||||
// internalAllocationResult accumulates the devices chosen for one claim
// while the search is running.
type internalAllocationResult struct {
	devices []internalDeviceResult
}
|
||||
|
||||
// internalDeviceResult records one chosen device; Allocate converts it into
// a resourceapi.DeviceRequestAllocationResult at the end.
type internalDeviceResult struct {
	request     string // name of the request this device satisfies
	id          DeviceID
	slice       *draapi.ResourceSlice
	adminAccess *bool // set only when admin access was requested
}
|
||||
|
||||
// constraint is informed about each device as the search tentatively
// allocates and deallocates it, and decides whether the candidate is
// acceptable for the claim.
type constraint interface {
	// add is called whenever a device is about to be allocated. It must
	// check whether the device matches the constraint and if yes,
	// track that it is allocated.
	add(requestName string, device *draapi.BasicDevice, deviceID DeviceID) bool

	// For every successful add there is exactly one matching removed call
	// with the exact same parameters.
	remove(requestName string, device *draapi.BasicDevice, deviceID DeviceID)
}
|
||||
|
||||
// matchAttributeConstraint compares an attribute value across devices.
// All devices must share the same value. When the set of devices is
// empty, any device that has the attribute can be added. After that,
// only matching devices can be added.
//
// We don't need to track *which* devices are part of the set, only
// how many.
type matchAttributeConstraint struct {
	logger        klog.Logger // Includes name and attribute name, so no need to repeat in log messages.
	requestNames  sets.Set[string] // empty set means the constraint applies to all requests
	attributeName draapi.FullyQualifiedName

	attribute  *draapi.DeviceAttribute // value of the first device added to the set
	numDevices int                     // current size of the matching set
}
|
||||
|
||||
func (m *matchAttributeConstraint) add(requestName string, device *draapi.BasicDevice, deviceID DeviceID) bool {
|
||||
if m.requestNames.Len() > 0 && !m.requestNames.Has(requestName) {
|
||||
// Device not affected by constraint.
|
||||
m.logger.V(7).Info("Constraint does not apply to request", "request", requestName)
|
||||
return true
|
||||
}
|
||||
|
||||
attribute := lookupAttribute(device, deviceID, m.attributeName)
|
||||
if attribute == nil {
|
||||
// Doesn't have the attribute.
|
||||
m.logger.V(7).Info("Constraint not satisfied, attribute not set")
|
||||
return false
|
||||
}
|
||||
|
||||
if m.numDevices == 0 {
|
||||
// The first device can always get picked.
|
||||
m.attribute = attribute
|
||||
m.numDevices = 1
|
||||
m.logger.V(7).Info("First in set")
|
||||
return true
|
||||
}
|
||||
|
||||
switch {
|
||||
case attribute.StringValue != nil:
|
||||
if m.attribute.StringValue == nil || *attribute.StringValue != *m.attribute.StringValue {
|
||||
m.logger.V(7).Info("String values different")
|
||||
return false
|
||||
}
|
||||
case attribute.IntValue != nil:
|
||||
if m.attribute.IntValue == nil || *attribute.IntValue != *m.attribute.IntValue {
|
||||
m.logger.V(7).Info("Int values different")
|
||||
return false
|
||||
}
|
||||
case attribute.BoolValue != nil:
|
||||
if m.attribute.BoolValue == nil || *attribute.BoolValue != *m.attribute.BoolValue {
|
||||
m.logger.V(7).Info("Bool values different")
|
||||
return false
|
||||
}
|
||||
case attribute.VersionValue != nil:
|
||||
// semver 2.0.0 requires that version strings are in their
|
||||
// minimal form (in particular, no leading zeros). Therefore a
|
||||
// strict "exact equal" check can do a string comparison.
|
||||
if m.attribute.VersionValue == nil || *attribute.VersionValue != *m.attribute.VersionValue {
|
||||
m.logger.V(7).Info("Version values different")
|
||||
return false
|
||||
}
|
||||
default:
|
||||
// Unknown value type, cannot match.
|
||||
m.logger.V(7).Info("Match attribute type unknown")
|
||||
return false
|
||||
}
|
||||
|
||||
m.numDevices++
|
||||
m.logger.V(7).Info("Constraint satisfied by device", "device", deviceID, "numDevices", m.numDevices)
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *matchAttributeConstraint) remove(requestName string, device *draapi.BasicDevice, deviceID DeviceID) {
|
||||
if m.requestNames.Len() > 0 && !m.requestNames.Has(requestName) {
|
||||
// Device not affected by constraint.
|
||||
return
|
||||
}
|
||||
|
||||
m.numDevices--
|
||||
m.logger.V(7).Info("Device removed from constraint set", "device", deviceID, "numDevices", m.numDevices)
|
||||
}
|
||||
|
||||
func lookupAttribute(device *draapi.BasicDevice, deviceID DeviceID, attributeName draapi.FullyQualifiedName) *draapi.DeviceAttribute {
|
||||
// Fully-qualified match?
|
||||
if attr, ok := device.Attributes[draapi.QualifiedName(attributeName)]; ok {
|
||||
return &attr
|
||||
}
|
||||
index := strings.Index(string(attributeName), "/")
|
||||
if index < 0 {
|
||||
// Should not happen for a valid fully qualified name.
|
||||
return nil
|
||||
}
|
||||
|
||||
if string(attributeName[0:index]) != deviceID.Driver.String() {
|
||||
// Not an attribute of the driver and not found above,
|
||||
// so it is not available.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Domain matches the driver, so let's check just the ID.
|
||||
if attr, ok := device.Attributes[draapi.QualifiedName(attributeName[index+1:])]; ok {
|
||||
return &attr
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// allocateOne iterates over all eligible devices (not in use, match selector,
// satisfy constraints) for a specific required device. It returns true if
// everything got allocated, an error if allocation needs to stop.
//
// It recurses with the next device/request/claim index after each tentative
// allocation and rolls that allocation back when the recursion reports
// failure, which implements the exhaustive backtracking search.
func (alloc *allocator) allocateOne(r deviceIndices) (bool, error) {
	if r.claimIndex >= len(alloc.claimsToAllocate) {
		// Done! If we were doing scoring, we would compare the current allocation result
		// against the previous one, keep the best, and continue. Without scoring, we stop
		// and use the first solution.
		alloc.logger.V(6).Info("Allocation result found")
		return true, nil
	}

	claim := alloc.claimsToAllocate[r.claimIndex]
	if r.requestIndex >= len(claim.Spec.Devices.Requests) {
		// Done with the claim, continue with the next one.
		return alloc.allocateOne(deviceIndices{claimIndex: r.claimIndex + 1})
	}

	// We already know how many devices per request are needed.
	// Ready to move on to the next request?
	requestData := alloc.requestData[requestIndices{claimIndex: r.claimIndex, requestIndex: r.requestIndex}]
	if r.deviceIndex >= requestData.numDevices {
		return alloc.allocateOne(deviceIndices{claimIndex: r.claimIndex, requestIndex: r.requestIndex + 1})
	}

	request := &alloc.claimsToAllocate[r.claimIndex].Spec.Devices.Requests[r.requestIndex]
	doAllDevices := request.AllocationMode == resourceapi.DeviceAllocationModeAll
	alloc.logger.V(6).Info("Allocating one device", "currentClaim", r.claimIndex, "totalClaims", len(alloc.claimsToAllocate), "currentRequest", r.requestIndex, "totalRequestsPerClaim", len(claim.Spec.Devices.Requests), "currentDevice", r.deviceIndex, "devicesPerRequest", requestData.numDevices, "allDevices", doAllDevices, "adminAccess", request.AdminAccess)

	if doAllDevices {
		// For "all" devices we already know which ones we need. We
		// just need to check whether we can use them.
		deviceWithID := requestData.allDevices[r.deviceIndex]
		success, _, err := alloc.allocateDevice(r, deviceWithID, true)
		if err != nil {
			return false, err
		}
		if !success {
			// The order in which we allocate "all" devices doesn't matter,
			// so we only try with the one which was up next. If we couldn't
			// get all of them, then there is no solution and we have to stop.
			return false, errStop
		}
		done, err := alloc.allocateOne(deviceIndices{claimIndex: r.claimIndex, requestIndex: r.requestIndex, deviceIndex: r.deviceIndex + 1})
		if err != nil {
			return false, err
		}
		if !done {
			// Backtrack.
			return false, nil
		}
		return done, nil
	}

	// We need to find suitable devices.
	for _, pool := range alloc.pools {
		for _, slice := range pool.Slices {
			for deviceIndex := range slice.Spec.Devices {
				deviceID := DeviceID{Driver: pool.Driver, Pool: pool.Pool, Device: slice.Spec.Devices[deviceIndex].Name}

				// Checking for "in use" is cheap and thus gets done first.
				if !ptr.Deref(request.AdminAccess, false) && (alloc.allocatedDevices.Has(deviceID) || alloc.allocatingDevices[deviceID]) {
					alloc.logger.V(7).Info("Device in use", "device", deviceID)
					continue
				}

				// Next check selectors.
				selectable, err := alloc.isSelectable(requestIndices{claimIndex: r.claimIndex, requestIndex: r.requestIndex}, slice, deviceIndex)
				if err != nil {
					return false, err
				}
				if !selectable {
					alloc.logger.V(7).Info("Device not selectable", "device", deviceID)
					continue
				}

				// If the pool is not valid, then fail now. It's okay when pools of one driver
				// are invalid if we allocate from some other pool, but it's not safe to
				// allocate from an invalid pool.
				if pool.IsInvalid {
					return false, fmt.Errorf("pool %s is invalid: %s", pool.Pool, pool.InvalidReason)
				}

				// Finally treat as allocated and move on to the next device.
				device := deviceWithID{
					id:    deviceID,
					basic: slice.Spec.Devices[deviceIndex].Basic,
					slice: slice,
				}
				allocated, deallocate, err := alloc.allocateDevice(r, device, false)
				if err != nil {
					return false, err
				}
				if !allocated {
					// In use or constraint violated...
					alloc.logger.V(7).Info("Device not usable", "device", deviceID)
					continue
				}
				done, err := alloc.allocateOne(deviceIndices{claimIndex: r.claimIndex, requestIndex: r.requestIndex, deviceIndex: r.deviceIndex + 1})
				if err != nil {
					return false, err
				}

				// If we found a solution, then we can stop.
				if done {
					return done, nil
				}

				// Otherwise try some other device after rolling back.
				deallocate()
			}
		}
	}

	// If we get here without finding a solution, then there is none.
	return false, nil
}
|
||||
|
||||
// isSelectable checks whether a device satisfies the request and class selectors.
// Results are cached per device/request pair in alloc.deviceMatchesRequest,
// because selector evaluation does not depend on what has been allocated so far.
func (alloc *allocator) isSelectable(r requestIndices, slice *draapi.ResourceSlice, deviceIndex int) (bool, error) {
	// This is the only supported device type at the moment.
	device := slice.Spec.Devices[deviceIndex].Basic
	if device == nil {
		// Must be some future, unknown device type. We cannot select it.
		return false, nil
	}

	deviceID := DeviceID{Driver: slice.Spec.Driver, Pool: slice.Spec.Pool.Name, Device: slice.Spec.Devices[deviceIndex].Name}
	matchKey := matchKey{DeviceID: deviceID, requestIndices: r}
	if matches, ok := alloc.deviceMatchesRequest[matchKey]; ok {
		// No need to check again.
		return matches, nil
	}

	// Class selectors are checked first; a class-level mismatch is cached
	// just like a request-level one.
	requestData := alloc.requestData[r]
	if requestData.class != nil {
		match, err := alloc.selectorsMatch(r, device, deviceID, requestData.class, requestData.class.Spec.Selectors)
		if err != nil {
			return false, err
		}
		if !match {
			alloc.deviceMatchesRequest[matchKey] = false
			return false, nil
		}
	}

	request := &alloc.claimsToAllocate[r.claimIndex].Spec.Devices.Requests[r.requestIndex]
	match, err := alloc.selectorsMatch(r, device, deviceID, nil, request.Selectors)
	if err != nil {
		return false, err
	}
	if !match {
		alloc.deviceMatchesRequest[matchKey] = false
		return false, nil
	}

	alloc.deviceMatchesRequest[matchKey] = true
	return true, nil
}
|
||||
|
||||
// selectorsMatch evaluates all CEL selectors against one device and reports
// whether every one of them matches. When class is non-nil, the selectors come
// from that device class and errors/logs reference the class; otherwise they
// come from the claim's request and errors/logs reference the claim.
func (alloc *allocator) selectorsMatch(r requestIndices, device *draapi.BasicDevice, deviceID DeviceID, class *resourceapi.DeviceClass, selectors []resourceapi.DeviceSelector) (bool, error) {
	for i, selector := range selectors {
		expr := alloc.celCache.GetOrCompile(selector.CEL.Expression)
		if expr.Error != nil {
			// Could happen if some future apiserver accepted some
			// future expression and then got downgraded. Normally
			// the "stored expression" mechanism prevents that, but
			// this code here might be more than one release older
			// than the cluster it runs in.
			if class != nil {
				return false, fmt.Errorf("class %s: selector #%d: CEL compile error: %w", class.Name, i, expr.Error)
			}
			return false, fmt.Errorf("claim %s: selector #%d: CEL compile error: %w", klog.KObj(alloc.claimsToAllocate[r.claimIndex]), i, expr.Error)
		}

		// If this conversion turns out to be expensive, the CEL package could be converted
		// to use unique strings.
		var d resourceapi.BasicDevice
		if err := draapi.Convert_api_BasicDevice_To_v1beta1_BasicDevice(device, &d, nil); err != nil {
			return false, fmt.Errorf("convert BasicDevice: %w", err)
		}
		matches, details, err := expr.DeviceMatches(alloc.ctx, cel.Device{Driver: deviceID.Driver.String(), Attributes: d.Attributes, Capacity: d.Capacity})
		if class != nil {
			alloc.logger.V(7).Info("CEL result", "device", deviceID, "class", klog.KObj(class), "selector", i, "expression", selector.CEL.Expression, "matches", matches, "actualCost", ptr.Deref(details.ActualCost(), 0), "err", err)
		} else {
			alloc.logger.V(7).Info("CEL result", "device", deviceID, "claim", klog.KObj(alloc.claimsToAllocate[r.claimIndex]), "selector", i, "expression", selector.CEL.Expression, "actualCost", ptr.Deref(details.ActualCost(), 0), "matches", matches, "err", err)
		}

		if err != nil {
			// TODO (future): more detailed errors which reference class resp. claim.
			if class != nil {
				return false, fmt.Errorf("class %s: selector #%d: CEL runtime error: %w", class.Name, i, err)
			}
			return false, fmt.Errorf("claim %s: selector #%d: CEL runtime error: %w", klog.KObj(alloc.claimsToAllocate[r.claimIndex]), i, err)
		}
		if !matches {
			return false, nil
		}
	}

	// All of them match.
	return true, nil
}
|
||||
|
||||
// allocateDevice checks device availability and constraints for one
// candidate. The device must be selectable.
//
// If that candidate works out okay, the shared state gets updated
// as if that candidate had been allocated. If allocation cannot continue later
// and must try something else, then the rollback function can be invoked to
// restore the previous state.
//
// With must=true (used for "all devices" requests) a constraint violation is
// treated as a fatal error rather than a reason to try another candidate.
func (alloc *allocator) allocateDevice(r deviceIndices, device deviceWithID, must bool) (bool, func(), error) {
	claim := alloc.claimsToAllocate[r.claimIndex]
	request := &claim.Spec.Devices.Requests[r.requestIndex]
	adminAccess := ptr.Deref(request.AdminAccess, false)
	// Admin access ignores the "in use" state and does not mark the device
	// as in use below.
	if !adminAccess && (alloc.allocatedDevices.Has(device.id) || alloc.allocatingDevices[device.id]) {
		alloc.logger.V(7).Info("Device in use", "device", device.id)
		return false, nil, nil
	}

	// It's available. Now check constraints.
	for i, constraint := range alloc.constraints[r.claimIndex] {
		added := constraint.add(request.Name, device.basic, device.id)
		if !added {
			if must {
				// It does not make sense to declare a claim where a constraint prevents getting
				// all devices. Treat this as an error.
				return false, nil, fmt.Errorf("claim %s, request %s: cannot add device %s because a claim constraint would not be satisfied", klog.KObj(claim), request.Name, device.id)
			}

			// Roll back for all previous constraints before we return.
			for e := 0; e < i; e++ {
				alloc.constraints[r.claimIndex][e].remove(request.Name, device.basic, device.id)
			}
			return false, nil, nil
		}
	}

	// All constraints satisfied. Mark as in use (unless we do admin access)
	// and record the result.
	alloc.logger.V(7).Info("Device allocated", "device", device.id)
	if !adminAccess {
		alloc.allocatingDevices[device.id] = true
	}
	result := internalDeviceResult{
		request: request.Name,
		id:      device.id,
		slice:   device.slice,
	}
	if adminAccess {
		result.adminAccess = &adminAccess
	}
	previousNumResults := len(alloc.result[r.claimIndex].devices)
	alloc.result[r.claimIndex].devices = append(alloc.result[r.claimIndex].devices, result)

	// The closure captures everything it needs to undo exactly this
	// allocation: constraint bookkeeping, the "allocating" flag, and the
	// appended result entry.
	return true, func() {
		for _, constraint := range alloc.constraints[r.claimIndex] {
			constraint.remove(request.Name, device.basic, device.id)
		}
		if !adminAccess {
			alloc.allocatingDevices[device.id] = false
		}
		// Truncate, but keep the underlying slice.
		alloc.result[r.claimIndex].devices = alloc.result[r.claimIndex].devices[:previousNumResults]
		alloc.logger.V(7).Info("Device deallocated", "device", device.id)
	}, nil
}
|
||||
|
||||
// createNodeSelector constructs a node selector for the allocation, if needed,
// otherwise it returns nil.
//
// A device bound to a specific node short-circuits to a metadata.name
// selector for that node; otherwise the per-slice node selectors are merged
// into a single term. nil means the allocation is usable everywhere.
func (alloc *allocator) createNodeSelector(result []internalDeviceResult) (*v1.NodeSelector, error) {
	// Selector with one term. That term gets extended with additional
	// requirements from the different devices.
	nodeSelector := &v1.NodeSelector{
		NodeSelectorTerms: []v1.NodeSelectorTerm{{}},
	}

	for i := range result {
		slice := result[i].slice
		if slice.Spec.NodeName != draapi.NullUniqueString {
			// At least one device is local to one node. This
			// restricts the allocation to that node.
			return &v1.NodeSelector{
				NodeSelectorTerms: []v1.NodeSelectorTerm{{
					MatchFields: []v1.NodeSelectorRequirement{{
						Key:      "metadata.name",
						Operator: v1.NodeSelectorOpIn,
						Values:   []string{slice.Spec.NodeName.String()},
					}},
				}},
			}, nil
		}
		if slice.Spec.NodeSelector != nil {
			switch len(slice.Spec.NodeSelector.NodeSelectorTerms) {
			case 0:
				// Nothing?
			case 1:
				// Add all terms if they are not present already.
				addNewNodeSelectorRequirements(slice.Spec.NodeSelector.NodeSelectorTerms[0].MatchFields, &nodeSelector.NodeSelectorTerms[0].MatchFields)
				addNewNodeSelectorRequirements(slice.Spec.NodeSelector.NodeSelectorTerms[0].MatchExpressions, &nodeSelector.NodeSelectorTerms[0].MatchExpressions)
			default:
				// This shouldn't occur, validation must prevent creation of such slices.
				return nil, fmt.Errorf("unsupported ResourceSlice.NodeSelector with %d terms", len(slice.Spec.NodeSelector.NodeSelectorTerms))
			}
		}
	}

	if len(nodeSelector.NodeSelectorTerms[0].MatchFields) > 0 || len(nodeSelector.NodeSelectorTerms[0].MatchExpressions) > 0 {
		// We have a valid node selector.
		return nodeSelector, nil
	}

	// Available everywhere.
	return nil, nil
}
|
||||
|
||||
func addNewNodeSelectorRequirements(from []v1.NodeSelectorRequirement, to *[]v1.NodeSelectorRequirement) {
|
||||
for _, requirement := range from {
|
||||
if !containsNodeSelectorRequirement(*to, requirement) {
|
||||
*to = append(*to, requirement)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func containsNodeSelectorRequirement(requirements []v1.NodeSelectorRequirement, requirement v1.NodeSelectorRequirement) bool {
|
||||
values := sets.New(requirement.Values...)
|
||||
for _, existingRequirement := range requirements {
|
||||
if existingRequirement.Key != requirement.Key {
|
||||
continue
|
||||
}
|
||||
if existingRequirement.Operator != requirement.Operator {
|
||||
continue
|
||||
}
|
||||
if !sets.New(existingRequirement.Values...).Equal(values) {
|
||||
continue
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
18
vendor/k8s.io/dynamic-resource-allocation/structured/doc.go
generated
vendored
Normal file
18
vendor/k8s.io/dynamic-resource-allocation/structured/doc.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package structured contains code for working with structured parameters.
|
||||
package structured
|
166
vendor/k8s.io/dynamic-resource-allocation/structured/pools.go
generated
vendored
Normal file
166
vendor/k8s.io/dynamic-resource-allocation/structured/pools.go
generated
vendored
Normal file
@ -0,0 +1,166 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package structured
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
resourceapi "k8s.io/api/resource/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
|
||||
draapi "k8s.io/dynamic-resource-allocation/api"
|
||||
)
|
||||
|
||||
// GatherPools collects information about all resource pools which provide
// devices that are accessible from the given node.
//
// Out-dated slices are silently ignored. Pools may be incomplete (not all
// required slices available) or invalid (for example, device names not unique).
// Both is recorded in the result.
func GatherPools(ctx context.Context, slices []*resourceapi.ResourceSlice, node *v1.Node) ([]*Pool, error) {
	pools := make(map[PoolID]*Pool)
	// A nil node is tolerated; in that case only slices marked for all
	// nodes (or whose selector matches a nil node) are gathered.
	nodeName := ""
	if node != nil {
		nodeName = node.Name
	}

	for _, slice := range slices {
		switch {
		case slice.Spec.NodeName != "":
			// Node-local slice: only relevant when it belongs to this node.
			if slice.Spec.NodeName == nodeName {
				if err := addSlice(pools, slice); err != nil {
					return nil, fmt.Errorf("add node slice %s: %w", slice.Name, err)
				}
			}
		case slice.Spec.AllNodes:
			// Cluster-wide slice: always relevant.
			if err := addSlice(pools, slice); err != nil {
				return nil, fmt.Errorf("add cluster slice %s: %w", slice.Name, err)
			}
		case slice.Spec.NodeSelector != nil:
			// TODO: move conversion into api.
			selector, err := nodeaffinity.NewNodeSelector(slice.Spec.NodeSelector)
			if err != nil {
				return nil, fmt.Errorf("node selector in resource slice %s: %w", slice.Name, err)
			}
			if selector.Match(node) {
				if err := addSlice(pools, slice); err != nil {
					return nil, fmt.Errorf("add matching slice %s: %w", slice.Name, err)
				}
			}
		default:
			// Nothing known was set. This must be some future, unknown extension,
			// so we don't know how to handle it. We may still be able to allocated from
			// other pools, so we continue.
			//
			// TODO (eventually): let caller decide how to report this to the user. Warning
			// about it for every slice on each scheduling attempt would be too noisy, but
			// perhaps once per run would be useful?
			continue
		}

	}

	// Find incomplete pools and flatten into a single slice.
	result := make([]*Pool, 0, len(pools))
	for _, pool := range pools {
		// addSlice guarantees at least one slice per pool, so indexing
		// Slices[0] here is safe.
		pool.IsIncomplete = int64(len(pool.Slices)) != pool.Slices[0].Spec.Pool.ResourceSliceCount
		pool.IsInvalid, pool.InvalidReason = poolIsInvalid(pool)
		result = append(result, pool)
	}

	return result, nil
}
|
||||
|
||||
// addSlice converts a ResourceSlice into the internal representation and
// merges it into the pool it belongs to, creating the pool if necessary.
// Slices from an older pool generation are silently dropped; a slice from
// a newer generation replaces all previously gathered slices of the pool.
func addSlice(pools map[PoolID]*Pool, s *resourceapi.ResourceSlice) error {
	var slice draapi.ResourceSlice
	if err := draapi.Convert_v1beta1_ResourceSlice_To_api_ResourceSlice(s, &slice, nil); err != nil {
		return fmt.Errorf("convert ResourceSlice: %w", err)
	}

	id := PoolID{Driver: slice.Spec.Driver, Pool: slice.Spec.Pool.Name}
	pool := pools[id]
	if pool == nil {
		// New pool.
		pool = &Pool{
			PoolID: id,
			Slices: []*draapi.ResourceSlice{&slice},
		}
		pools[id] = pool
		return nil
	}

	// Slices[0] is used as the representative for the pool's current
	// generation; all slices kept in a pool share the same generation.
	if slice.Spec.Pool.Generation < pool.Slices[0].Spec.Pool.Generation {
		// Out-dated.
		return nil
	}

	if slice.Spec.Pool.Generation > pool.Slices[0].Spec.Pool.Generation {
		// Newer, replaces all old slices.
		pool.Slices = nil
	}

	// Add to pool.
	pool.Slices = append(pool.Slices, &slice)
	return nil
}
|
||||
|
||||
func poolIsInvalid(pool *Pool) (bool, string) {
|
||||
devices := sets.New[draapi.UniqueString]()
|
||||
for _, slice := range pool.Slices {
|
||||
for _, device := range slice.Spec.Devices {
|
||||
if devices.Has(device.Name) {
|
||||
return true, fmt.Sprintf("duplicate device name %s", device.Name)
|
||||
}
|
||||
devices.Insert(device.Name)
|
||||
}
|
||||
}
|
||||
return false, ""
|
||||
}
|
||||
|
||||
// Pool groups all gathered ResourceSlices which belong to the same
// resource pool of one driver, together with validity information
// determined while gathering.
type Pool struct {
	PoolID
	// IsIncomplete is true when fewer slices were gathered than the
	// pool's advertised ResourceSliceCount.
	IsIncomplete bool
	// IsInvalid is true when the pool content is inconsistent;
	// InvalidReason then contains a human-readable explanation.
	IsInvalid     bool
	InvalidReason string
	// Slices holds all gathered slices of the pool's newest generation.
	Slices []*draapi.ResourceSlice
}
|
||||
|
||||
// PoolID uniquely identifies a resource pool: driver name plus pool name.
type PoolID struct {
	Driver, Pool draapi.UniqueString
}

// String returns the ID in "<driver>/<pool>" form.
func (p PoolID) String() string {
	return p.Driver.String() + "/" + p.Pool.String()
}
|
||||
|
||||
// DeviceID uniquely identifies a device: driver name, pool name, and
// device name.
type DeviceID struct {
	Driver, Pool, Device draapi.UniqueString
}

// String returns the ID in "<driver>/<pool>/<device>" form.
func (d DeviceID) String() string {
	return d.Driver.String() + "/" + d.Pool.String() + "/" + d.Device.String()
}
|
||||
|
||||
func MakeDeviceID(driver, pool, device string) DeviceID {
|
||||
return DeviceID{
|
||||
Driver: draapi.MakeUniqueString(driver),
|
||||
Pool: draapi.MakeUniqueString(pool),
|
||||
Device: draapi.MakeUniqueString(device),
|
||||
}
|
||||
}
|
Reference in New Issue
Block a user