build: move e2e dependencies into e2e/go.mod

Several packages are only used while running the e2e suite. These
packages are less important to update, as they cannot influence the
final executable that is part of the Ceph-CSI container image.

By moving these dependencies out of the main Ceph-CSI go.mod, it is
easier to identify whether a reported CVE affects Ceph-CSI itself, or
only the test suite (as is the case for most Kubernetes CVEs).

Signed-off-by: Niels de Vos <ndevos@ibm.com>
Niels de Vos
2025-03-04 08:57:28 +01:00
committed by mergify[bot]
parent 15da101b1b
commit bec6090996
8047 changed files with 1407827 additions and 3453 deletions

49
e2e/vendor/k8s.io/client-go/util/apply/apply.go generated vendored Normal file

@@ -0,0 +1,49 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apply
import (
"fmt"
cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/client-go/features"
"k8s.io/client-go/rest"
)
// NewRequest builds a new server-side apply request. The provided apply configuration object will
// be marshalled to the request's body using the default encoding, and the Content-Type header will
// be set to application/apply-patch with the appropriate structured syntax name suffix (today,
// either +yaml or +cbor, see
// https://www.iana.org/assignments/media-type-structured-suffix/media-type-structured-suffix.xhtml).
func NewRequest(client rest.Interface, applyConfiguration interface{}) (*rest.Request, error) {
pt := types.ApplyYAMLPatchType
marshal := json.Marshal
if features.FeatureGates().Enabled(features.ClientsAllowCBOR) && features.FeatureGates().Enabled(features.ClientsPreferCBOR) {
pt = types.ApplyCBORPatchType
marshal = cbor.Marshal
}
body, err := marshal(applyConfiguration)
if err != nil {
return nil, fmt.Errorf("failed to marshal apply configuration: %w", err)
}
return client.Patch(pt).Body(body), nil
}
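
For orientation, a rough sketch of how a typed client wrapper might drive
NewRequest; the package alias, namespace, resource path and field manager
below are illustrative and not part of this commit:

package example

import (
    "context"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
    "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/util/apply"
)

// applyConfigMap lets NewRequest pick the patch type (+yaml or +cbor) and
// marshal the apply configuration, then fills in the rest of the request.
func applyConfigMap(ctx context.Context, client rest.Interface, name string, cm *corev1ac.ConfigMapApplyConfiguration) error {
    req, err := apply.NewRequest(client, cm)
    if err != nil {
        return err
    }
    // Namespace, resource and field manager here are illustrative.
    patchOpts := metav1.PatchOptions{FieldManager: "example-manager"}
    return req.
        Namespace("default").
        Resource("configmaps").
        Name(name).
        VersionedParams(&patchOpts, scheme.ParameterCodec).
        Do(ctx).
        Error()
}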

8
e2e/vendor/k8s.io/client-go/util/cert/OWNERS generated vendored Normal file

@@ -0,0 +1,8 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- sig-auth-certificates-approvers
reviewers:
- sig-auth-certificates-reviewers
labels:
- sig/auth

230
e2e/vendor/k8s.io/client-go/util/cert/cert.go generated vendored Normal file

@@ -0,0 +1,230 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cert
import (
"bytes"
"crypto"
cryptorand "crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"math"
"math/big"
"net"
"os"
"path/filepath"
"strings"
"time"
"k8s.io/client-go/util/keyutil"
netutils "k8s.io/utils/net"
)
const duration365d = time.Hour * 24 * 365
// Config contains the basic fields required for creating a certificate
type Config struct {
CommonName string
Organization []string
AltNames AltNames
Usages []x509.ExtKeyUsage
NotBefore time.Time
}
// AltNames contains the domain names and IP addresses that will be added
// to the API Server's x509 certificate SubAltNames field. The values will
// be passed directly to the x509.Certificate object.
type AltNames struct {
DNSNames []string
IPs []net.IP
}
// NewSelfSignedCACert creates a CA certificate
func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, error) {
now := time.Now()
// returns a uniform random value in [0, max-1), then add 1 to serial to make it a uniform random value in [1, max).
serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64-1))
if err != nil {
return nil, err
}
serial = new(big.Int).Add(serial, big.NewInt(1))
notBefore := now.UTC()
if !cfg.NotBefore.IsZero() {
notBefore = cfg.NotBefore.UTC()
}
tmpl := x509.Certificate{
SerialNumber: serial,
Subject: pkix.Name{
CommonName: cfg.CommonName,
Organization: cfg.Organization,
},
DNSNames: []string{cfg.CommonName},
NotBefore: notBefore,
NotAfter: now.Add(duration365d * 10).UTC(),
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
BasicConstraintsValid: true,
IsCA: true,
}
certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key)
if err != nil {
return nil, err
}
return x509.ParseCertificate(certDERBytes)
}
// GenerateSelfSignedCertKey creates a self-signed certificate and key for the given host.
// Host may be an IP or a DNS name
// You may also specify additional subject alt names (either ip or dns names) for the certificate.
func GenerateSelfSignedCertKey(host string, alternateIPs []net.IP, alternateDNS []string) ([]byte, []byte, error) {
return GenerateSelfSignedCertKeyWithFixtures(host, alternateIPs, alternateDNS, "")
}
// GenerateSelfSignedCertKeyWithFixtures creates a self-signed certificate and key for the given host.
// Host may be an IP or a DNS name. You may also specify additional subject alt names (either ip or dns names)
// for the certificate.
//
// If fixtureDirectory is non-empty, it is a directory path which can contain pre-generated certs. The format is:
// <host>_<ip>-<ip>_<alternateDNS>-<alternateDNS>.crt
// <host>_<ip>-<ip>_<alternateDNS>-<alternateDNS>.key
// Certs/keys not existing in that directory are created.
func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, alternateDNS []string, fixtureDirectory string) ([]byte, []byte, error) {
validFrom := time.Now().Add(-time.Hour) // valid an hour earlier to avoid flakes due to clock skew
maxAge := time.Hour * 24 * 365 // one year self-signed certs
baseName := fmt.Sprintf("%s_%s_%s", host, strings.Join(ipsToStrings(alternateIPs), "-"), strings.Join(alternateDNS, "-"))
certFixturePath := filepath.Join(fixtureDirectory, baseName+".crt")
keyFixturePath := filepath.Join(fixtureDirectory, baseName+".key")
if len(fixtureDirectory) > 0 {
cert, err := os.ReadFile(certFixturePath)
if err == nil {
key, err := os.ReadFile(keyFixturePath)
if err == nil {
return cert, key, nil
}
return nil, nil, fmt.Errorf("cert %s can be read, but key %s cannot: %v", certFixturePath, keyFixturePath, err)
}
maxAge = 100 * time.Hour * 24 * 365 // 100 years fixtures
}
caKey, err := rsa.GenerateKey(cryptorand.Reader, 2048)
if err != nil {
return nil, nil, err
}
// returns a uniform random value in [0, max-1), then add 1 to serial to make it a uniform random value in [1, max).
serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64-1))
if err != nil {
return nil, nil, err
}
serial = new(big.Int).Add(serial, big.NewInt(1))
caTemplate := x509.Certificate{
SerialNumber: serial,
Subject: pkix.Name{
CommonName: fmt.Sprintf("%s-ca@%d", host, time.Now().Unix()),
},
NotBefore: validFrom,
NotAfter: validFrom.Add(maxAge),
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
BasicConstraintsValid: true,
IsCA: true,
}
caDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &caTemplate, &caTemplate, &caKey.PublicKey, caKey)
if err != nil {
return nil, nil, err
}
caCertificate, err := x509.ParseCertificate(caDERBytes)
if err != nil {
return nil, nil, err
}
priv, err := rsa.GenerateKey(cryptorand.Reader, 2048)
if err != nil {
return nil, nil, err
}
// returns a uniform random value in [0, max-1), then add 1 to serial to make it a uniform random value in [1, max).
serial, err = cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64-1))
if err != nil {
return nil, nil, err
}
serial = new(big.Int).Add(serial, big.NewInt(1))
template := x509.Certificate{
SerialNumber: serial,
Subject: pkix.Name{
CommonName: fmt.Sprintf("%s@%d", host, time.Now().Unix()),
},
NotBefore: validFrom,
NotAfter: validFrom.Add(maxAge),
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
}
if ip := netutils.ParseIPSloppy(host); ip != nil {
template.IPAddresses = append(template.IPAddresses, ip)
} else {
template.DNSNames = append(template.DNSNames, host)
}
template.IPAddresses = append(template.IPAddresses, alternateIPs...)
template.DNSNames = append(template.DNSNames, alternateDNS...)
derBytes, err := x509.CreateCertificate(cryptorand.Reader, &template, caCertificate, &priv.PublicKey, caKey)
if err != nil {
return nil, nil, err
}
// Generate cert, followed by ca
certBuffer := bytes.Buffer{}
if err := pem.Encode(&certBuffer, &pem.Block{Type: CertificateBlockType, Bytes: derBytes}); err != nil {
return nil, nil, err
}
if err := pem.Encode(&certBuffer, &pem.Block{Type: CertificateBlockType, Bytes: caDERBytes}); err != nil {
return nil, nil, err
}
// Generate key
keyBuffer := bytes.Buffer{}
if err := pem.Encode(&keyBuffer, &pem.Block{Type: keyutil.RSAPrivateKeyBlockType, Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil {
return nil, nil, err
}
if len(fixtureDirectory) > 0 {
if err := os.WriteFile(certFixturePath, certBuffer.Bytes(), 0644); err != nil {
return nil, nil, fmt.Errorf("failed to write cert fixture to %s: %v", certFixturePath, err)
}
if err := os.WriteFile(keyFixturePath, keyBuffer.Bytes(), 0600); err != nil {
return nil, nil, fmt.Errorf("failed to write key fixture to %s: %v", certFixturePath, err)
}
}
return certBuffer.Bytes(), keyBuffer.Bytes(), nil
}
func ipsToStrings(ips []net.IP) []string {
ss := make([]string, 0, len(ips))
for _, ip := range ips {
ss = append(ss, ip.String())
}
return ss
}
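
A minimal sketch of the usual test-fixture usage, assuming only a throwaway
serving certificate is needed; the host name and SANs are illustrative:

package example

import (
    "crypto/tls"
    "net"

    "k8s.io/client-go/util/cert"
)

// selfSignedServerCert returns a tls.Certificate for a local test server.
// The PEM bundle from GenerateSelfSignedCertKey contains the leaf cert
// followed by its CA.
func selfSignedServerCert() (tls.Certificate, error) {
    certPEM, keyPEM, err := cert.GenerateSelfSignedCertKey(
        "localhost",
        []net.IP{net.ParseIP("127.0.0.1")},
        []string{"example.test"},
    )
    if err != nil {
        return tls.Certificate{}, err
    }
    return tls.X509KeyPair(certPEM, keyPEM)
}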

75
e2e/vendor/k8s.io/client-go/util/cert/csr.go generated vendored Normal file

@@ -0,0 +1,75 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cert
import (
cryptorand "crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"net"
)
// MakeCSR generates a PEM-encoded CSR using the supplied private key, subject, and SANs.
// All key types that are implemented via crypto.Signer are supported (This includes *rsa.PrivateKey and *ecdsa.PrivateKey.)
func MakeCSR(privateKey interface{}, subject *pkix.Name, dnsSANs []string, ipSANs []net.IP) (csr []byte, err error) {
template := &x509.CertificateRequest{
Subject: *subject,
DNSNames: dnsSANs,
IPAddresses: ipSANs,
}
return MakeCSRFromTemplate(privateKey, template)
}
// MakeCSRFromTemplate generates a PEM-encoded CSR using the supplied private
// key and certificate request as a template. All key types that are
// implemented via crypto.Signer are supported (This includes *rsa.PrivateKey
// and *ecdsa.PrivateKey.)
func MakeCSRFromTemplate(privateKey interface{}, template *x509.CertificateRequest) ([]byte, error) {
t := *template
t.SignatureAlgorithm = sigType(privateKey)
csrDER, err := x509.CreateCertificateRequest(cryptorand.Reader, &t, privateKey)
if err != nil {
return nil, err
}
csrPemBlock := &pem.Block{
Type: CertificateRequestBlockType,
Bytes: csrDER,
}
return pem.EncodeToMemory(csrPemBlock), nil
}
func sigType(privateKey interface{}) x509.SignatureAlgorithm {
// Customize the signature for RSA keys, depending on the key size
if privateKey, ok := privateKey.(*rsa.PrivateKey); ok {
keySize := privateKey.N.BitLen()
switch {
case keySize >= 4096:
return x509.SHA512WithRSA
case keySize >= 3072:
return x509.SHA384WithRSA
default:
return x509.SHA256WithRSA
}
}
return x509.UnknownSignatureAlgorithm
}
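
A short sketch of building a PEM-encoded CSR with MakeCSR, assuming a freshly
generated ECDSA key; the subject and SANs are illustrative:

package example

import (
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    "crypto/x509/pkix"
    "net"

    "k8s.io/client-go/util/cert"
)

// pemCSRForHost creates a new P-256 key and a CSR for the given host.
func pemCSRForHost(host string) ([]byte, *ecdsa.PrivateKey, error) {
    key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    if err != nil {
        return nil, nil, err
    }
    csrPEM, err := cert.MakeCSR(
        key,
        &pkix.Name{CommonName: host, Organization: []string{"example"}},
        []string{host},
        []net.IP{net.ParseIP("127.0.0.1")},
    )
    if err != nil {
        return nil, nil, err
    }
    return csrPEM, key, nil
}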

112
e2e/vendor/k8s.io/client-go/util/cert/io.go generated vendored Normal file

@@ -0,0 +1,112 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cert
import (
"crypto/x509"
"fmt"
"os"
"path/filepath"
)
// CanReadCertAndKey returns true if the certificate and key files already exists,
// otherwise returns false. If lost one of cert and key, returns error.
func CanReadCertAndKey(certPath, keyPath string) (bool, error) {
certReadable := canReadFile(certPath)
keyReadable := canReadFile(keyPath)
if certReadable == false && keyReadable == false {
return false, nil
}
if certReadable == false {
return false, fmt.Errorf("error reading %s, certificate and key must be supplied as a pair", certPath)
}
if keyReadable == false {
return false, fmt.Errorf("error reading %s, certificate and key must be supplied as a pair", keyPath)
}
return true, nil
}
// If the file represented by path exists and
// readable, returns true otherwise returns false.
func canReadFile(path string) bool {
f, err := os.Open(path)
if err != nil {
return false
}
defer f.Close()
return true
}
// WriteCert writes the pem-encoded certificate data to certPath.
// The certificate file will be created with file mode 0644.
// If the certificate file already exists, it will be overwritten.
// The parent directory of the certPath will be created as needed with file mode 0755.
func WriteCert(certPath string, data []byte) error {
if err := os.MkdirAll(filepath.Dir(certPath), os.FileMode(0755)); err != nil {
return err
}
return os.WriteFile(certPath, data, os.FileMode(0644))
}
// NewPool returns an x509.CertPool containing the certificates in the given PEM-encoded file.
// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates
func NewPool(filename string) (*x509.CertPool, error) {
pemBlock, err := os.ReadFile(filename)
if err != nil {
return nil, err
}
pool, err := NewPoolFromBytes(pemBlock)
if err != nil {
return nil, fmt.Errorf("error creating pool from %s: %s", filename, err)
}
return pool, nil
}
// NewPoolFromBytes returns an x509.CertPool containing the certificates in the given PEM-encoded bytes.
// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates
func NewPoolFromBytes(pemBlock []byte) (*x509.CertPool, error) {
certs, err := ParseCertsPEM(pemBlock)
if err != nil {
return nil, err
}
pool := x509.NewCertPool()
for _, cert := range certs {
pool.AddCert(cert)
}
return pool, nil
}
// CertsFromFile returns the x509.Certificates contained in the given PEM-encoded file.
// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates
func CertsFromFile(file string) ([]*x509.Certificate, error) {
pemBlock, err := os.ReadFile(file)
if err != nil {
return nil, err
}
certs, err := ParseCertsPEM(pemBlock)
if err != nil {
return nil, fmt.Errorf("error reading %s: %s", file, err)
}
return certs, nil
}
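
A small sketch combining WriteCert and NewPool; the on-disk path is only a
placeholder:

package example

import (
    "crypto/x509"

    "k8s.io/client-go/util/cert"
)

// caPoolFromBundle persists a PEM bundle and loads it back as an x509.CertPool.
func caPoolFromBundle(pemBundle []byte) (*x509.CertPool, error) {
    const path = "/tmp/example-ca.crt" // placeholder location
    if err := cert.WriteCert(path, pemBundle); err != nil {
        return nil, err
    }
    return cert.NewPool(path)
}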

73
e2e/vendor/k8s.io/client-go/util/cert/pem.go generated vendored Normal file

@@ -0,0 +1,73 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cert
import (
"bytes"
"crypto/x509"
"encoding/pem"
"errors"
)
const (
// CertificateBlockType is a possible value for pem.Block.Type.
CertificateBlockType = "CERTIFICATE"
// CertificateRequestBlockType is a possible value for pem.Block.Type.
CertificateRequestBlockType = "CERTIFICATE REQUEST"
)
// ParseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array
// Returns an error if a certificate could not be parsed, or if the data does not contain any certificates
func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) {
ok := false
certs := []*x509.Certificate{}
for len(pemCerts) > 0 {
var block *pem.Block
block, pemCerts = pem.Decode(pemCerts)
if block == nil {
break
}
// Only use PEM "CERTIFICATE" blocks without extra headers
if block.Type != CertificateBlockType || len(block.Headers) != 0 {
continue
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return certs, err
}
certs = append(certs, cert)
ok = true
}
if !ok {
return certs, errors.New("data does not contain any valid RSA or ECDSA certificates")
}
return certs, nil
}
// EncodeCertificates returns the PEM-encoded byte array that represents by the specified certs.
func EncodeCertificates(certs ...*x509.Certificate) ([]byte, error) {
b := bytes.Buffer{}
for _, cert := range certs {
if err := pem.Encode(&b, &pem.Block{Type: CertificateBlockType, Bytes: cert.Raw}); err != nil {
return []byte{}, err
}
}
return b.Bytes(), nil
}
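
A brief sketch of a ParseCertsPEM/EncodeCertificates round trip, for example
to normalize a CA bundle by dropping non-certificate PEM blocks:

package example

import "k8s.io/client-go/util/cert"

// normalizeCertBundle re-encodes only the CERTIFICATE blocks of a PEM bundle,
// in their original order.
func normalizeCertBundle(pemBundle []byte) ([]byte, error) {
    certs, err := cert.ParseCertsPEM(pemBundle)
    if err != nil {
        return nil, err
    }
    return cert.EncodeCertificates(certs...)
}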


@@ -0,0 +1,102 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cert
import (
"crypto/tls"
"crypto/x509"
"fmt"
"net/url"
"strings"
)
// GetClientCANames gets the CA names for client certs that a server accepts. This is useful when inspecting the
// state of particular servers. apiHost is "host:port"
func GetClientCANames(apiHost string) ([]string, error) {
// when we run this the second time, we know which one we are expecting
acceptableCAs := []string{}
tlsConfig := &tls.Config{
InsecureSkipVerify: true, // this is insecure to always get to the GetClientCertificate
GetClientCertificate: func(hello *tls.CertificateRequestInfo) (*tls.Certificate, error) {
acceptableCAs = []string{}
for _, curr := range hello.AcceptableCAs {
acceptableCAs = append(acceptableCAs, string(curr))
}
return &tls.Certificate{}, nil
},
}
conn, err := tls.Dial("tcp", apiHost, tlsConfig)
if err != nil {
return nil, err
}
if err := conn.Close(); err != nil {
return nil, err
}
return acceptableCAs, nil
}
// GetClientCANamesForURL is GetClientCANames against a URL string like we use in kubeconfigs
func GetClientCANamesForURL(kubeConfigURL string) ([]string, error) {
apiserverURL, err := url.Parse(kubeConfigURL)
if err != nil {
return nil, err
}
return GetClientCANames(apiserverURL.Host)
}
// GetServingCertificates returns the x509 certs used by a server as certificates and pem encoded bytes.
// The serverName is optional for specifying a different name to get SNI certificates. apiHost is "host:port"
func GetServingCertificates(apiHost, serverName string) ([]*x509.Certificate, [][]byte, error) {
tlsConfig := &tls.Config{
InsecureSkipVerify: true, // this is insecure so that we always get connected
}
// if a name is specified for SNI, set it.
if len(serverName) > 0 {
tlsConfig.ServerName = serverName
}
conn, err := tls.Dial("tcp", apiHost, tlsConfig)
if err != nil {
return nil, nil, err
}
if err = conn.Close(); err != nil {
return nil, nil, fmt.Errorf("failed to close connection : %v", err)
}
peerCerts := conn.ConnectionState().PeerCertificates
peerCertBytes := [][]byte{}
for _, a := range peerCerts {
actualCert, err := EncodeCertificates(a)
if err != nil {
return nil, nil, err
}
peerCertBytes = append(peerCertBytes, []byte(strings.TrimSpace(string(actualCert))))
}
return peerCerts, peerCertBytes, err
}
// GetServingCertificatesForURL is GetServingCertificates against a URL string like we use in kubeconfigs
func GetServingCertificatesForURL(kubeConfigURL, serverName string) ([]*x509.Certificate, [][]byte, error) {
apiserverURL, err := url.Parse(kubeConfigURL)
if err != nil {
return nil, nil, err
}
return GetServingCertificates(apiserverURL.Host, serverName)
}
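
A hedged sketch of inspecting a server's serving chain; the URL stands in for
whatever address a kubeconfig would provide:

package example

import (
    "fmt"

    "k8s.io/client-go/util/cert"
)

// dumpServingCerts prints subject and issuer of every certificate the server
// presents during the TLS handshake.
func dumpServingCerts() error {
    certs, _, err := cert.GetServingCertificatesForURL("https://127.0.0.1:6443", "")
    if err != nil {
        return err
    }
    for _, c := range certs {
        fmt.Printf("serving cert subject=%s issuer=%s\n", c.Subject, c.Issuer)
    }
    return nil
}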


@@ -0,0 +1,133 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package connrotation implements a connection dialer that tracks and can close
// all created connections.
//
// This is used for credential rotation of long-lived connections, when there's
// no way to re-authenticate on a live connection.
package connrotation
import (
"context"
"net"
"sync"
)
// DialFunc is a shorthand for signature of net.DialContext.
type DialFunc func(ctx context.Context, network, address string) (net.Conn, error)
// Dialer opens connections through Dial and tracks them.
type Dialer struct {
dial DialFunc
*ConnectionTracker
}
// NewDialer creates a new Dialer instance.
// Equivalent to NewDialerWithTracker(dial, nil).
func NewDialer(dial DialFunc) *Dialer {
return NewDialerWithTracker(dial, nil)
}
// NewDialerWithTracker creates a new Dialer instance.
//
// If dial is not nil, it will be used to create new underlying connections.
// Otherwise net.DialContext is used.
// If tracker is not nil, it will be used to track new underlying connections.
// Otherwise NewConnectionTracker() is used.
func NewDialerWithTracker(dial DialFunc, tracker *ConnectionTracker) *Dialer {
if tracker == nil {
tracker = NewConnectionTracker()
}
return &Dialer{
dial: dial,
ConnectionTracker: tracker,
}
}
// ConnectionTracker keeps track of opened connections
type ConnectionTracker struct {
mu sync.Mutex
conns map[*closableConn]struct{}
}
// NewConnectionTracker returns a connection tracker for use with NewDialerWithTracker
func NewConnectionTracker() *ConnectionTracker {
return &ConnectionTracker{
conns: make(map[*closableConn]struct{}),
}
}
// CloseAll forcibly closes all tracked connections.
//
// Note: new connections may get created before CloseAll returns.
func (c *ConnectionTracker) CloseAll() {
c.mu.Lock()
conns := c.conns
c.conns = make(map[*closableConn]struct{})
c.mu.Unlock()
for conn := range conns {
conn.Close()
}
}
// Track adds the connection to the list of tracked connections,
// and returns a wrapped copy of the connection that stops tracking the connection
// when it is closed.
func (c *ConnectionTracker) Track(conn net.Conn) net.Conn {
closable := &closableConn{Conn: conn}
// When the connection is closed, remove it from the map. This will
// be no-op if the connection isn't in the map, e.g. if CloseAll()
// is called.
closable.onClose = func() {
c.mu.Lock()
delete(c.conns, closable)
c.mu.Unlock()
}
// Start tracking the connection
c.mu.Lock()
c.conns[closable] = struct{}{}
c.mu.Unlock()
return closable
}
// Dial creates a new tracked connection.
func (d *Dialer) Dial(network, address string) (net.Conn, error) {
return d.DialContext(context.Background(), network, address)
}
// DialContext creates a new tracked connection.
func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
conn, err := d.dial(ctx, network, address)
if err != nil {
return nil, err
}
return d.ConnectionTracker.Track(conn), nil
}
type closableConn struct {
onClose func()
net.Conn
}
func (c *closableConn) Close() error {
go c.onClose()
return c.Conn.Close()
}
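
A sketch of wiring the Dialer into an http.Transport so that all open
connections can be dropped when credentials rotate; the dial timeout is
illustrative:

package example

import (
    "net"
    "net/http"
    "time"

    "k8s.io/client-go/util/connrotation"
)

// newRotatableTransport returns a transport whose connections are tracked.
// On credential rotation the caller invokes dialer.CloseAll(), forcing every
// subsequent request to re-dial and re-authenticate.
func newRotatableTransport() (*http.Transport, *connrotation.Dialer) {
    base := (&net.Dialer{Timeout: 30 * time.Second}).DialContext
    dialer := connrotation.NewDialer(base)
    return &http.Transport{DialContext: dialer.DialContext}, dialer
}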


@@ -0,0 +1,146 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package consistencydetector
import (
"context"
"fmt"
"sort"
"time"
"github.com/google/go-cmp/cmp"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
)
type RetrieveItemsFunc[U any] func() []U
type ListFunc[T runtime.Object] func(ctx context.Context, options metav1.ListOptions) (T, error)
// CheckDataConsistency exists solely for testing purposes.
// we cannot use checkWatchListDataConsistencyIfRequested because
// it is guarded by an environmental variable.
// we cannot manipulate the environmental variable because
// it will affect other tests in this package.
func CheckDataConsistency[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn ListFunc[T], listOptions metav1.ListOptions, retrieveItemsFn RetrieveItemsFunc[U]) {
if !canFormAdditionalListCall(lastSyncedResourceVersion, listOptions) {
klog.V(4).Infof("data consistency check for %s is enabled but the parameters (RV, ListOptions) doesn't allow for creating a valid LIST request. Skipping the data consistency check.", identity)
return
}
klog.Warningf("data consistency check for %s is enabled, this will result in an additional call to the API server.", identity)
retrievedItems := toMetaObjectSliceOrDie(retrieveItemsFn())
listOptions = prepareListCallOptions(lastSyncedResourceVersion, listOptions, len(retrievedItems))
var list runtime.Object
err := wait.PollUntilContextCancel(ctx, time.Second, true, func(_ context.Context) (done bool, err error) {
list, err = listFn(ctx, listOptions)
if err != nil {
// the consistency check will only be enabled in the CI
// and LIST calls in general will be retired by the client-go library
// if we fail simply log and retry
klog.Errorf("failed to list data from the server, retrying until stopCh is closed, err: %v", err)
return false, nil
}
return true, nil
})
if err != nil {
klog.Errorf("failed to list data from the server, the data consistency check for %s won't be performed, stopCh was closed, err: %v", identity, err)
return
}
rawListItems, err := meta.ExtractListWithAlloc(list)
if err != nil {
panic(err) // this should never happen
}
listItems := toMetaObjectSliceOrDie(rawListItems)
sort.Sort(byUID(listItems))
sort.Sort(byUID(retrievedItems))
if !cmp.Equal(listItems, retrievedItems) {
klog.Infof("previously received data for %s is different than received by the standard list api call against etcd, diff: %v", identity, cmp.Diff(listItems, retrievedItems))
msg := fmt.Sprintf("data inconsistency detected for %s, panicking!", identity)
panic(msg)
}
}
// canFormAdditionalListCall ensures that we can form a valid LIST requests
// for checking data consistency.
func canFormAdditionalListCall(lastSyncedResourceVersion string, listOptions metav1.ListOptions) bool {
// since we are setting ResourceVersionMatch to metav1.ResourceVersionMatchExact
// we need to make sure that the continuation hasn't been set
// https://github.com/kubernetes/kubernetes/blob/be4afb9ef90b19ccb6f7e595cbdb247e088b2347/staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go#L38
if len(listOptions.Continue) > 0 {
return false
}
// since we are setting ResourceVersionMatch to metav1.ResourceVersionMatchExact
// we need to make sure that the RV is valid because the validation code forbids RV == "0"
// https://github.com/kubernetes/kubernetes/blob/be4afb9ef90b19ccb6f7e595cbdb247e088b2347/staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go#L44
if lastSyncedResourceVersion == "0" {
return false
}
return true
}
// prepareListCallOptions changes the input list options so that
// the list call goes directly to etcd
func prepareListCallOptions(lastSyncedResourceVersion string, listOptions metav1.ListOptions, retrievedItemsCount int) metav1.ListOptions {
// this is our legacy case:
//
// the watch cache skips the Limit if the ResourceVersion was set to "0"
// thus, to compare with data retrieved directly from etcd
// we need to skip the limit to for the list call as well.
//
// note that when the number of retrieved items is less than the request limit,
// it means either the watch cache is disabled, or there is not enough data.
// in both cases, we can use the limit because we will be able to compare
// the data with the items retrieved from etcd.
if listOptions.ResourceVersion == "0" && listOptions.Limit > 0 && int64(retrievedItemsCount) > listOptions.Limit {
listOptions.Limit = 0
}
// set the RV and RVM so that we get the snapshot of data
// directly from etcd.
listOptions.ResourceVersion = lastSyncedResourceVersion
listOptions.ResourceVersionMatch = metav1.ResourceVersionMatchExact
return listOptions
}
type byUID []metav1.Object
func (a byUID) Len() int { return len(a) }
func (a byUID) Less(i, j int) bool { return a[i].GetUID() < a[j].GetUID() }
func (a byUID) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func toMetaObjectSliceOrDie[T any](s []T) []metav1.Object {
result := make([]metav1.Object, len(s))
for i, v := range s {
m, err := meta.Accessor(v)
if err != nil {
panic(err)
}
result[i] = m
}
return result
}


@@ -0,0 +1,76 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package consistencydetector
import (
"context"
"os"
"strconv"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
var dataConsistencyDetectionForListFromCacheEnabled = false
func init() {
dataConsistencyDetectionForListFromCacheEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR"))
}
// IsDataConsistencyDetectionForListEnabled returns true when
// the KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR environment variable was set during a binary startup.
func IsDataConsistencyDetectionForListEnabled() bool {
return dataConsistencyDetectionForListFromCacheEnabled
}
// CheckListFromCacheDataConsistencyIfRequested performs a data consistency check only when
// the KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR environment variable was set during a binary startup
// for requests that have a high chance of being served from the watch-cache.
//
// The consistency check is meant to be enforced only in the CI, not in production.
// The check ensures that data retrieved by a list api call from the watch-cache
// is exactly the same as data received by the list api call from etcd.
//
// Note that this function will panic when data inconsistency is detected.
// This is intentional because we want to catch it in the CI.
//
// Note that this function doesn't examine the ListOptions to determine
// if the original request has hit the cache because it would be challenging
// to maintain consistency with the server-side implementation.
// For simplicity, we assume that the first request retrieved data from
// the cache (even though this might not be true for some requests)
// and issue the second call to get data from etcd for comparison.
func CheckListFromCacheDataConsistencyIfRequested[T runtime.Object](ctx context.Context, identity string, listItemsFn ListFunc[T], optionsUsedToReceiveList metav1.ListOptions, receivedList runtime.Object) {
if !IsDataConsistencyDetectionForListEnabled() {
return
}
checkListFromCacheDataConsistencyIfRequestedInternal(ctx, identity, listItemsFn, optionsUsedToReceiveList, receivedList)
}
func checkListFromCacheDataConsistencyIfRequestedInternal[T runtime.Object](ctx context.Context, identity string, listItemsFn ListFunc[T], optionsUsedToReceiveList metav1.ListOptions, receivedList runtime.Object) {
receivedListMeta, err := meta.ListAccessor(receivedList)
if err != nil {
panic(err)
}
rawListItems, err := meta.ExtractListWithAlloc(receivedList)
if err != nil {
panic(err) // this should never happen
}
lastSyncedResourceVersion := receivedListMeta.GetResourceVersion()
CheckDataConsistency(ctx, identity, lastSyncedResourceVersion, listItemsFn, optionsUsedToReceiveList, func() []runtime.Object { return rawListItems })
}


@@ -0,0 +1,54 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package consistencydetector
import (
"context"
"os"
"strconv"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
var dataConsistencyDetectionForWatchListEnabled = false
func init() {
dataConsistencyDetectionForWatchListEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_WATCHLIST_INCONSISTENCY_DETECTOR"))
}
// IsDataConsistencyDetectionForWatchListEnabled returns true when
// the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable was set during a binary startup.
func IsDataConsistencyDetectionForWatchListEnabled() bool {
return dataConsistencyDetectionForWatchListEnabled
}
// CheckWatchListFromCacheDataConsistencyIfRequested performs a data consistency check only when
// the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable was set during a binary startup.
//
// The consistency check is meant to be enforced only in the CI, not in production.
// The check ensures that data retrieved by the watch-list api call
// is exactly the same as data received by the standard list api call against etcd.
//
// Note that this function will panic when data inconsistency is detected.
// This is intentional because we want to catch it in the CI.
func CheckWatchListFromCacheDataConsistencyIfRequested[T runtime.Object](ctx context.Context, identity string, listItemsFn ListFunc[T], optionsUsedToReceiveList metav1.ListOptions, receivedList runtime.Object) {
if !IsDataConsistencyDetectionForWatchListEnabled() {
return
}
checkListFromCacheDataConsistencyIfRequestedInternal(ctx, identity, listItemsFn, optionsUsedToReceiveList, receivedList)
}

52
e2e/vendor/k8s.io/client-go/util/exec/exec.go generated vendored Normal file

@@ -0,0 +1,52 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package exec
// ExitError is an interface that presents an API similar to os.ProcessState, which is
// what ExitError from os/exec is. This is designed to make testing a bit easier and
// probably loses some of the cross-platform properties of the underlying library.
type ExitError interface {
String() string
Error() string
Exited() bool
ExitStatus() int
}
// CodeExitError is an implementation of ExitError consisting of an error object
// and an exit code (the upper bits of os.exec.ExitStatus).
type CodeExitError struct {
Err error
Code int
}
var _ ExitError = CodeExitError{}
func (e CodeExitError) Error() string {
return e.Err.Error()
}
func (e CodeExitError) String() string {
return e.Err.Error()
}
func (e CodeExitError) Exited() bool {
return true
}
func (e CodeExitError) ExitStatus() int {
return e.Code
}
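
A small sketch of producing and inspecting a CodeExitError; the error text and
exit code are illustrative:

package example

import (
    "errors"

    utilexec "k8s.io/client-go/util/exec"
)

// failWithCode is how a remote-exec implementation might report an exit code.
func failWithCode() error {
    return utilexec.CodeExitError{Err: errors.New("command terminated"), Code: 2}
}

// exitStatusOf extracts the exit code, or returns 0 when none is attached.
func exitStatusOf(err error) int {
    var exitErr utilexec.CodeExitError
    if errors.As(err, &exitErr) {
        return exitErr.ExitStatus()
    }
    return 0
}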

188
e2e/vendor/k8s.io/client-go/util/flowcontrol/backoff.go generated vendored Normal file

@@ -0,0 +1,188 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flowcontrol
import (
"math/rand"
"sync"
"time"
"k8s.io/utils/clock"
testingclock "k8s.io/utils/clock/testing"
)
type backoffEntry struct {
backoff time.Duration
lastUpdate time.Time
}
type Backoff struct {
sync.RWMutex
Clock clock.Clock
// HasExpiredFunc controls the logic that determines whether the backoff
// counter should be reset, and when to GC old backoff entries. If nil, the
// default hasExpired function will restart the backoff factor to the
// beginning after observing time has passed at least equal to 2*maxDuration
HasExpiredFunc func(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool
defaultDuration time.Duration
maxDuration time.Duration
perItemBackoff map[string]*backoffEntry
rand *rand.Rand
// maxJitterFactor adds jitter to the exponentially backed off delay.
// if maxJitterFactor is zero, no jitter is added to the delay in
// order to maintain current behavior.
maxJitterFactor float64
}
func NewFakeBackOff(initial, max time.Duration, tc *testingclock.FakeClock) *Backoff {
return newBackoff(tc, initial, max, 0.0)
}
func NewBackOff(initial, max time.Duration) *Backoff {
return NewBackOffWithJitter(initial, max, 0.0)
}
func NewFakeBackOffWithJitter(initial, max time.Duration, tc *testingclock.FakeClock, maxJitterFactor float64) *Backoff {
return newBackoff(tc, initial, max, maxJitterFactor)
}
func NewBackOffWithJitter(initial, max time.Duration, maxJitterFactor float64) *Backoff {
clock := clock.RealClock{}
return newBackoff(clock, initial, max, maxJitterFactor)
}
func newBackoff(clock clock.Clock, initial, max time.Duration, maxJitterFactor float64) *Backoff {
var random *rand.Rand
if maxJitterFactor > 0 {
random = rand.New(rand.NewSource(clock.Now().UnixNano()))
}
return &Backoff{
perItemBackoff: map[string]*backoffEntry{},
Clock: clock,
defaultDuration: initial,
maxDuration: max,
maxJitterFactor: maxJitterFactor,
rand: random,
}
}
// Get the current backoff Duration
func (p *Backoff) Get(id string) time.Duration {
p.RLock()
defer p.RUnlock()
var delay time.Duration
entry, ok := p.perItemBackoff[id]
if ok {
delay = entry.backoff
}
return delay
}
// move backoff to the next mark, capping at maxDuration
func (p *Backoff) Next(id string, eventTime time.Time) {
p.Lock()
defer p.Unlock()
entry, ok := p.perItemBackoff[id]
if !ok || p.hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
entry = p.initEntryUnsafe(id)
entry.backoff += p.jitter(entry.backoff)
} else {
delay := entry.backoff * 2 // exponential
delay += p.jitter(entry.backoff) // add some jitter to the delay
entry.backoff = min(delay, p.maxDuration)
}
entry.lastUpdate = p.Clock.Now()
}
// Reset forces clearing of all backoff data for a given key.
func (p *Backoff) Reset(id string) {
p.Lock()
defer p.Unlock()
delete(p.perItemBackoff, id)
}
// Returns True if the elapsed time since eventTime is smaller than the current backoff window
func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool {
p.RLock()
defer p.RUnlock()
entry, ok := p.perItemBackoff[id]
if !ok {
return false
}
if p.hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
return false
}
return p.Clock.Since(eventTime) < entry.backoff
}
// Returns True if time since lastupdate is less than the current backoff window.
func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool {
p.RLock()
defer p.RUnlock()
entry, ok := p.perItemBackoff[id]
if !ok {
return false
}
if p.hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
return false
}
return eventTime.Sub(entry.lastUpdate) < entry.backoff
}
// Garbage collect records that have aged past their expiration, which defaults
// to 2*maxDuration (see hasExpired godoc). Backoff users are expected to invoke
// this periodically.
func (p *Backoff) GC() {
p.Lock()
defer p.Unlock()
now := p.Clock.Now()
for id, entry := range p.perItemBackoff {
if p.hasExpired(now, entry.lastUpdate, p.maxDuration) {
delete(p.perItemBackoff, id)
}
}
}
func (p *Backoff) DeleteEntry(id string) {
p.Lock()
defer p.Unlock()
delete(p.perItemBackoff, id)
}
// Take a lock on *Backoff, before calling initEntryUnsafe
func (p *Backoff) initEntryUnsafe(id string) *backoffEntry {
entry := &backoffEntry{backoff: p.defaultDuration}
p.perItemBackoff[id] = entry
return entry
}
func (p *Backoff) jitter(delay time.Duration) time.Duration {
if p.rand == nil {
return 0
}
return time.Duration(p.rand.Float64() * p.maxJitterFactor * float64(delay))
}
// Unless an alternate function is provided, after 2*maxDuration we restart the backoff factor to the beginning
func (p *Backoff) hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool {
if p.HasExpiredFunc != nil {
return p.HasExpiredFunc(eventTime, lastUpdate, maxDuration)
}
return eventTime.Sub(lastUpdate) > maxDuration*2 // consider stable if it's ok for twice the maxDuration
}
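
A sketch of per-item retry handling with Backoff, assuming the caller owns the
retry loop; the durations and attempt count are illustrative:

package example

import (
    "time"

    "k8s.io/client-go/util/flowcontrol"
)

// retryWithBackoff retries attempt up to five times, doubling the per-id delay
// after every failure (capped at the 30s maximum).
func retryWithBackoff(id string, attempt func() error) error {
    backoff := flowcontrol.NewBackOff(500*time.Millisecond, 30*time.Second)
    var err error
    for i := 0; i < 5; i++ {
        if err = attempt(); err == nil {
            return nil
        }
        backoff.Next(id, backoff.Clock.Now())
        time.Sleep(backoff.Get(id))
    }
    return err
}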


@@ -0,0 +1,192 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flowcontrol
import (
"context"
"errors"
"sync"
"time"
"golang.org/x/time/rate"
"k8s.io/utils/clock"
)
type PassiveRateLimiter interface {
// TryAccept returns true if a token is taken immediately. Otherwise,
// it returns false.
TryAccept() bool
// Stop stops the rate limiter, subsequent calls to CanAccept will return false
Stop()
// QPS returns QPS of this rate limiter
QPS() float32
}
type RateLimiter interface {
PassiveRateLimiter
// Accept returns once a token becomes available.
Accept()
// Wait returns nil if a token is taken before the Context is done.
Wait(ctx context.Context) error
}
type tokenBucketPassiveRateLimiter struct {
limiter *rate.Limiter
qps float32
clock clock.PassiveClock
}
type tokenBucketRateLimiter struct {
tokenBucketPassiveRateLimiter
clock Clock
}
// NewTokenBucketRateLimiter creates a rate limiter which implements a token bucket approach.
// The rate limiter allows bursts of up to 'burst' to exceed the QPS, while still maintaining a
// smoothed qps rate of 'qps'.
// The bucket is initially filled with 'burst' tokens, and refills at a rate of 'qps'.
// The maximum number of tokens in the bucket is capped at 'burst'.
func NewTokenBucketRateLimiter(qps float32, burst int) RateLimiter {
limiter := rate.NewLimiter(rate.Limit(qps), burst)
return newTokenBucketRateLimiterWithClock(limiter, clock.RealClock{}, qps)
}
// NewTokenBucketPassiveRateLimiter is similar to NewTokenBucketRateLimiter except that it returns
// a PassiveRateLimiter which does not have Accept() and Wait() methods.
func NewTokenBucketPassiveRateLimiter(qps float32, burst int) PassiveRateLimiter {
limiter := rate.NewLimiter(rate.Limit(qps), burst)
return newTokenBucketRateLimiterWithPassiveClock(limiter, clock.RealClock{}, qps)
}
// An injectable, mockable clock interface.
type Clock interface {
clock.PassiveClock
Sleep(time.Duration)
}
var _ Clock = (*clock.RealClock)(nil)
// NewTokenBucketRateLimiterWithClock is identical to NewTokenBucketRateLimiter
// but allows an injectable clock, for testing.
func NewTokenBucketRateLimiterWithClock(qps float32, burst int, c Clock) RateLimiter {
limiter := rate.NewLimiter(rate.Limit(qps), burst)
return newTokenBucketRateLimiterWithClock(limiter, c, qps)
}
// NewTokenBucketPassiveRateLimiterWithClock is similar to NewTokenBucketRateLimiterWithClock
// except that it returns a PassiveRateLimiter which does not have Accept() and Wait() methods
// and uses a PassiveClock.
func NewTokenBucketPassiveRateLimiterWithClock(qps float32, burst int, c clock.PassiveClock) PassiveRateLimiter {
limiter := rate.NewLimiter(rate.Limit(qps), burst)
return newTokenBucketRateLimiterWithPassiveClock(limiter, c, qps)
}
func newTokenBucketRateLimiterWithClock(limiter *rate.Limiter, c Clock, qps float32) *tokenBucketRateLimiter {
return &tokenBucketRateLimiter{
tokenBucketPassiveRateLimiter: *newTokenBucketRateLimiterWithPassiveClock(limiter, c, qps),
clock: c,
}
}
func newTokenBucketRateLimiterWithPassiveClock(limiter *rate.Limiter, c clock.PassiveClock, qps float32) *tokenBucketPassiveRateLimiter {
return &tokenBucketPassiveRateLimiter{
limiter: limiter,
qps: qps,
clock: c,
}
}
func (tbprl *tokenBucketPassiveRateLimiter) Stop() {
}
func (tbprl *tokenBucketPassiveRateLimiter) QPS() float32 {
return tbprl.qps
}
func (tbprl *tokenBucketPassiveRateLimiter) TryAccept() bool {
return tbprl.limiter.AllowN(tbprl.clock.Now(), 1)
}
// Accept will block until a token becomes available
func (tbrl *tokenBucketRateLimiter) Accept() {
now := tbrl.clock.Now()
tbrl.clock.Sleep(tbrl.limiter.ReserveN(now, 1).DelayFrom(now))
}
func (tbrl *tokenBucketRateLimiter) Wait(ctx context.Context) error {
return tbrl.limiter.Wait(ctx)
}
type fakeAlwaysRateLimiter struct{}
func NewFakeAlwaysRateLimiter() RateLimiter {
return &fakeAlwaysRateLimiter{}
}
func (t *fakeAlwaysRateLimiter) TryAccept() bool {
return true
}
func (t *fakeAlwaysRateLimiter) Stop() {}
func (t *fakeAlwaysRateLimiter) Accept() {}
func (t *fakeAlwaysRateLimiter) QPS() float32 {
return 1
}
func (t *fakeAlwaysRateLimiter) Wait(ctx context.Context) error {
return nil
}
type fakeNeverRateLimiter struct {
wg sync.WaitGroup
}
func NewFakeNeverRateLimiter() RateLimiter {
rl := fakeNeverRateLimiter{}
rl.wg.Add(1)
return &rl
}
func (t *fakeNeverRateLimiter) TryAccept() bool {
return false
}
func (t *fakeNeverRateLimiter) Stop() {
t.wg.Done()
}
func (t *fakeNeverRateLimiter) Accept() {
t.wg.Wait()
}
func (t *fakeNeverRateLimiter) QPS() float32 {
return 1
}
func (t *fakeNeverRateLimiter) Wait(ctx context.Context) error {
return errors.New("can not be accept")
}
var (
_ RateLimiter = (*tokenBucketRateLimiter)(nil)
_ RateLimiter = (*fakeAlwaysRateLimiter)(nil)
_ RateLimiter = (*fakeNeverRateLimiter)(nil)
)
var _ PassiveRateLimiter = (*tokenBucketPassiveRateLimiter)(nil)
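
A sketch of pacing requests with the token-bucket RateLimiter; the QPS and
burst values are illustrative:

package example

import (
    "context"

    "k8s.io/client-go/util/flowcontrol"
)

// sendWithRateLimit issues calls at a smoothed 5 QPS with bursts of up to 10,
// blocking in Wait until a token is available or the context is cancelled.
func sendWithRateLimit(ctx context.Context, calls []func() error) error {
    limiter := flowcontrol.NewTokenBucketRateLimiter(5, 10)
    defer limiter.Stop()
    for _, call := range calls {
        if err := limiter.Wait(ctx); err != nil {
            return err
        }
        if err := call(); err != nil {
            return err
        }
    }
    return nil
}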

92
e2e/vendor/k8s.io/client-go/util/homedir/homedir.go generated vendored Normal file

@@ -0,0 +1,92 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package homedir
import (
"os"
"path/filepath"
"runtime"
)
// HomeDir returns the home directory for the current user.
// On Windows:
// 1. the first of %HOME%, %HOMEDRIVE%%HOMEPATH%, %USERPROFILE% containing a `.kube\config` file is returned.
// 2. if none of those locations contain a `.kube\config` file, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists and is writeable is returned.
// 3. if none of those locations are writeable, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists is returned.
// 4. if none of those locations exists, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that is set is returned.
func HomeDir() string {
if runtime.GOOS == "windows" {
home := os.Getenv("HOME")
homeDriveHomePath := ""
if homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 {
homeDriveHomePath = homeDrive + homePath
}
userProfile := os.Getenv("USERPROFILE")
// Return first of %HOME%, %HOMEDRIVE%/%HOMEPATH%, %USERPROFILE% that contains a `.kube\config` file.
// %HOMEDRIVE%/%HOMEPATH% is preferred over %USERPROFILE% for backwards-compatibility.
for _, p := range []string{home, homeDriveHomePath, userProfile} {
if len(p) == 0 {
continue
}
if _, err := os.Stat(filepath.Join(p, ".kube", "config")); err != nil {
continue
}
return p
}
firstSetPath := ""
firstExistingPath := ""
// Prefer %USERPROFILE% over %HOMEDRIVE%/%HOMEPATH% for compatibility with other auth-writing tools
for _, p := range []string{home, userProfile, homeDriveHomePath} {
if len(p) == 0 {
continue
}
if len(firstSetPath) == 0 {
// remember the first path that is set
firstSetPath = p
}
info, err := os.Stat(p)
if err != nil {
continue
}
if len(firstExistingPath) == 0 {
// remember the first path that exists
firstExistingPath = p
}
if info.IsDir() && info.Mode().Perm()&(1<<(uint(7))) != 0 {
// return first path that is writeable
return p
}
}
// If none are writeable, return first location that exists
if len(firstExistingPath) > 0 {
return firstExistingPath
}
// If none exist, return first location that is set
if len(firstSetPath) > 0 {
return firstSetPath
}
// We've got nothing
return ""
}
return os.Getenv("HOME")
}
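
A small sketch of the common kubeconfig-path use of HomeDir:

package example

import (
    "path/filepath"

    "k8s.io/client-go/util/homedir"
)

// defaultKubeconfigPath resolves ~/.kube/config, or "" when no home directory
// can be determined.
func defaultKubeconfigPath() string {
    if home := homedir.HomeDir(); home != "" {
        return filepath.Join(home, ".kube", "config")
    }
    return ""
}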

6
e2e/vendor/k8s.io/client-go/util/keyutil/OWNERS generated vendored Normal file

@@ -0,0 +1,6 @@
approvers:
- sig-auth-certificates-approvers
reviewers:
- sig-auth-certificates-reviewers
labels:
- sig/auth

322
e2e/vendor/k8s.io/client-go/util/keyutil/key.go generated vendored Normal file

@@ -0,0 +1,322 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package keyutil contains utilities for managing public/private key pairs.
package keyutil
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
cryptorand "crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"fmt"
"os"
"path/filepath"
)
const (
// ECPrivateKeyBlockType is a possible value for pem.Block.Type.
ECPrivateKeyBlockType = "EC PRIVATE KEY"
// RSAPrivateKeyBlockType is a possible value for pem.Block.Type.
RSAPrivateKeyBlockType = "RSA PRIVATE KEY"
// PrivateKeyBlockType is a possible value for pem.Block.Type.
PrivateKeyBlockType = "PRIVATE KEY"
// PublicKeyBlockType is a possible value for pem.Block.Type.
PublicKeyBlockType = "PUBLIC KEY"
)
// MakeEllipticPrivateKeyPEM creates an ECDSA private key
func MakeEllipticPrivateKeyPEM() ([]byte, error) {
privateKey, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader)
if err != nil {
return nil, err
}
derBytes, err := x509.MarshalECPrivateKey(privateKey)
if err != nil {
return nil, err
}
privateKeyPemBlock := &pem.Block{
Type: ECPrivateKeyBlockType,
Bytes: derBytes,
}
return pem.EncodeToMemory(privateKeyPemBlock), nil
}
// WriteKey writes the pem-encoded key data to keyPath.
// The key file will be created with file mode 0600.
// If the key file already exists, it will be overwritten.
// The parent directory of the keyPath will be created as needed with file mode 0755.
func WriteKey(keyPath string, data []byte) error {
if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil {
return err
}
return os.WriteFile(keyPath, data, os.FileMode(0600))
}
// LoadOrGenerateKeyFile looks for a key in the file at the given path. If it
// can't find one, it will generate a new key and store it there.
func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err error) {
loadedData, err := os.ReadFile(keyPath)
// Call verifyKeyData to ensure the file wasn't empty/corrupt.
if err == nil && verifyKeyData(loadedData) {
return loadedData, false, err
}
if !os.IsNotExist(err) {
return nil, false, fmt.Errorf("error loading key from %s: %v", keyPath, err)
}
generatedData, err := MakeEllipticPrivateKeyPEM()
if err != nil {
return nil, false, fmt.Errorf("error generating key: %v", err)
}
if err := WriteKey(keyPath, generatedData); err != nil {
return nil, false, fmt.Errorf("error writing key to %s: %v", keyPath, err)
}
return generatedData, true, nil
}
// MarshalPrivateKeyToPEM converts a known private key type of RSA or ECDSA to
// a PEM encoded block or returns an error.
func MarshalPrivateKeyToPEM(privateKey crypto.PrivateKey) ([]byte, error) {
switch t := privateKey.(type) {
case *ecdsa.PrivateKey:
derBytes, err := x509.MarshalECPrivateKey(t)
if err != nil {
return nil, err
}
block := &pem.Block{
Type: ECPrivateKeyBlockType,
Bytes: derBytes,
}
return pem.EncodeToMemory(block), nil
case *rsa.PrivateKey:
block := &pem.Block{
Type: RSAPrivateKeyBlockType,
Bytes: x509.MarshalPKCS1PrivateKey(t),
}
return pem.EncodeToMemory(block), nil
default:
return nil, fmt.Errorf("private key is not a recognized type: %T", privateKey)
}
}
// PrivateKeyFromFile returns the private key in rsa.PrivateKey or ecdsa.PrivateKey format from a given PEM-encoded file.
// Returns an error if the file could not be read or if the private key could not be parsed.
func PrivateKeyFromFile(file string) (interface{}, error) {
data, err := os.ReadFile(file)
if err != nil {
return nil, err
}
key, err := ParsePrivateKeyPEM(data)
if err != nil {
return nil, fmt.Errorf("error reading private key file %s: %v", file, err)
}
return key, nil
}
// PublicKeysFromFile returns the public keys in rsa.PublicKey or ecdsa.PublicKey format from a given PEM-encoded file.
// Reads public keys from both public and private key files.
func PublicKeysFromFile(file string) ([]interface{}, error) {
data, err := os.ReadFile(file)
if err != nil {
return nil, err
}
keys, err := ParsePublicKeysPEM(data)
if err != nil {
return nil, fmt.Errorf("error reading public key file %s: %v", file, err)
}
return keys, nil
}
// verifyKeyData returns true if the provided data appears to be a valid private key.
func verifyKeyData(data []byte) bool {
if len(data) == 0 {
return false
}
_, err := ParsePrivateKeyPEM(data)
return err == nil
}
// ParsePrivateKeyPEM returns a private key parsed from a PEM block in the supplied data.
// Recognizes PEM blocks for "EC PRIVATE KEY", "RSA PRIVATE KEY", or "PRIVATE KEY"
func ParsePrivateKeyPEM(keyData []byte) (interface{}, error) {
var privateKeyPemBlock *pem.Block
for {
privateKeyPemBlock, keyData = pem.Decode(keyData)
if privateKeyPemBlock == nil {
break
}
switch privateKeyPemBlock.Type {
case ECPrivateKeyBlockType:
// ECDSA Private Key in ASN.1 format
if key, err := x509.ParseECPrivateKey(privateKeyPemBlock.Bytes); err == nil {
return key, nil
}
case RSAPrivateKeyBlockType:
// RSA Private Key in PKCS#1 format
if key, err := x509.ParsePKCS1PrivateKey(privateKeyPemBlock.Bytes); err == nil {
return key, nil
}
case PrivateKeyBlockType:
// RSA or ECDSA Private Key in unencrypted PKCS#8 format
if key, err := x509.ParsePKCS8PrivateKey(privateKeyPemBlock.Bytes); err == nil {
return key, nil
}
}
// tolerate non-key PEM blocks for compatibility with things like "EC PARAMETERS" blocks
// originally, only the first PEM block was parsed and expected to be a key block
}
// we read all the PEM blocks and didn't recognize one
return nil, fmt.Errorf("data does not contain a valid RSA or ECDSA private key")
}
// ParsePublicKeysPEM is a helper function for reading an array of rsa.PublicKey or ecdsa.PublicKey from a PEM-encoded byte array.
// Reads public keys from both public and private key files.
func ParsePublicKeysPEM(keyData []byte) ([]interface{}, error) {
var block *pem.Block
keys := []interface{}{}
for {
// read the next block
block, keyData = pem.Decode(keyData)
if block == nil {
break
}
// test block against parsing functions
if privateKey, err := parseRSAPrivateKey(block.Bytes); err == nil {
keys = append(keys, &privateKey.PublicKey)
continue
}
if publicKey, err := parseRSAPublicKey(block.Bytes); err == nil {
keys = append(keys, publicKey)
continue
}
if privateKey, err := parseECPrivateKey(block.Bytes); err == nil {
keys = append(keys, &privateKey.PublicKey)
continue
}
if publicKey, err := parseECPublicKey(block.Bytes); err == nil {
keys = append(keys, publicKey)
continue
}
// tolerate non-key PEM blocks for backwards compatibility
// originally, only the first PEM block was parsed and expected to be a key block
}
if len(keys) == 0 {
return nil, fmt.Errorf("data does not contain any valid RSA or ECDSA public keys")
}
return keys, nil
}
// parseRSAPublicKey parses a single RSA public key from the provided data
func parseRSAPublicKey(data []byte) (*rsa.PublicKey, error) {
var err error
// Parse the key
var parsedKey interface{}
if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil {
if cert, err := x509.ParseCertificate(data); err == nil {
parsedKey = cert.PublicKey
} else {
return nil, err
}
}
// Test if parsed key is an RSA Public Key
var pubKey *rsa.PublicKey
var ok bool
if pubKey, ok = parsedKey.(*rsa.PublicKey); !ok {
return nil, fmt.Errorf("data doesn't contain valid RSA Public Key")
}
return pubKey, nil
}
// parseRSAPrivateKey parses a single RSA private key from the provided data
func parseRSAPrivateKey(data []byte) (*rsa.PrivateKey, error) {
var err error
// Parse the key
var parsedKey interface{}
if parsedKey, err = x509.ParsePKCS1PrivateKey(data); err != nil {
if parsedKey, err = x509.ParsePKCS8PrivateKey(data); err != nil {
return nil, err
}
}
// Test if parsed key is an RSA Private Key
var privKey *rsa.PrivateKey
var ok bool
if privKey, ok = parsedKey.(*rsa.PrivateKey); !ok {
return nil, fmt.Errorf("data doesn't contain valid RSA Private Key")
}
return privKey, nil
}
// parseECPublicKey parses a single ECDSA public key from the provided data
func parseECPublicKey(data []byte) (*ecdsa.PublicKey, error) {
var err error
// Parse the key
var parsedKey interface{}
if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil {
if cert, err := x509.ParseCertificate(data); err == nil {
parsedKey = cert.PublicKey
} else {
return nil, err
}
}
// Test if parsed key is an ECDSA Public Key
var pubKey *ecdsa.PublicKey
var ok bool
if pubKey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
return nil, fmt.Errorf("data doesn't contain valid ECDSA Public Key")
}
return pubKey, nil
}
// parseECPrivateKey parses a single ECDSA private key from the provided data
func parseECPrivateKey(data []byte) (*ecdsa.PrivateKey, error) {
var err error
// Parse the key
var parsedKey interface{}
if parsedKey, err = x509.ParseECPrivateKey(data); err != nil {
return nil, err
}
// Test if parsed key is an ECDSA Private Key
var privKey *ecdsa.PrivateKey
var ok bool
if privKey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
return nil, fmt.Errorf("data doesn't contain valid ECDSA Private Key")
}
return privKey, nil
}

4
e2e/vendor/k8s.io/client-go/util/retry/OWNERS generated vendored Normal file
View File

@ -0,0 +1,4 @@
# See the OWNERS docs at https://go.k8s.io/owners
reviewers:
- caesarxuchao

105
e2e/vendor/k8s.io/client-go/util/retry/util.go generated vendored Normal file
View File

@ -0,0 +1,105 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package retry
import (
"time"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/wait"
)
// DefaultRetry is the recommended retry for a conflict where multiple clients
// are making changes to the same resource.
var DefaultRetry = wait.Backoff{
Steps: 5,
Duration: 10 * time.Millisecond,
Factor: 1.0,
Jitter: 0.1,
}
// DefaultBackoff is the recommended backoff for a conflict where a client
// may be attempting to make an unrelated modification to a resource under
// active management by one or more controllers.
var DefaultBackoff = wait.Backoff{
Steps: 4,
Duration: 10 * time.Millisecond,
Factor: 5.0,
Jitter: 0.1,
}
// OnError allows the caller to retry fn in case the error returned by fn is retriable
// according to the provided function. backoff defines the maximum retries and the wait
// interval between two retries.
func OnError(backoff wait.Backoff, retriable func(error) bool, fn func() error) error {
var lastErr error
err := wait.ExponentialBackoff(backoff, func() (bool, error) {
err := fn()
switch {
case err == nil:
return true, nil
case retriable(err):
lastErr = err
return false, nil
default:
return false, err
}
})
if err == wait.ErrWaitTimeout {
err = lastErr
}
return err
}
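// exampleOnError is an illustrative sketch, not part of the upstream client-go
// file: it retries an operation while the API server reports timeouts, using
// the package's DefaultBackoff. The doSomething callback is an assumption.
func exampleOnError(doSomething func() error) error {
	return OnError(DefaultBackoff, errors.IsServerTimeout, doSomething)
}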
// RetryOnConflict is used to make an update to a resource when you have to worry about
// conflicts caused by other code making unrelated updates to the resource at the same
// time. fn should fetch the resource to be modified, make appropriate changes to it, try
// to update it, and return (unmodified) the error from the update function. On a
// successful update, RetryOnConflict will return nil. If the update function returns a
// "Conflict" error, RetryOnConflict will wait some amount of time as described by
// backoff, and then try again. On a non-"Conflict" error, or if it retries too many times
// and gives up, RetryOnConflict will return an error to the caller.
//
// err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
// // Fetch the resource here; you need to refetch it on every try, since
// // if you got a conflict on the last update attempt then you need to get
// // the current version before making your own changes.
// pod, err := c.Pods("mynamespace").Get(name, metav1.GetOptions{})
// if err != nil {
// return err
// }
//
// // Make whatever updates to the resource are needed
// pod.Status.Phase = v1.PodFailed
//
// // Try to update
// _, err = c.Pods("mynamespace").UpdateStatus(pod)
// // You have to return err itself here (not wrapped inside another error)
// // so that RetryOnConflict can identify it correctly.
// return err
// })
// if err != nil {
// // May be conflict if max retries were hit, or may be something unrelated
// // like permissions or a network error
// return err
// }
// ...
//
// TODO: Make Backoff an interface?
func RetryOnConflict(backoff wait.Backoff, fn func() error) error {
return OnError(backoff, errors.IsConflict, fn)
}

View File

@ -0,0 +1,82 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package watchlist
import (
metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
metainternalversionvalidation "k8s.io/apimachinery/pkg/apis/meta/internalversion/validation"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientfeatures "k8s.io/client-go/features"
"k8s.io/utils/ptr"
)
var scheme = runtime.NewScheme()
func init() {
utilruntime.Must(metainternalversion.AddToScheme(scheme))
}
// PrepareWatchListOptionsFromListOptions creates a new ListOptions
// that can be used for a watch-list request from the given listOptions.
//
// This function also determines if the given listOptions can be used to form a watch-list request,
// which would result in streaming semantically equivalent data from the server.
func PrepareWatchListOptionsFromListOptions(listOptions metav1.ListOptions) (metav1.ListOptions, bool, error) {
if !clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient) {
return metav1.ListOptions{}, false, nil
}
internalListOptions := &metainternalversion.ListOptions{}
if err := scheme.Convert(&listOptions, internalListOptions, nil); err != nil {
return metav1.ListOptions{}, false, err
}
if errs := metainternalversionvalidation.ValidateListOptions(internalListOptions, true); len(errs) > 0 {
return metav1.ListOptions{}, false, nil
}
watchListOptions := listOptions
// this is our legacy case, the cache ignores LIMIT for
// ResourceVersion == 0 and RVM=unset|NotOlderThan
if listOptions.Limit > 0 && listOptions.ResourceVersion != "0" {
return metav1.ListOptions{}, false, nil
}
watchListOptions.Limit = 0
// to ensure that we can create a watch-list request that returns
// semantically equivalent data for the given listOptions,
// we need to validate that the RVM for the list is supported by watch-list requests.
if listOptions.ResourceVersionMatch == metav1.ResourceVersionMatchExact {
return metav1.ListOptions{}, false, nil
}
watchListOptions.ResourceVersionMatch = metav1.ResourceVersionMatchNotOlderThan
watchListOptions.Watch = true
watchListOptions.AllowWatchBookmarks = true
watchListOptions.SendInitialEvents = ptr.To(true)
internalWatchListOptions := &metainternalversion.ListOptions{}
if err := scheme.Convert(&watchListOptions, internalWatchListOptions, nil); err != nil {
return metav1.ListOptions{}, false, err
}
if errs := metainternalversionvalidation.ValidateListOptions(internalWatchListOptions, true); len(errs) > 0 {
return metav1.ListOptions{}, false, nil
}
return watchListOptions, true, nil
}
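// exampleListOrWatchList is an illustrative sketch, not part of the upstream
// client-go file: it shows how a caller decides between a plain LIST and a
// watch-list request. The function name and fallback behaviour are assumptions.
func exampleListOrWatchList(opts metav1.ListOptions) (metav1.ListOptions, bool, error) {
	watchListOpts, ok, err := PrepareWatchListOptionsFromListOptions(opts)
	if err != nil {
		return metav1.ListOptions{}, false, err
	}
	if !ok {
		// The options cannot be streamed safely; fall back to the original LIST.
		return opts, false, nil
	}
	return watchListOpts, true, nil
}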

View File

@ -0,0 +1,295 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"math"
"sync"
"time"
"golang.org/x/time/rate"
)
// Deprecated: RateLimiter is deprecated, use TypedRateLimiter instead.
type RateLimiter TypedRateLimiter[any]
type TypedRateLimiter[T comparable] interface {
// When gets an item and gets to decide how long that item should wait
When(item T) time.Duration
// Forget indicates that an item is finished being retried. It doesn't matter whether it's for failing
// or for success; we'll stop tracking it
Forget(item T)
// NumRequeues returns back how many failures the item has had
NumRequeues(item T) int
}
// DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has
// both overall and per-item rate limiting. The overall is a token bucket and the per-item is exponential
//
// Deprecated: Use DefaultTypedControllerRateLimiter instead.
func DefaultControllerRateLimiter() RateLimiter {
return DefaultTypedControllerRateLimiter[any]()
}
// DefaultTypedControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has
// both overall and per-item rate limiting. The overall is a token bucket and the per-item is exponential
func DefaultTypedControllerRateLimiter[T comparable]() TypedRateLimiter[T] {
return NewTypedMaxOfRateLimiter(
NewTypedItemExponentialFailureRateLimiter[T](5*time.Millisecond, 1000*time.Second),
// 10 qps, 100 bucket size. This is only for retry speed and it's only the overall factor (not per item).
&TypedBucketRateLimiter[T]{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
)
}
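// exampleCustomControllerRateLimiter is an illustrative sketch, not part of the
// upstream client-go file: it composes the same shape of limiter as
// DefaultTypedControllerRateLimiter, but with a slower per-item backoff and a
// smaller overall bucket. The numbers are assumptions, not recommendations.
func exampleCustomControllerRateLimiter() TypedRateLimiter[string] {
	return NewTypedMaxOfRateLimiter(
		NewTypedItemExponentialFailureRateLimiter[string](100*time.Millisecond, 30*time.Second),
		// 5 qps, 50 bucket size for the overall rate.
		&TypedBucketRateLimiter[string]{Limiter: rate.NewLimiter(rate.Limit(5), 50)},
	)
}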
// Deprecated: BucketRateLimiter is deprecated, use TypedBucketRateLimiter instead.
type BucketRateLimiter = TypedBucketRateLimiter[any]
// TypedBucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API
type TypedBucketRateLimiter[T comparable] struct {
*rate.Limiter
}
var _ RateLimiter = &BucketRateLimiter{}
func (r *TypedBucketRateLimiter[T]) When(item T) time.Duration {
return r.Limiter.Reserve().Delay()
}
func (r *TypedBucketRateLimiter[T]) NumRequeues(item T) int {
return 0
}
func (r *TypedBucketRateLimiter[T]) Forget(item T) {
}
// Deprecated: ItemExponentialFailureRateLimiter is deprecated, use TypedItemExponentialFailureRateLimiter instead.
type ItemExponentialFailureRateLimiter = TypedItemExponentialFailureRateLimiter[any]
// TypedItemExponentialFailureRateLimiter does a simple baseDelay*2^<num-failures> limit
// dealing with max failures and expiration are up to the caller
type TypedItemExponentialFailureRateLimiter[T comparable] struct {
failuresLock sync.Mutex
failures map[T]int
baseDelay time.Duration
maxDelay time.Duration
}
var _ RateLimiter = &ItemExponentialFailureRateLimiter{}
// Deprecated: NewItemExponentialFailureRateLimiter is deprecated, use NewTypedItemExponentialFailureRateLimiter instead.
func NewItemExponentialFailureRateLimiter(baseDelay time.Duration, maxDelay time.Duration) RateLimiter {
return NewTypedItemExponentialFailureRateLimiter[any](baseDelay, maxDelay)
}
func NewTypedItemExponentialFailureRateLimiter[T comparable](baseDelay time.Duration, maxDelay time.Duration) TypedRateLimiter[T] {
return &TypedItemExponentialFailureRateLimiter[T]{
failures: map[T]int{},
baseDelay: baseDelay,
maxDelay: maxDelay,
}
}
// Deprecated: DefaultItemBasedRateLimiter is deprecated, use DefaultTypedItemBasedRateLimiter instead.
func DefaultItemBasedRateLimiter() RateLimiter {
return DefaultTypedItemBasedRateLimiter[any]()
}
func DefaultTypedItemBasedRateLimiter[T comparable]() TypedRateLimiter[T] {
return NewTypedItemExponentialFailureRateLimiter[T](time.Millisecond, 1000*time.Second)
}
func (r *TypedItemExponentialFailureRateLimiter[T]) When(item T) time.Duration {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
exp := r.failures[item]
r.failures[item] = r.failures[item] + 1
// The backoff is capped such that 'calculated' value never overflows.
backoff := float64(r.baseDelay.Nanoseconds()) * math.Pow(2, float64(exp))
if backoff > math.MaxInt64 {
return r.maxDelay
}
calculated := time.Duration(backoff)
if calculated > r.maxDelay {
return r.maxDelay
}
return calculated
}
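// Illustrative progression for When above: with baseDelay=5ms and
// maxDelay=1000s, successive failures of the same item wait 5ms, 10ms, 20ms,
// 40ms, ... doubling on every retry, until the computed delay would exceed
// maxDelay, after which every further retry waits exactly maxDelay.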
func (r *TypedItemExponentialFailureRateLimiter[T]) NumRequeues(item T) int {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
return r.failures[item]
}
func (r *TypedItemExponentialFailureRateLimiter[T]) Forget(item T) {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
delete(r.failures, item)
}
// ItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that
// Deprecated: Use TypedItemFastSlowRateLimiter instead.
type ItemFastSlowRateLimiter = TypedItemFastSlowRateLimiter[any]
// TypedItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that
type TypedItemFastSlowRateLimiter[T comparable] struct {
failuresLock sync.Mutex
failures map[T]int
maxFastAttempts int
fastDelay time.Duration
slowDelay time.Duration
}
var _ RateLimiter = &ItemFastSlowRateLimiter{}
// Deprecated: NewItemFastSlowRateLimiter is deprecated, use NewTypedItemFastSlowRateLimiter instead.
func NewItemFastSlowRateLimiter(fastDelay, slowDelay time.Duration, maxFastAttempts int) RateLimiter {
return NewTypedItemFastSlowRateLimiter[any](fastDelay, slowDelay, maxFastAttempts)
}
func NewTypedItemFastSlowRateLimiter[T comparable](fastDelay, slowDelay time.Duration, maxFastAttempts int) TypedRateLimiter[T] {
return &TypedItemFastSlowRateLimiter[T]{
failures: map[T]int{},
fastDelay: fastDelay,
slowDelay: slowDelay,
maxFastAttempts: maxFastAttempts,
}
}
func (r *TypedItemFastSlowRateLimiter[T]) When(item T) time.Duration {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
r.failures[item] = r.failures[item] + 1
if r.failures[item] <= r.maxFastAttempts {
return r.fastDelay
}
return r.slowDelay
}
func (r *TypedItemFastSlowRateLimiter[T]) NumRequeues(item T) int {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
return r.failures[item]
}
func (r *TypedItemFastSlowRateLimiter[T]) Forget(item T) {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
delete(r.failures, item)
}
// MaxOfRateLimiter calls every RateLimiter and returns the worst case response.
// When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items
// were separately delayed a longer time.
//
// Deprecated: Use TypedMaxOfRateLimiter instead.
type MaxOfRateLimiter = TypedMaxOfRateLimiter[any]
// TypedMaxOfRateLimiter calls every RateLimiter and returns the worst case response.
// When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items
// were separately delayed a longer time.
type TypedMaxOfRateLimiter[T comparable] struct {
limiters []TypedRateLimiter[T]
}
func (r *TypedMaxOfRateLimiter[T]) When(item T) time.Duration {
ret := time.Duration(0)
for _, limiter := range r.limiters {
curr := limiter.When(item)
if curr > ret {
ret = curr
}
}
return ret
}
// Deprecated: NewMaxOfRateLimiter is deprecated, use NewTypedMaxOfRateLimiter instead.
func NewMaxOfRateLimiter(limiters ...TypedRateLimiter[any]) RateLimiter {
return NewTypedMaxOfRateLimiter(limiters...)
}
func NewTypedMaxOfRateLimiter[T comparable](limiters ...TypedRateLimiter[T]) TypedRateLimiter[T] {
return &TypedMaxOfRateLimiter[T]{limiters: limiters}
}
func (r *TypedMaxOfRateLimiter[T]) NumRequeues(item T) int {
ret := 0
for _, limiter := range r.limiters {
curr := limiter.NumRequeues(item)
if curr > ret {
ret = curr
}
}
return ret
}
func (r *TypedMaxOfRateLimiter[T]) Forget(item T) {
for _, limiter := range r.limiters {
limiter.Forget(item)
}
}
// WithMaxWaitRateLimiter has a maxDelay which avoids waiting too long
// Deprecated: Use TypedWithMaxWaitRateLimiter instead.
type WithMaxWaitRateLimiter = TypedWithMaxWaitRateLimiter[any]
// TypedWithMaxWaitRateLimiter has a maxDelay which avoids waiting too long
type TypedWithMaxWaitRateLimiter[T comparable] struct {
limiter TypedRateLimiter[T]
maxDelay time.Duration
}
// Deprecated: NewWithMaxWaitRateLimiter is deprecated, use NewTypedWithMaxWaitRateLimiter instead.
func NewWithMaxWaitRateLimiter(limiter RateLimiter, maxDelay time.Duration) RateLimiter {
return NewTypedWithMaxWaitRateLimiter[any](limiter, maxDelay)
}
func NewTypedWithMaxWaitRateLimiter[T comparable](limiter TypedRateLimiter[T], maxDelay time.Duration) TypedRateLimiter[T] {
return &TypedWithMaxWaitRateLimiter[T]{limiter: limiter, maxDelay: maxDelay}
}
func (w TypedWithMaxWaitRateLimiter[T]) When(item T) time.Duration {
delay := w.limiter.When(item)
if delay > w.maxDelay {
return w.maxDelay
}
return delay
}
func (w TypedWithMaxWaitRateLimiter[T]) Forget(item T) {
w.limiter.Forget(item)
}
func (w TypedWithMaxWaitRateLimiter[T]) NumRequeues(item T) int {
return w.limiter.NumRequeues(item)
}

View File

@ -0,0 +1,360 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"container/heap"
"sync"
"time"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/utils/clock"
)
// DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to
// requeue items after failures without ending up in a hot-loop.
//
// Deprecated: use TypedDelayingInterface instead.
type DelayingInterface TypedDelayingInterface[any]
// TypedDelayingInterface is an Interface that can Add an item at a later time. This makes it easier to
// requeue items after failures without ending up in a hot-loop.
type TypedDelayingInterface[T comparable] interface {
TypedInterface[T]
// AddAfter adds an item to the workqueue after the indicated duration has passed
AddAfter(item T, duration time.Duration)
}
// DelayingQueueConfig specifies optional configurations to customize a DelayingInterface.
//
// Deprecated: use TypedDelayingQueueConfig instead.
type DelayingQueueConfig = TypedDelayingQueueConfig[any]
// TypedDelayingQueueConfig specifies optional configurations to customize a DelayingInterface.
type TypedDelayingQueueConfig[T comparable] struct {
// Name for the queue. If unnamed, the metrics will not be registered.
Name string
// MetricsProvider optionally allows specifying a metrics provider to use for the queue
// instead of the global provider.
MetricsProvider MetricsProvider
// Clock optionally allows injecting a real or fake clock for testing purposes.
Clock clock.WithTicker
// Queue optionally allows injecting custom queue Interface instead of the default one.
Queue TypedInterface[T]
}
// NewDelayingQueue constructs a new workqueue with delayed queuing ability.
// NewDelayingQueue does not emit metrics. For use with a MetricsProvider, please use
// NewDelayingQueueWithConfig instead and specify a name.
//
// Deprecated: use NewTypedDelayingQueue instead.
func NewDelayingQueue() DelayingInterface {
return NewDelayingQueueWithConfig(DelayingQueueConfig{})
}
// NewTypedDelayingQueue constructs a new workqueue with delayed queuing ability.
// NewTypedDelayingQueue does not emit metrics. For use with a MetricsProvider, please use
// NewTypedDelayingQueueWithConfig instead and specify a name.
func NewTypedDelayingQueue[T comparable]() TypedDelayingInterface[T] {
return NewTypedDelayingQueueWithConfig(TypedDelayingQueueConfig[T]{})
}
// NewDelayingQueueWithConfig constructs a new workqueue with options to
// customize different properties.
//
// Deprecated: use NewTypedDelayingQueueWithConfig instead.
func NewDelayingQueueWithConfig(config DelayingQueueConfig) DelayingInterface {
return NewTypedDelayingQueueWithConfig[any](config)
}
// TypedNewDelayingQueue exists for backwards compatibility only.
//
// Deprecated: use NewTypedDelayingQueueWithConfig instead.
func TypedNewDelayingQueue[T comparable]() TypedDelayingInterface[T] {
return NewTypedDelayingQueue[T]()
}
// NewTypedDelayingQueueWithConfig constructs a new workqueue with options to
// customize different properties.
func NewTypedDelayingQueueWithConfig[T comparable](config TypedDelayingQueueConfig[T]) TypedDelayingInterface[T] {
if config.Clock == nil {
config.Clock = clock.RealClock{}
}
if config.Queue == nil {
config.Queue = NewTypedWithConfig[T](TypedQueueConfig[T]{
Name: config.Name,
MetricsProvider: config.MetricsProvider,
Clock: config.Clock,
})
}
return newDelayingQueue(config.Clock, config.Queue, config.Name, config.MetricsProvider)
}
// NewDelayingQueueWithCustomQueue constructs a new workqueue with ability to
// inject custom queue Interface instead of the default one
// Deprecated: Use NewDelayingQueueWithConfig instead.
func NewDelayingQueueWithCustomQueue(q Interface, name string) DelayingInterface {
return NewDelayingQueueWithConfig(DelayingQueueConfig{
Name: name,
Queue: q,
})
}
// NewNamedDelayingQueue constructs a new named workqueue with delayed queuing ability.
// Deprecated: Use NewDelayingQueueWithConfig instead.
func NewNamedDelayingQueue(name string) DelayingInterface {
return NewDelayingQueueWithConfig(DelayingQueueConfig{Name: name})
}
// NewDelayingQueueWithCustomClock constructs a new named workqueue
// with ability to inject real or fake clock for testing purposes.
// Deprecated: Use NewDelayingQueueWithConfig instead.
func NewDelayingQueueWithCustomClock(clock clock.WithTicker, name string) DelayingInterface {
return NewDelayingQueueWithConfig(DelayingQueueConfig{
Name: name,
Clock: clock,
})
}
func newDelayingQueue[T comparable](clock clock.WithTicker, q TypedInterface[T], name string, provider MetricsProvider) *delayingType[T] {
ret := &delayingType[T]{
TypedInterface: q,
clock: clock,
heartbeat: clock.NewTicker(maxWait),
stopCh: make(chan struct{}),
waitingForAddCh: make(chan *waitFor[T], 1000),
metrics: newRetryMetrics(name, provider),
}
go ret.waitingLoop()
return ret
}
// delayingType wraps an Interface and provides delayed re-enqueuing
type delayingType[T comparable] struct {
TypedInterface[T]
// clock tracks time for delayed firing
clock clock.Clock
// stopCh lets us signal a shutdown to the waiting loop
stopCh chan struct{}
// stopOnce guarantees we only signal shutdown a single time
stopOnce sync.Once
// heartbeat ensures we wait no more than maxWait before firing
heartbeat clock.Ticker
// waitingForAddCh is a buffered channel that feeds waitingForAdd
waitingForAddCh chan *waitFor[T]
// metrics counts the number of retries
metrics retryMetrics
}
// waitFor holds the data to add and the time it should be added
type waitFor[T any] struct {
data T
readyAt time.Time
// index in the priority queue (heap)
index int
}
// waitForPriorityQueue implements a priority queue for waitFor items.
//
// waitForPriorityQueue implements heap.Interface. The item occurring next in
// time (i.e., the item with the smallest readyAt) is at the root (index 0).
// Peek returns this minimum item at index 0. Pop returns the minimum item after
// it has been removed from the queue and placed at index Len()-1 by
// container/heap. Push adds an item at index Len(), and container/heap
// percolates it into the correct location.
type waitForPriorityQueue[T any] []*waitFor[T]
func (pq waitForPriorityQueue[T]) Len() int {
return len(pq)
}
func (pq waitForPriorityQueue[T]) Less(i, j int) bool {
return pq[i].readyAt.Before(pq[j].readyAt)
}
func (pq waitForPriorityQueue[T]) Swap(i, j int) {
pq[i], pq[j] = pq[j], pq[i]
pq[i].index = i
pq[j].index = j
}
// Push adds an item to the queue. Push should not be called directly; instead,
// use `heap.Push`.
func (pq *waitForPriorityQueue[T]) Push(x interface{}) {
n := len(*pq)
item := x.(*waitFor[T])
item.index = n
*pq = append(*pq, item)
}
// Pop removes an item from the queue. Pop should not be called directly;
// instead, use `heap.Pop`.
func (pq *waitForPriorityQueue[T]) Pop() interface{} {
n := len(*pq)
item := (*pq)[n-1]
item.index = -1
*pq = (*pq)[0:(n - 1)]
return item
}
// Peek returns the item at the beginning of the queue, without removing the
// item or otherwise mutating the queue. It is safe to call directly.
func (pq waitForPriorityQueue[T]) Peek() interface{} {
return pq[0]
}
// ShutDown stops the queue. After the queue drains, the returned shutdown bool
// on Get() will be true. This method may be invoked more than once.
func (q *delayingType[T]) ShutDown() {
q.stopOnce.Do(func() {
q.TypedInterface.ShutDown()
close(q.stopCh)
q.heartbeat.Stop()
})
}
// AddAfter adds the given item to the work queue after the given delay
func (q *delayingType[T]) AddAfter(item T, duration time.Duration) {
// don't add if we're already shutting down
if q.ShuttingDown() {
return
}
q.metrics.retry()
// immediately add things with no delay
if duration <= 0 {
q.Add(item)
return
}
select {
case <-q.stopCh:
// unblock if ShutDown() is called
case q.waitingForAddCh <- &waitFor[T]{data: item, readyAt: q.clock.Now().Add(duration)}:
}
}
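// exampleRequeueLater is an illustrative sketch, not part of the upstream
// client-go file: requeueing a key after a fixed delay is the typical way
// controllers use AddAfter instead of hot-looping on a failing item. The
// five second delay is an assumption.
func exampleRequeueLater(q TypedDelayingInterface[string], key string) {
	q.AddAfter(key, 5*time.Second)
}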
// maxWait keeps a max bound on the wait time. It's just insurance against weird things happening.
// Checking the queue every 10 seconds isn't expensive and we know that we'll never end up with an
// expired item sitting for more than 10 seconds.
const maxWait = 10 * time.Second
// waitingLoop runs until the workqueue is shutdown and keeps a check on the list of items to be added.
func (q *delayingType[T]) waitingLoop() {
defer utilruntime.HandleCrash()
// Make a placeholder channel to use when there are no items in our list
never := make(<-chan time.Time)
// Make a timer that expires when the item at the head of the waiting queue is ready
var nextReadyAtTimer clock.Timer
waitingForQueue := &waitForPriorityQueue[T]{}
heap.Init(waitingForQueue)
waitingEntryByData := map[T]*waitFor[T]{}
for {
if q.TypedInterface.ShuttingDown() {
return
}
now := q.clock.Now()
// Add ready entries
for waitingForQueue.Len() > 0 {
entry := waitingForQueue.Peek().(*waitFor[T])
if entry.readyAt.After(now) {
break
}
entry = heap.Pop(waitingForQueue).(*waitFor[T])
q.Add(entry.data)
delete(waitingEntryByData, entry.data)
}
// Set up a wait for the first item's readyAt (if one exists)
nextReadyAt := never
if waitingForQueue.Len() > 0 {
if nextReadyAtTimer != nil {
nextReadyAtTimer.Stop()
}
entry := waitingForQueue.Peek().(*waitFor[T])
nextReadyAtTimer = q.clock.NewTimer(entry.readyAt.Sub(now))
nextReadyAt = nextReadyAtTimer.C()
}
select {
case <-q.stopCh:
return
case <-q.heartbeat.C():
// continue the loop, which will add ready items
case <-nextReadyAt:
// continue the loop, which will add ready items
case waitEntry := <-q.waitingForAddCh:
if waitEntry.readyAt.After(q.clock.Now()) {
insert(waitingForQueue, waitingEntryByData, waitEntry)
} else {
q.Add(waitEntry.data)
}
drained := false
for !drained {
select {
case waitEntry := <-q.waitingForAddCh:
if waitEntry.readyAt.After(q.clock.Now()) {
insert(waitingForQueue, waitingEntryByData, waitEntry)
} else {
q.Add(waitEntry.data)
}
default:
drained = true
}
}
}
}
}
// insert adds the entry to the priority queue, or updates the readyAt if it already exists in the queue
func insert[T comparable](q *waitForPriorityQueue[T], knownEntries map[T]*waitFor[T], entry *waitFor[T]) {
// if the entry already exists, update the time only if it would cause the item to be queued sooner
existing, exists := knownEntries[entry.data]
if exists {
if existing.readyAt.After(entry.readyAt) {
existing.readyAt = entry.readyAt
heap.Fix(q, existing.index)
}
return
}
heap.Push(q, entry)
knownEntries[entry.data] = entry
}

26
e2e/vendor/k8s.io/client-go/util/workqueue/doc.go generated vendored Normal file
View File

@ -0,0 +1,26 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package workqueue provides a simple queue that supports the following
// features:
// - Fair: items processed in the order in which they are added.
// - Stingy: a single item will not be processed multiple times concurrently,
// and if an item is added multiple times before it can be processed, it
// will only be processed once.
// - Multiple consumers and producers. In particular, it is allowed for an
// item to be reenqueued while it is being processed.
// - Shutdown notifications.
package workqueue // import "k8s.io/client-go/util/workqueue"

255
e2e/vendor/k8s.io/client-go/util/workqueue/metrics.go generated vendored Normal file
View File

@ -0,0 +1,255 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"sync"
"time"
"k8s.io/utils/clock"
)
// This file provides abstractions for setting the provider (e.g., prometheus)
// of metrics.
type queueMetrics[T comparable] interface {
add(item T)
get(item T)
done(item T)
updateUnfinishedWork()
}
// GaugeMetric represents a single numerical value that can arbitrarily go up
// and down.
type GaugeMetric interface {
Inc()
Dec()
}
// SettableGaugeMetric represents a single numerical value that can arbitrarily go up
// and down. (Separate from GaugeMetric to preserve backwards compatibility.)
type SettableGaugeMetric interface {
Set(float64)
}
// CounterMetric represents a single numerical value that only ever
// goes up.
type CounterMetric interface {
Inc()
}
// SummaryMetric captures individual observations.
type SummaryMetric interface {
Observe(float64)
}
// HistogramMetric counts individual observations.
type HistogramMetric interface {
Observe(float64)
}
type noopMetric struct{}
func (noopMetric) Inc() {}
func (noopMetric) Dec() {}
func (noopMetric) Set(float64) {}
func (noopMetric) Observe(float64) {}
// defaultQueueMetrics expects the caller to lock before setting any metrics.
type defaultQueueMetrics[T comparable] struct {
clock clock.Clock
// current depth of a workqueue
depth GaugeMetric
// total number of adds handled by a workqueue
adds CounterMetric
// how long an item stays in a workqueue
latency HistogramMetric
// how long processing an item from a workqueue takes
workDuration HistogramMetric
addTimes map[T]time.Time
processingStartTimes map[T]time.Time
// how long have current threads been working?
unfinishedWorkSeconds SettableGaugeMetric
longestRunningProcessor SettableGaugeMetric
}
func (m *defaultQueueMetrics[T]) add(item T) {
if m == nil {
return
}
m.adds.Inc()
m.depth.Inc()
if _, exists := m.addTimes[item]; !exists {
m.addTimes[item] = m.clock.Now()
}
}
func (m *defaultQueueMetrics[T]) get(item T) {
if m == nil {
return
}
m.depth.Dec()
m.processingStartTimes[item] = m.clock.Now()
if startTime, exists := m.addTimes[item]; exists {
m.latency.Observe(m.sinceInSeconds(startTime))
delete(m.addTimes, item)
}
}
func (m *defaultQueueMetrics[T]) done(item T) {
if m == nil {
return
}
if startTime, exists := m.processingStartTimes[item]; exists {
m.workDuration.Observe(m.sinceInSeconds(startTime))
delete(m.processingStartTimes, item)
}
}
func (m *defaultQueueMetrics[T]) updateUnfinishedWork() {
// Note that a summary metric would be better for this, but prometheus
// doesn't seem to have non-hacky ways to reset the summary metrics.
var total float64
var oldest float64
for _, t := range m.processingStartTimes {
age := m.sinceInSeconds(t)
total += age
if age > oldest {
oldest = age
}
}
m.unfinishedWorkSeconds.Set(total)
m.longestRunningProcessor.Set(oldest)
}
type noMetrics[T any] struct{}
func (noMetrics[T]) add(item T) {}
func (noMetrics[T]) get(item T) {}
func (noMetrics[T]) done(item T) {}
func (noMetrics[T]) updateUnfinishedWork() {}
// Gets the time since the specified start in seconds.
func (m *defaultQueueMetrics[T]) sinceInSeconds(start time.Time) float64 {
return m.clock.Since(start).Seconds()
}
type retryMetrics interface {
retry()
}
type defaultRetryMetrics struct {
retries CounterMetric
}
func (m *defaultRetryMetrics) retry() {
if m == nil {
return
}
m.retries.Inc()
}
// MetricsProvider generates various metrics used by the queue.
type MetricsProvider interface {
NewDepthMetric(name string) GaugeMetric
NewAddsMetric(name string) CounterMetric
NewLatencyMetric(name string) HistogramMetric
NewWorkDurationMetric(name string) HistogramMetric
NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric
NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric
NewRetriesMetric(name string) CounterMetric
}
type noopMetricsProvider struct{}
func (_ noopMetricsProvider) NewDepthMetric(name string) GaugeMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewAddsMetric(name string) CounterMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewLatencyMetric(name string) HistogramMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewWorkDurationMetric(name string) HistogramMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewRetriesMetric(name string) CounterMetric {
return noopMetric{}
}
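// exampleDepthOnlyProvider is an illustrative sketch, not part of the upstream
// client-go file: it satisfies MetricsProvider while only instrumenting queue
// depth, leaving every other metric as a no-op. The type name and the idea of
// instrumenting only depth are assumptions for demonstration purposes.
type exampleDepthOnlyProvider struct {
	depth GaugeMetric
}

func (p exampleDepthOnlyProvider) NewDepthMetric(name string) GaugeMetric { return p.depth }
func (p exampleDepthOnlyProvider) NewAddsMetric(name string) CounterMetric { return noopMetric{} }
func (p exampleDepthOnlyProvider) NewLatencyMetric(name string) HistogramMetric { return noopMetric{} }
func (p exampleDepthOnlyProvider) NewWorkDurationMetric(name string) HistogramMetric { return noopMetric{} }
func (p exampleDepthOnlyProvider) NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric {
	return noopMetric{}
}
func (p exampleDepthOnlyProvider) NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric {
	return noopMetric{}
}
func (p exampleDepthOnlyProvider) NewRetriesMetric(name string) CounterMetric { return noopMetric{} }

var _ MetricsProvider = exampleDepthOnlyProvider{}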
var globalMetricsProvider MetricsProvider = noopMetricsProvider{}
var setGlobalMetricsProviderOnce sync.Once
func newQueueMetrics[T comparable](mp MetricsProvider, name string, clock clock.Clock) queueMetrics[T] {
if len(name) == 0 || mp == (noopMetricsProvider{}) {
return noMetrics[T]{}
}
return &defaultQueueMetrics[T]{
clock: clock,
depth: mp.NewDepthMetric(name),
adds: mp.NewAddsMetric(name),
latency: mp.NewLatencyMetric(name),
workDuration: mp.NewWorkDurationMetric(name),
unfinishedWorkSeconds: mp.NewUnfinishedWorkSecondsMetric(name),
longestRunningProcessor: mp.NewLongestRunningProcessorSecondsMetric(name),
addTimes: map[T]time.Time{},
processingStartTimes: map[T]time.Time{},
}
}
func newRetryMetrics(name string, provider MetricsProvider) retryMetrics {
var ret *defaultRetryMetrics
if len(name) == 0 {
return ret
}
if provider == nil {
provider = globalMetricsProvider
}
return &defaultRetryMetrics{
retries: provider.NewRetriesMetric(name),
}
}
// SetProvider sets the metrics provider for all subsequently created work
// queues. Only the first call has an effect.
func SetProvider(metricsProvider MetricsProvider) {
setGlobalMetricsProviderOnce.Do(func() {
globalMetricsProvider = metricsProvider
})
}

View File

@ -0,0 +1,101 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"context"
"sync"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
type DoWorkPieceFunc func(piece int)
type options struct {
chunkSize int
}
type Options func(*options)
// WithChunkSize allows handing out chunks of work items to the workers, rather
// than processing them one by one.
// It is recommended to use this option if the number of pieces is significantly
// higher than the number of workers and the work done for each item is small.
func WithChunkSize(c int) func(*options) {
return func(o *options) {
o.chunkSize = c
}
}
// ParallelizeUntil is a framework that allows for parallelizing N
// independent pieces of work until done or the context is canceled.
func ParallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece DoWorkPieceFunc, opts ...Options) {
if pieces == 0 {
return
}
o := options{}
for _, opt := range opts {
opt(&o)
}
chunkSize := o.chunkSize
if chunkSize < 1 {
chunkSize = 1
}
chunks := ceilDiv(pieces, chunkSize)
toProcess := make(chan int, chunks)
for i := 0; i < chunks; i++ {
toProcess <- i
}
close(toProcess)
var stop <-chan struct{}
if ctx != nil {
stop = ctx.Done()
}
if chunks < workers {
workers = chunks
}
wg := sync.WaitGroup{}
wg.Add(workers)
for i := 0; i < workers; i++ {
go func() {
defer utilruntime.HandleCrash()
defer wg.Done()
for chunk := range toProcess {
start := chunk * chunkSize
end := start + chunkSize
if end > pieces {
end = pieces
}
for p := start; p < end; p++ {
select {
case <-stop:
return
default:
doWorkPiece(p)
}
}
}
}()
}
wg.Wait()
}
func ceilDiv(a, b int) int {
return (a + b - 1) / b
}
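// exampleSquares is an illustrative sketch, not part of the upstream client-go
// file: it fills a pre-sized slice in parallel, which is safe because every
// piece writes only to its own index. The worker count and chunk size are
// assumptions.
func exampleSquares(ctx context.Context, n int) []int {
	out := make([]int, n)
	ParallelizeUntil(ctx, 8, n, func(i int) {
		out[i] = i * i
	}, WithChunkSize(16))
	return out
}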

369
e2e/vendor/k8s.io/client-go/util/workqueue/queue.go generated vendored Normal file
View File

@ -0,0 +1,369 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"sync"
"time"
"k8s.io/utils/clock"
)
// Deprecated: Interface is deprecated, use TypedInterface instead.
type Interface TypedInterface[any]
type TypedInterface[T comparable] interface {
Add(item T)
Len() int
Get() (item T, shutdown bool)
Done(item T)
ShutDown()
ShutDownWithDrain()
ShuttingDown() bool
}
// Queue is the underlying storage for items. The functions below are always
// called from the same goroutine.
type Queue[T comparable] interface {
// Touch can be hooked when an existing item is added again. This may be
// useful if the implementation allows priority change for the given item.
Touch(item T)
// Push adds a new item.
Push(item T)
// Len tells the total number of items.
Len() int
// Pop retrieves an item.
Pop() (item T)
}
// DefaultQueue is a slice based FIFO queue.
func DefaultQueue[T comparable]() Queue[T] {
return new(queue[T])
}
// queue is a slice which implements Queue.
type queue[T comparable] []T
func (q *queue[T]) Touch(item T) {}
func (q *queue[T]) Push(item T) {
*q = append(*q, item)
}
func (q *queue[T]) Len() int {
return len(*q)
}
func (q *queue[T]) Pop() (item T) {
item = (*q)[0]
// The underlying array still exists and references this object, so the object will not be garbage collected.
(*q)[0] = *new(T)
*q = (*q)[1:]
return item
}
// QueueConfig specifies optional configurations to customize an Interface.
// Deprecated: use TypedQueueConfig instead.
type QueueConfig = TypedQueueConfig[any]
type TypedQueueConfig[T comparable] struct {
// Name for the queue. If unnamed, the metrics will not be registered.
Name string
// MetricsProvider optionally allows specifying a metrics provider to use for the queue
// instead of the global provider.
MetricsProvider MetricsProvider
// Clock optionally allows injecting a real or fake clock for testing purposes.
Clock clock.WithTicker
// Queue provides the underlying queue to use. It is optional and defaults to slice based FIFO queue.
Queue Queue[T]
}
// New constructs a new work queue (see the package comment).
//
// Deprecated: use NewTyped instead.
func New() *Type {
return NewWithConfig(QueueConfig{
Name: "",
})
}
// NewTyped constructs a new work queue (see the package comment).
func NewTyped[T comparable]() *Typed[T] {
return NewTypedWithConfig(TypedQueueConfig[T]{
Name: "",
})
}
// NewWithConfig constructs a new workqueue with ability to
// customize different properties.
//
// Deprecated: use NewTypedWithConfig instead.
func NewWithConfig(config QueueConfig) *Type {
return NewTypedWithConfig(config)
}
// NewTypedWithConfig constructs a new workqueue with ability to
// customize different properties.
func NewTypedWithConfig[T comparable](config TypedQueueConfig[T]) *Typed[T] {
return newQueueWithConfig(config, defaultUnfinishedWorkUpdatePeriod)
}
// NewNamed creates a new named queue.
// Deprecated: Use NewWithConfig instead.
func NewNamed(name string) *Type {
return NewWithConfig(QueueConfig{
Name: name,
})
}
// newQueueWithConfig constructs a new named workqueue
// with the ability to customize different properties for testing purposes
func newQueueWithConfig[T comparable](config TypedQueueConfig[T], updatePeriod time.Duration) *Typed[T] {
metricsProvider := globalMetricsProvider
if config.MetricsProvider != nil {
metricsProvider = config.MetricsProvider
}
if config.Clock == nil {
config.Clock = clock.RealClock{}
}
if config.Queue == nil {
config.Queue = DefaultQueue[T]()
}
return newQueue(
config.Clock,
config.Queue,
newQueueMetrics[T](metricsProvider, config.Name, config.Clock),
updatePeriod,
)
}
func newQueue[T comparable](c clock.WithTicker, queue Queue[T], metrics queueMetrics[T], updatePeriod time.Duration) *Typed[T] {
t := &Typed[T]{
clock: c,
queue: queue,
dirty: set[T]{},
processing: set[T]{},
cond: sync.NewCond(&sync.Mutex{}),
metrics: metrics,
unfinishedWorkUpdatePeriod: updatePeriod,
}
// Don't start the goroutine for a type of noMetrics so we don't consume
// resources unnecessarily
if _, ok := metrics.(noMetrics[T]); !ok {
go t.updateUnfinishedWorkLoop()
}
return t
}
const defaultUnfinishedWorkUpdatePeriod = 500 * time.Millisecond
// Type is a work queue (see the package comment).
// Deprecated: Use Typed instead.
type Type = Typed[any]
type Typed[t comparable] struct {
// queue defines the order in which we will work on items. Every
// element of queue should be in the dirty set and not in the
// processing set.
queue Queue[t]
// dirty defines all of the items that need to be processed.
dirty set[t]
// Things that are currently being processed are in the processing set.
// These things may be simultaneously in the dirty set. When we finish
// processing something and remove it from this set, we'll check if
// it's in the dirty set, and if so, add it to the queue.
processing set[t]
cond *sync.Cond
shuttingDown bool
drain bool
metrics queueMetrics[t]
unfinishedWorkUpdatePeriod time.Duration
clock clock.WithTicker
}
type empty struct{}
type set[t comparable] map[t]empty
func (s set[t]) has(item t) bool {
_, exists := s[item]
return exists
}
func (s set[t]) insert(item t) {
s[item] = empty{}
}
func (s set[t]) delete(item t) {
delete(s, item)
}
func (s set[t]) len() int {
return len(s)
}
// Add marks item as needing processing.
func (q *Typed[T]) Add(item T) {
q.cond.L.Lock()
defer q.cond.L.Unlock()
if q.shuttingDown {
return
}
if q.dirty.has(item) {
// the same item is added again before it is processed; call the Touch
// function if the queue cares about it (e.g., to reset its priority)
if !q.processing.has(item) {
q.queue.Touch(item)
}
return
}
q.metrics.add(item)
q.dirty.insert(item)
if q.processing.has(item) {
return
}
q.queue.Push(item)
q.cond.Signal()
}
// Len returns the current queue length, for informational purposes only. You
// shouldn't e.g. gate a call to Add() or Get() on Len() being a particular
// value; that can't be synchronized properly.
func (q *Typed[T]) Len() int {
q.cond.L.Lock()
defer q.cond.L.Unlock()
return q.queue.Len()
}
// Get blocks until it can return an item to be processed. If shutdown = true,
// the caller should end their goroutine. You must call Done with item when you
// have finished processing it.
func (q *Typed[T]) Get() (item T, shutdown bool) {
q.cond.L.Lock()
defer q.cond.L.Unlock()
for q.queue.Len() == 0 && !q.shuttingDown {
q.cond.Wait()
}
if q.queue.Len() == 0 {
// We must be shutting down.
return *new(T), true
}
item = q.queue.Pop()
q.metrics.get(item)
q.processing.insert(item)
q.dirty.delete(item)
return item, false
}
// Done marks item as done processing, and if it has been marked as dirty again
// while it was being processed, it will be re-added to the queue for
// re-processing.
func (q *Typed[T]) Done(item T) {
q.cond.L.Lock()
defer q.cond.L.Unlock()
q.metrics.done(item)
q.processing.delete(item)
if q.dirty.has(item) {
q.queue.Push(item)
q.cond.Signal()
} else if q.processing.len() == 0 {
q.cond.Signal()
}
}
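// exampleWorker is an illustrative sketch, not part of the upstream client-go
// file: the canonical Get/process/Done loop every consumer of this queue runs
// in its own goroutine. The process callback is an assumption.
func exampleWorker(q TypedInterface[string], process func(string) error) {
	for {
		key, shutdown := q.Get()
		if shutdown {
			return
		}
		// Done must always be called, even when processing fails, so that a
		// concurrent Add of the same key can re-queue it.
		func() {
			defer q.Done(key)
			_ = process(key)
		}()
	}
}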
// ShutDown will cause q to ignore all new items added to it and
// immediately instruct the worker goroutines to exit.
func (q *Typed[T]) ShutDown() {
q.cond.L.Lock()
defer q.cond.L.Unlock()
q.drain = false
q.shuttingDown = true
q.cond.Broadcast()
}
// ShutDownWithDrain will cause q to ignore all new items added to it. As soon
// as the worker goroutines have "drained", i.e. finished processing and called
// Done on all existing items in the queue, they will be instructed to exit and
// ShutDownWithDrain will return. Hence, a strict requirement for using this is
// that your workers must ensure Done is called on all items in the queue once
// the shutdown has been initiated; if that is not the case, this will block
// indefinitely. It is, however, safe to call ShutDown after having called
// ShutDownWithDrain, to force the queue shutdown to terminate immediately
// without waiting for the drainage.
func (q *Typed[T]) ShutDownWithDrain() {
q.cond.L.Lock()
defer q.cond.L.Unlock()
q.drain = true
q.shuttingDown = true
q.cond.Broadcast()
for q.processing.len() != 0 && q.drain {
q.cond.Wait()
}
}
func (q *Typed[T]) ShuttingDown() bool {
q.cond.L.Lock()
defer q.cond.L.Unlock()
return q.shuttingDown
}
func (q *Typed[T]) updateUnfinishedWorkLoop() {
t := q.clock.NewTicker(q.unfinishedWorkUpdatePeriod)
defer t.Stop()
for range t.C() {
if !func() bool {
q.cond.L.Lock()
defer q.cond.L.Unlock()
if !q.shuttingDown {
q.metrics.updateUnfinishedWork()
return true
}
return false
}() {
return
}
}
}

View File

@ -0,0 +1,147 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import "k8s.io/utils/clock"
// RateLimitingInterface is an interface that rate limits items being added to the queue.
//
// Deprecated: Use TypedRateLimitingInterface instead.
type RateLimitingInterface TypedRateLimitingInterface[any]
// TypedRateLimitingInterface is an interface that rate limits items being added to the queue.
type TypedRateLimitingInterface[T comparable] interface {
TypedDelayingInterface[T]
// AddRateLimited adds an item to the workqueue after the rate limiter says it's ok
AddRateLimited(item T)
// Forget indicates that an item is finished being retried. It doesn't matter whether it's for a permanent
// failure or for success; we'll stop the rate limiter from tracking it. This only clears the `rateLimiter`; you
// still have to call `Done` on the queue.
Forget(item T)
// NumRequeues returns back how many times the item was requeued
NumRequeues(item T) int
}
// RateLimitingQueueConfig specifies optional configurations to customize a RateLimitingInterface.
//
// Deprecated: Use TypedRateLimitingQueueConfig instead.
type RateLimitingQueueConfig = TypedRateLimitingQueueConfig[any]
// TypedRateLimitingQueueConfig specifies optional configurations to customize a TypedRateLimitingInterface.
type TypedRateLimitingQueueConfig[T comparable] struct {
// Name for the queue. If unnamed, the metrics will not be registered.
Name string
// MetricsProvider optionally allows specifying a metrics provider to use for the queue
// instead of the global provider.
MetricsProvider MetricsProvider
// Clock optionally allows injecting a real or fake clock for testing purposes.
Clock clock.WithTicker
// DelayingQueue optionally allows injecting custom delaying queue DelayingInterface instead of the default one.
DelayingQueue TypedDelayingInterface[T]
}
// NewRateLimitingQueue constructs a new workqueue with rateLimited queuing ability
// Remember to call Forget! If you don't, you may end up tracking failures forever.
// NewRateLimitingQueue does not emit metrics. For use with a MetricsProvider, please use
// NewRateLimitingQueueWithConfig instead and specify a name.
//
// Deprecated: Use NewTypedRateLimitingQueue instead.
func NewRateLimitingQueue(rateLimiter RateLimiter) RateLimitingInterface {
return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{})
}
// NewTypedRateLimitingQueue constructs a new workqueue with rateLimited queuing ability
// Remember to call Forget! If you don't, you may end up tracking failures forever.
// NewTypedRateLimitingQueue does not emit metrics. For use with a MetricsProvider, please use
// NewTypedRateLimitingQueueWithConfig instead and specify a name.
func NewTypedRateLimitingQueue[T comparable](rateLimiter TypedRateLimiter[T]) TypedRateLimitingInterface[T] {
return NewTypedRateLimitingQueueWithConfig(rateLimiter, TypedRateLimitingQueueConfig[T]{})
}
// NewRateLimitingQueueWithConfig constructs a new workqueue with rateLimited queuing ability
// with options to customize different properties.
// Remember to call Forget! If you don't, you may end up tracking failures forever.
//
// Deprecated: Use NewTypedRateLimitingQueueWithConfig instead.
func NewRateLimitingQueueWithConfig(rateLimiter RateLimiter, config RateLimitingQueueConfig) RateLimitingInterface {
return NewTypedRateLimitingQueueWithConfig(rateLimiter, config)
}
// NewTypedRateLimitingQueueWithConfig constructs a new workqueue with rateLimited queuing ability
// with options to customize different properties.
// Remember to call Forget! If you don't, you may end up tracking failures forever.
func NewTypedRateLimitingQueueWithConfig[T comparable](rateLimiter TypedRateLimiter[T], config TypedRateLimitingQueueConfig[T]) TypedRateLimitingInterface[T] {
if config.Clock == nil {
config.Clock = clock.RealClock{}
}
if config.DelayingQueue == nil {
config.DelayingQueue = NewTypedDelayingQueueWithConfig(TypedDelayingQueueConfig[T]{
Name: config.Name,
MetricsProvider: config.MetricsProvider,
Clock: config.Clock,
})
}
return &rateLimitingType[T]{
TypedDelayingInterface: config.DelayingQueue,
rateLimiter: rateLimiter,
}
}
// NewNamedRateLimitingQueue constructs a new named workqueue with rateLimited queuing ability.
// Deprecated: Use NewRateLimitingQueueWithConfig instead.
func NewNamedRateLimitingQueue(rateLimiter RateLimiter, name string) RateLimitingInterface {
return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{
Name: name,
})
}
// NewRateLimitingQueueWithDelayingInterface constructs a new named workqueue with rateLimited queuing ability
// with the option to inject a custom delaying queue instead of the default one.
// Deprecated: Use NewRateLimitingQueueWithConfig instead.
func NewRateLimitingQueueWithDelayingInterface(di DelayingInterface, rateLimiter RateLimiter) RateLimitingInterface {
return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{
DelayingQueue: di,
})
}
// rateLimitingType wraps an Interface and provides rate-limited re-enqueuing
type rateLimitingType[T comparable] struct {
TypedDelayingInterface[T]
rateLimiter TypedRateLimiter[T]
}
// AddRateLimited AddAfter's the item based on the time when the rate limiter says it's ok
func (q *rateLimitingType[T]) AddRateLimited(item T) {
q.TypedDelayingInterface.AddAfter(item, q.rateLimiter.When(item))
}
func (q *rateLimitingType[T]) NumRequeues(item T) int {
return q.rateLimiter.NumRequeues(item)
}
func (q *rateLimitingType[T]) Forget(item T) {
q.rateLimiter.Forget(item)
}
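// exampleHandleErr is an illustrative sketch, not part of the upstream
// client-go file: the usual requeue-or-drop decision a controller makes after
// processing an item. The maxRetries value and function name are assumptions.
func exampleHandleErr(q TypedRateLimitingInterface[string], key string, err error) {
	const maxRetries = 5
	if err == nil {
		// Processing succeeded; stop tracking the item's failure history.
		q.Forget(key)
		return
	}
	if q.NumRequeues(key) < maxRetries {
		// Requeue with the delay chosen by the rate limiter.
		q.AddRateLimited(key)
		return
	}
	// Too many failures: give up on this item.
	q.Forget(key)
}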