rebase: update kubernetes to 1.30

updating kubernetes to the 1.30 release

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Author: Madhu Rajanna
Date: 2024-05-15 08:54:18 +02:00
Committed by: mergify[bot]
Parent: 62ddcf715b
Commit: e727bd351e
747 changed files with 73809 additions and 10436 deletions


@@ -116,4 +116,4 @@ limitations under the License.
// queue's virtual start time is advanced by G. When a request
// finishes being served, and the actual service time was S, the
// queue's virtual start time is decremented by G - S.
package queueset
package queueset // import "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset"
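
The doc comment above describes the fair-queuing bookkeeping: when a request is dispatched, the queue's virtual start time is advanced by the estimated work G, and when the request finishes it is corrected by G - S, so the net charge is the actual service time S. A small worked example with illustrative numbers (not taken from the package, which tracks real SeatSeconds values):

package main

import "fmt"

func main() {
	// Illustrative numbers only, in milliseconds of seat-time; the real
	// queueset code tracks fqrequest.SeatSeconds values.
	const G = 100 // estimated work, charged when the request is dispatched
	const S = 60  // actual service time, known only once the request finishes

	virtualStart := 0
	virtualStart += G     // dispatch: advance the queue's virtual start time by G
	virtualStart -= G - S // completion: hand back the over-estimate G - S

	fmt.Println(virtualStart == S) // true: the queue is charged exactly S
}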


@@ -33,11 +33,6 @@ import (
fqrequest "k8s.io/apiserver/pkg/util/flowcontrol/request"
"k8s.io/apiserver/pkg/util/shufflesharding"
"k8s.io/klog/v2"
// The following hack is needed to work around a tooling deficiency.
// Packages imported only for test code are not included in vendor.
// See https://kubernetes.slack.com/archives/C0EG7JC6T/p1626985671458800?thread_ts=1626983387.450800&cid=C0EG7JC6T
_ "k8s.io/utils/clock/testing"
)
const nsTimeFmt = "2006-01-02 15:04:05.000000000"
@@ -792,11 +787,11 @@ func (qs *queueSet) findDispatchQueueToBoundLocked() (*queue, *request) {
queue := qs.queues[qs.robinIndex]
oldestWaiting, _ := queue.requestsWaiting.Peek()
if oldestWaiting != nil {
sMin = ssMin(sMin, queue.nextDispatchR)
sMax = ssMax(sMax, queue.nextDispatchR)
sMin = min(sMin, queue.nextDispatchR)
sMax = max(sMax, queue.nextDispatchR)
estimatedWorkInProgress := fqrequest.SeatsTimesDuration(float64(queue.seatsInUse), qs.estimatedServiceDuration)
dsMin = ssMin(dsMin, queue.nextDispatchR-estimatedWorkInProgress)
dsMax = ssMax(dsMax, queue.nextDispatchR-estimatedWorkInProgress)
dsMin = min(dsMin, queue.nextDispatchR-estimatedWorkInProgress)
dsMax = max(dsMax, queue.nextDispatchR-estimatedWorkInProgress)
currentVirtualFinish := queue.nextDispatchR + oldestWaiting.totalWork()
klog.V(11).InfoS("Considering queue to dispatch", "queueSet", qs.qCfg.Name, "queue", qs.robinIndex, "finishR", currentVirtualFinish)
if currentVirtualFinish < minVirtualFinish {
@@ -848,20 +843,6 @@ func (qs *queueSet) findDispatchQueueToBoundLocked() (*queue, *request) {
return minQueue, oldestReqFromMinQueue
}
func ssMin(a, b fqrequest.SeatSeconds) fqrequest.SeatSeconds {
if a > b {
return b
}
return a
}
func ssMax(a, b fqrequest.SeatSeconds) fqrequest.SeatSeconds {
if a < b {
return b
}
return a
}
// finishRequestAndDispatchAsMuchAsPossible is a convenience method
// which calls finishRequest for a given request and then dispatches
// as many requests as possible. This is all of what needs to be done
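
Besides the changes in findDispatchQueueToBoundLocked, the hunk above drops the package-local ssMin/ssMax helpers in favor of Go's builtin generic min and max (available since Go 1.21), which also work on defined types such as fqrequest.SeatSeconds. A minimal sketch of the same substitution, with SeatSeconds simplified to a plain uint64-based type for illustration:

package main

import "fmt"

// SeatSeconds stands in for fqrequest.SeatSeconds here; any defined type
// with an ordered underlying type works with the builtin min and max.
type SeatSeconds uint64

func main() {
	a, b := SeatSeconds(3), SeatSeconds(7)

	// Go 1.21+ builtins replace the hand-rolled ssMin/ssMax helpers.
	fmt.Println(min(a, b)) // 3
	fmt.Println(max(a, b)) // 7
}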


@@ -24,6 +24,7 @@ import (
"net"
"net/url"
"strconv"
"strings"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -32,6 +33,7 @@ import (
"k8s.io/apiserver/pkg/util/x509metrics"
"k8s.io/client-go/rest"
"k8s.io/utils/lru"
netutils "k8s.io/utils/net"
)
const (
@@ -128,7 +130,20 @@ func (cm *ClientManager) HookClient(cc ClientConfig) (*rest.RESTClient, error) {
return client.(*rest.RESTClient), nil
}
complete := func(cfg *rest.Config) (*rest.RESTClient, error) {
cfg, err := cm.hookClientConfig(cc)
if err != nil {
return nil, err
}
client, err := rest.UnversionedRESTClientFor(cfg)
if err == nil {
cm.cache.Add(string(cacheKey), client)
}
return client, err
}
func (cm *ClientManager) hookClientConfig(cc ClientConfig) (*rest.Config, error) {
complete := func(cfg *rest.Config) (*rest.Config, error) {
// Avoid client-side rate limiting talking to the webhook backend.
// Rate limiting should happen when deciding how many requests to serve.
cfg.QPS = -1
@@ -139,11 +154,6 @@ func (cm *ClientManager) HookClient(cc ClientConfig) (*rest.RESTClient, error) {
}
cfg.TLSClientConfig.CAData = append(cfg.TLSClientConfig.CAData, cc.CABundle...)
// Use http/1.1 instead of http/2.
// This is a workaround for http/2-enabled clients not load-balancing concurrent requests to multiple backends.
// See https://issue.k8s.io/75791 for details.
cfg.NextProtos = []string{"http/1.1"}
cfg.ContentConfig.NegotiatedSerializer = cm.negotiatedSerializer
cfg.ContentConfig.ContentType = runtime.ContentTypeJSON
@@ -153,12 +163,7 @@ func (cm *ClientManager) HookClient(cc ClientConfig) (*rest.RESTClient, error) {
x509MissingSANCounter,
x509InsecureSHA1Counter,
))
client, err := rest.UnversionedRESTClientFor(cfg)
if err == nil {
cm.cache.Add(string(cacheKey), client)
}
return client, err
return cfg, nil
}
if cc.Service != nil {
@@ -173,6 +178,12 @@ func (cm *ClientManager) HookClient(cc ClientConfig) (*rest.RESTClient, error) {
return nil, err
}
cfg := rest.CopyConfig(restConfig)
// Use http/1.1 instead of http/2.
// This is a workaround for http/2-enabled clients not load-balancing concurrent requests to multiple backends.
// See https://issue.k8s.io/75791 for details.
cfg.NextProtos = []string{"http/1.1"}
serverName := cc.Service.Name + "." + cc.Service.Namespace + ".svc"
host := net.JoinHostPort(serverName, strconv.Itoa(int(port)))
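
The comment added above explains the long-standing workaround from https://issue.k8s.io/75791: an HTTP/2 client multiplexes concurrent requests onto a single connection to one backend, so service-backed webhooks force HTTP/1.1 and let the Service spread connections across endpoints. A minimal standalone sketch of the same downgrade, using a hypothetical endpoint; in the webhook client the []string{"http/1.1"} value is set on the rest.Config and plumbed into its TLS configuration:

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

func main() {
	// Offer only "http/1.1" during ALPN so the server cannot negotiate
	// HTTP/2; concurrent requests then use separate connections that a
	// Service can balance across backends.
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{NextProtos: []string{"http/1.1"}},
		},
	}

	// Hypothetical webhook endpoint, for illustration only.
	resp, err := client.Get("https://webhook.example.svc:443/validate")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("negotiated protocol:", resp.Proto) // HTTP/1.1
}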
@@ -225,6 +236,22 @@ func (cm *ClientManager) HookClient(cc ClientConfig) (*rest.RESTClient, error) {
cfg := rest.CopyConfig(restConfig)
cfg.Host = u.Scheme + "://" + u.Host
cfg.APIPath = u.Path
if !isLocalHost(u) {
cfg.NextProtos = []string{"http/1.1"}
}
return complete(cfg)
}
func isLocalHost(u *url.URL) bool {
host := u.Hostname()
if strings.EqualFold(host, "localhost") {
return true
}
netIP := netutils.ParseIPSloppy(host)
if netIP != nil {
return netIP.IsLoopback()
}
return false
}
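
The new isLocalHost helper keeps that HTTP/1.1 downgrade away from URL webhooks pointing at the local machine, where there is no Service to balance across. A minimal sketch of the same check, substituting the standard library's net.ParseIP for netutils.ParseIPSloppy (the upstream variant additionally tolerates legacy addresses with leading zeros):

package main

import (
	"fmt"
	"net"
	"net/url"
	"strings"
)

// isLocalHost mirrors the helper added in this commit, with net.ParseIP
// standing in for netutils.ParseIPSloppy.
func isLocalHost(u *url.URL) bool {
	host := u.Hostname()
	if strings.EqualFold(host, "localhost") {
		return true
	}
	if ip := net.ParseIP(host); ip != nil {
		return ip.IsLoopback()
	}
	return false
}

func main() {
	for _, raw := range []string{
		"https://localhost:8443/validate",
		"https://127.0.0.1:8443/validate",
		"https://[::1]:8443/validate",
		"https://webhook.example.com/validate",
	} {
		u, err := url.Parse(raw)
		if err != nil {
			panic(err)
		}
		// Only non-local URL webhooks get cfg.NextProtos forced to http/1.1.
		fmt.Printf("%-40s local=%v\n", raw, isLocalHost(u))
	}
}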