rebase: update kubernetes and libraries to v1.22.0 version

Kubernetes v1.22 has been released, and this updates the
Ceph CSI dependencies to use the same version.
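
For reference, a Kubernetes v1.22.0 bump of this kind typically shows up in go.mod roughly
as below (an illustrative sketch only; the exact module set and versions are those recorded
in this commit's go.mod):

    require (
        k8s.io/api v0.22.0
        k8s.io/apimachinery v0.22.0
        k8s.io/client-go v0.22.0
    )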

Signed-off-by: Humble Chirammal <hchiramm@redhat.com>
Humble Chirammal
2021-08-09 12:49:24 +05:30
committed by mergify[bot]
parent e077c1fdf5
commit aa698bc3e1
759 changed files with 61864 additions and 6514 deletions


@ -8,7 +8,6 @@ reviewers:
- deads2k
- brendandburns
- liggitt
- erictune
- sttts
- luxas
- dims


@ -93,7 +93,6 @@ type Request struct {
rateLimiter flowcontrol.RateLimiter
backoff BackoffManager
timeout time.Duration
maxRetries int
// generic components accessible via method setters
verb string
@ -110,8 +109,9 @@ type Request struct {
subresource string
// output
err error
body io.Reader
err error
body io.Reader
retry WithRetry
}
// NewRequest creates a new request helper object for accessing runtime.Objects on a server.
@ -142,7 +142,7 @@ func NewRequest(c *RESTClient) *Request {
backoff: backoff,
timeout: timeout,
pathPrefix: pathPrefix,
maxRetries: 10,
retry: &withRetry{maxRetries: 10},
warningHandler: c.warningHandler,
}
@ -408,10 +408,7 @@ func (r *Request) Timeout(d time.Duration) *Request {
// function is specifically called with a different value.
// A zero maxRetries prevents it from doing retries and returns an error immediately.
func (r *Request) MaxRetries(maxRetries int) *Request {
if maxRetries < 0 {
maxRetries = 0
}
r.maxRetries = maxRetries
r.retry.SetMaxRetries(maxRetries)
return r
}
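// An illustrative use of MaxRetries on a client request (a sketch; assumes a configured
// *rest.RESTClient named c and a context ctx, neither shown here):
//
//	err := c.Get().
//		Resource("pods").
//		MaxRetries(5).
//		Do(ctx).
//		Error()
//
// The retry ceiling is now stored on the request's retry helper rather than on the
// removed maxRetries field, but the public behaviour of MaxRetries is unchanged.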
@ -602,7 +599,7 @@ func (r *Request) tryThrottleWithInfo(ctx context.Context, retryInfo string) err
if latency > extraLongThrottleLatency {
// If the rate limiter latency is very high, the log message should be printed at a higher log level,
// but we use a throttled logger to prevent spamming.
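// The new form below passes message through an explicit "%s" verb, so any '%'
// characters inside the message are not misinterpreted as format directives.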
globalThrottledLogger.Infof(message)
globalThrottledLogger.Infof("%s", message)
}
metrics.RateLimiterLatency.Observe(ctx, r.verb, r.finalURLTemplate(), latency)
@ -678,43 +675,88 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) {
return nil, r.err
}
url := r.URL().String()
req, err := http.NewRequest(r.verb, url, r.body)
if err != nil {
return nil, err
}
req = req.WithContext(ctx)
req.Header = r.headers
client := r.c.Client
if client == nil {
client = http.DefaultClient
}
r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL()))
resp, err := client.Do(req)
updateURLMetrics(ctx, r, resp, err)
if r.c.base != nil {
if err != nil {
r.backoff.UpdateBackoff(r.c.base, err, 0)
} else {
r.backoff.UpdateBackoff(r.c.base, err, resp.StatusCode)
}
}
if err != nil {
isErrRetryableFunc := func(request *http.Request, err error) bool {
// The watch stream mechanism handles many common partial data errors, so closed
// connections can be retried in many cases.
if net.IsProbableEOF(err) || net.IsTimeout(err) {
return watch.NewEmptyWatch(), nil
return true
}
return nil, err
return false
}
if resp.StatusCode != http.StatusOK {
defer resp.Body.Close()
if result := r.transformResponse(resp, req); result.err != nil {
return nil, result.err
var retryAfter *RetryAfter
url := r.URL().String()
for {
req, err := r.newHTTPRequest(ctx)
if err != nil {
return nil, err
}
return nil, fmt.Errorf("for request %s, got status: %v", url, resp.StatusCode)
}
r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL()))
if retryAfter != nil {
// We are retrying the request that we already sent to the apiserver
// at least once before.
// This request should also be throttled with the client-internal rate limiter.
if err := r.tryThrottleWithInfo(ctx, retryAfter.Reason); err != nil {
return nil, err
}
retryAfter = nil
}
resp, err := client.Do(req)
updateURLMetrics(ctx, r, resp, err)
if r.c.base != nil {
if err != nil {
r.backoff.UpdateBackoff(r.c.base, err, 0)
} else {
r.backoff.UpdateBackoff(r.c.base, err, resp.StatusCode)
}
}
if err == nil && resp.StatusCode == http.StatusOK {
return r.newStreamWatcher(resp)
}
done, transformErr := func() (bool, error) {
defer readAndCloseResponseBody(resp)
var retry bool
retryAfter, retry = r.retry.NextRetry(req, resp, err, isErrRetryableFunc)
if retry {
err := r.retry.BeforeNextRetry(ctx, r.backoff, retryAfter, url, r.body)
if err == nil {
return false, nil
}
klog.V(4).Infof("Could not retry request - %v", err)
}
if resp == nil {
// the server must have sent us an error in 'err'
return true, nil
}
if result := r.transformResponse(resp, req); result.err != nil {
return true, result.err
}
return true, fmt.Errorf("for request %s, got status: %v", url, resp.StatusCode)
}()
if done {
if isErrRetryableFunc(req, err) {
return watch.NewEmptyWatch(), nil
}
if err == nil {
// if the server sent us an HTTP Response object,
// we need to return the error object from that.
err = transformErr
}
return nil, err
}
}
}
func (r *Request) newStreamWatcher(resp *http.Response) (watch.Interface, error) {
contentType := resp.Header.Get("Content-Type")
mediaType, params, err := mime.ParseMediaType(contentType)
if err != nil {
@ -769,49 +811,75 @@ func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) {
return nil, err
}
url := r.URL().String()
req, err := http.NewRequest(r.verb, url, nil)
if err != nil {
return nil, err
}
if r.body != nil {
req.Body = ioutil.NopCloser(r.body)
}
req = req.WithContext(ctx)
req.Header = r.headers
client := r.c.Client
if client == nil {
client = http.DefaultClient
}
r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL()))
resp, err := client.Do(req)
updateURLMetrics(ctx, r, resp, err)
if r.c.base != nil {
var retryAfter *RetryAfter
url := r.URL().String()
for {
req, err := r.newHTTPRequest(ctx)
if err != nil {
r.backoff.UpdateBackoff(r.URL(), err, 0)
} else {
r.backoff.UpdateBackoff(r.URL(), err, resp.StatusCode)
return nil, err
}
}
if err != nil {
return nil, err
}
switch {
case (resp.StatusCode >= 200) && (resp.StatusCode < 300):
handleWarnings(resp.Header, r.warningHandler)
return resp.Body, nil
default:
// ensure we close the body before returning the error
defer resp.Body.Close()
result := r.transformResponse(resp, req)
err := result.Error()
if err == nil {
err = fmt.Errorf("%d while accessing %v: %s", result.statusCode, url, string(result.body))
if r.body != nil {
req.Body = ioutil.NopCloser(r.body)
}
r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL()))
if retryAfter != nil {
// We are retrying the request that we already sent to the apiserver
// at least once before.
// This request should also be throttled with the client-internal rate limiter.
if err := r.tryThrottleWithInfo(ctx, retryAfter.Reason); err != nil {
return nil, err
}
retryAfter = nil
}
resp, err := client.Do(req)
updateURLMetrics(ctx, r, resp, err)
if r.c.base != nil {
if err != nil {
r.backoff.UpdateBackoff(r.URL(), err, 0)
} else {
r.backoff.UpdateBackoff(r.URL(), err, resp.StatusCode)
}
}
if err != nil {
// we only retry on an HTTP response with 'Retry-After' header
return nil, err
}
switch {
case (resp.StatusCode >= 200) && (resp.StatusCode < 300):
handleWarnings(resp.Header, r.warningHandler)
return resp.Body, nil
default:
done, transformErr := func() (bool, error) {
defer resp.Body.Close()
var retry bool
retryAfter, retry = r.retry.NextRetry(req, resp, err, neverRetryError)
if retry {
err := r.retry.BeforeNextRetry(ctx, r.backoff, retryAfter, url, r.body)
if err == nil {
return false, nil
}
klog.V(4).Infof("Could not retry request - %v", err)
}
result := r.transformResponse(resp, req)
if err := result.Error(); err != nil {
return true, err
}
return true, fmt.Errorf("%d while accessing %v: %s", result.statusCode, url, string(result.body))
}()
if done {
return nil, transformErr
}
}
return nil, err
}
}
@ -842,6 +910,17 @@ func (r *Request) requestPreflightCheck() error {
return nil
}
func (r *Request) newHTTPRequest(ctx context.Context) (*http.Request, error) {
url := r.URL().String()
req, err := http.NewRequest(r.verb, url, r.body)
if err != nil {
return nil, err
}
req = req.WithContext(ctx)
req.Header = r.headers
return req, nil
}
// request connects to the server and invokes the provided function when a server response is
// received. It handles retry behavior and up front validation of requests. It will invoke
// fn at most once. It will return an error if a problem occurred prior to connecting to the
@ -881,27 +960,22 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp
}
// Right now we make about ten retry attempts if we get a Retry-After response.
retries := 0
var retryInfo string
var retryAfter *RetryAfter
for {
url := r.URL().String()
req, err := http.NewRequest(r.verb, url, r.body)
req, err := r.newHTTPRequest(ctx)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.Header = r.headers
r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL()))
if retries > 0 {
if retryAfter != nil {
// We are retrying the request that we already sent to the apiserver
// at least once before.
// This request should also be throttled with the client-internal rate limiter.
if err := r.tryThrottleWithInfo(ctx, retryInfo); err != nil {
if err := r.tryThrottleWithInfo(ctx, retryAfter.Reason); err != nil {
return err
}
retryInfo = ""
retryAfter = nil
}
resp, err := client.Do(req)
updateURLMetrics(ctx, r, resp, err)
@ -910,61 +984,45 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp
} else {
r.backoff.UpdateBackoff(r.URL(), err, resp.StatusCode)
}
if err != nil {
// "Connection reset by peer" or "apiserver is shutting down" are usually a transient errors.
// Thus in case of "GET" operations, we simply retry it.
// We are not automatically retrying "write" operations, as
// they are not idempotent.
if r.verb != "GET" {
return err
}
// For connection errors and apiserver shutdown errors retry.
if net.IsConnectionReset(err) || net.IsProbableEOF(err) {
// For the purpose of retry, we set the artificial "retry-after" response.
// TODO: Should we clean the original response if it exists?
resp = &http.Response{
StatusCode: http.StatusInternalServerError,
Header: http.Header{"Retry-After": []string{"1"}},
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
}
} else {
return err
}
}
done := func() bool {
// Ensure the response body is fully read and closed
// before we reconnect, so that we reuse the same TCP
// connection.
defer func() {
const maxBodySlurpSize = 2 << 10
if resp.ContentLength <= maxBodySlurpSize {
io.Copy(ioutil.Discard, &io.LimitedReader{R: resp.Body, N: maxBodySlurpSize})
}
resp.Body.Close()
}()
defer readAndCloseResponseBody(resp)
retries++
if seconds, wait := checkWait(resp); wait && retries <= r.maxRetries {
retryInfo = getRetryReason(retries, seconds, resp, err)
if seeker, ok := r.body.(io.Seeker); ok && r.body != nil {
_, err := seeker.Seek(0, 0)
if err != nil {
klog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body)
fn(req, resp)
return true
}
// if the server returns an error in err, the response will be nil.
f := func(req *http.Request, resp *http.Response) {
if resp == nil {
return
}
klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", seconds, retries, url)
r.backoff.Sleep(time.Duration(seconds) * time.Second)
return false
fn(req, resp)
}
fn(req, resp)
var retry bool
retryAfter, retry = r.retry.NextRetry(req, resp, err, func(req *http.Request, err error) bool {
// "Connection reset by peer" or "apiserver is shutting down" are usually a transient errors.
// Thus in case of "GET" operations, we simply retry it.
// We are not automatically retrying "write" operations, as they are not idempotent.
if r.verb != "GET" {
return false
}
// For connection errors and apiserver shutdown errors retry.
if net.IsConnectionReset(err) || net.IsProbableEOF(err) {
return true
}
return false
})
if retry {
err := r.retry.BeforeNextRetry(ctx, r.backoff, retryAfter, req.URL.String(), r.body)
if err == nil {
return false
}
klog.V(4).Infof("Could not retry request - %v", err)
}
f(req, resp)
return true
}()
if done {
return nil
return err
}
}
}
@ -1019,13 +1077,13 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu
// 3. Apiserver closes connection.
// 4. client-go should catch this and return an error.
klog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err)
streamErr := fmt.Errorf("stream error when reading response body, may be caused by closed connection. Please retry. Original error: %v", err)
streamErr := fmt.Errorf("stream error when reading response body, may be caused by closed connection. Please retry. Original error: %w", err)
return Result{
err: streamErr,
}
default:
klog.Errorf("Unexpected error when reading response body: %v", err)
unexpectedErr := fmt.Errorf("unexpected error when reading response body. Please retry. Original error: %v", err)
unexpectedErr := fmt.Errorf("unexpected error when reading response body. Please retry. Original error: %w", err)
return Result{
err: unexpectedErr,
}
@ -1196,19 +1254,6 @@ func isTextResponse(resp *http.Response) bool {
return strings.HasPrefix(media, "text/")
}
// checkWait returns true along with a number of seconds if the server instructed us to wait
// before retrying.
func checkWait(resp *http.Response) (int, bool) {
switch r := resp.StatusCode; {
// any 500 error code and 429 can trigger a wait
case r == http.StatusTooManyRequests, r >= 500:
default:
return 0, false
}
i, ok := retryAfterSeconds(resp)
return i, ok
}
// retryAfterSeconds returns the value of the Retry-After header and true, or 0 and false if
// the header was missing or not a valid number.
func retryAfterSeconds(resp *http.Response) (int, bool) {
@ -1220,26 +1265,6 @@ func retryAfterSeconds(resp *http.Response) (int, bool) {
return 0, false
}
func getRetryReason(retries, seconds int, resp *http.Response, err error) string {
// priority and fairness sets the UID of the FlowSchema associated with a request
// in the following response Header.
const responseHeaderMatchedFlowSchemaUID = "X-Kubernetes-PF-FlowSchema-UID"
message := fmt.Sprintf("retries: %d, retry-after: %ds", retries, seconds)
switch {
case resp.StatusCode == http.StatusTooManyRequests:
// it is server-side throttling from priority and fairness
flowSchemaUID := resp.Header.Get(responseHeaderMatchedFlowSchemaUID)
return fmt.Sprintf("%s - retry-reason: due to server-side throttling, FlowSchema UID: %q", message, flowSchemaUID)
case err != nil:
// it's a retriable error
return fmt.Sprintf("%s - retry-reason: due to retriable error, error: %v", message, err)
default:
return fmt.Sprintf("%s - retry-reason: %d", message, resp.StatusCode)
}
}
// Result contains the result of calling Request.Do().
type Result struct {
body []byte

232 additions: vendor/k8s.io/client-go/rest/with_retry.go (new generated vendored file)

@ -0,0 +1,232 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rest
import (
"context"
"fmt"
"io"
"io/ioutil"
"net/http"
"time"
"k8s.io/klog/v2"
)
// IsRetryableErrorFunc allows the client to provide its own function
// that determines whether the specified err from the server is retryable.
//
// request: the original request sent to the server
// err: the server sent this error to us
//
// The function returns true if the error is retryable and the request
// can be retried, otherwise it returns false.
// We have four modes of communication - 'Stream', 'Watch', 'Do' and 'DoRaw'; this
// function allows us to customize the retryability aspect of each.
type IsRetryableErrorFunc func(request *http.Request, err error) bool
func (r IsRetryableErrorFunc) IsErrorRetryable(request *http.Request, err error) bool {
return r(request, err)
}
var neverRetryError = IsRetryableErrorFunc(func(_ *http.Request, _ error) bool {
return false
})
// WithRetry allows the client to retry a request up to a certain number of times
// Note that WithRetry is not safe for concurrent use by multiple
// goroutines without additional locking or coordination.
type WithRetry interface {
// SetMaxRetries makes the request use the specified integer as a ceiling
// for retries upon receiving a 429 status code and the "Retry-After" header
// in the response.
// A zero maxRetries should prevent it from doing any retry and return immediately.
SetMaxRetries(maxRetries int)
// NextRetry advances the retry counter appropriately and returns true if the
// request should be retried, otherwise it returns false if:
// - we have already reached the maximum retry threshold.
// - the error does not fall into the retryable category.
// - the server has not sent us a 429, or 5xx status code and the
// 'Retry-After' response header is not set with a value.
//
// if retry is set to true, retryAfter will contain the information
// regarding the next retry.
//
// request: the original request sent to the server
// resp: the response sent from the server, it is set if err is nil
// err: the server sent this error to us, if err is set then resp is nil.
// f: an IsRetryableErrorFunc function provided by the client that determines
// if the err sent by the server is retryable.
NextRetry(req *http.Request, resp *http.Response, err error, f IsRetryableErrorFunc) (*RetryAfter, bool)
// BeforeNextRetry is responsible for carrying out operations that need
// to be completed before the next retry is initiated:
// - if the request context is already canceled there is no need to
// retry, the function will return ctx.Err().
// - we need to seek to the beginning of the request body before we
// initiate the next retry, the function should return an error if
// it fails to do so.
// - we should wait the number of seconds the server has asked us to
// in the 'Retry-After' response header.
//
// If BeforeNextRetry returns an error the client should abort the retry,
// otherwise it is safe to initiate the next retry.
BeforeNextRetry(ctx context.Context, backoff BackoffManager, retryAfter *RetryAfter, url string, body io.Reader) error
}
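// A minimal sketch of how a caller drives this interface (it mirrors, in simplified
// form, the loops in request.go; assumes req, ctx, client, a BackoffManager named
// backoff and an IsRetryableErrorFunc named isRetryable are in scope, and it omits
// draining the response body between attempts):
//
//	var retry WithRetry = &withRetry{maxRetries: 10}
//	var retryAfter *RetryAfter
//	for {
//		resp, err := client.Do(req)
//		var shouldRetry bool
//		retryAfter, shouldRetry = retry.NextRetry(req, resp, err, isRetryable)
//		if !shouldRetry {
//			break // handle resp/err as the final result
//		}
//		if err := retry.BeforeNextRetry(ctx, backoff, retryAfter, req.URL.String(), req.Body); err != nil {
//			break // context canceled or the body could not be rewound
//		}
//	}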
// RetryAfter holds information associated with the next retry.
type RetryAfter struct {
// Wait is the duration the server has asked us to wait before
// the next retry is initiated.
// This is the value of the 'Retry-After' response header in seconds.
Wait time.Duration
// Attempt is the Nth attempt after which we have received a retryable
// error or a 'Retry-After' response header from the server.
Attempt int
// Reason describes why we are retrying the request
Reason string
}
type withRetry struct {
maxRetries int
attempts int
}
func (r *withRetry) SetMaxRetries(maxRetries int) {
if maxRetries < 0 {
maxRetries = 0
}
r.maxRetries = maxRetries
}
func (r *withRetry) NextRetry(req *http.Request, resp *http.Response, err error, f IsRetryableErrorFunc) (*RetryAfter, bool) {
if req == nil || (resp == nil && err == nil) {
// bad input, we do nothing.
return nil, false
}
r.attempts++
retryAfter := &RetryAfter{Attempt: r.attempts}
if r.attempts > r.maxRetries {
return retryAfter, false
}
// if the server returned an error, it takes precedence over the http response.
var errIsRetryable bool
if f != nil && err != nil && f.IsErrorRetryable(req, err) {
errIsRetryable = true
// we have a retryable error, for which we will create an
// artificial "Retry-After" response.
resp = retryAfterResponse()
}
if err != nil && !errIsRetryable {
return retryAfter, false
}
// if we are here, we have either a or b:
// a: we have a retryable error, for which we already
// have an artificial "Retry-After" response.
// b: we have a response from the server for which we
// need to check if it is retryable
seconds, wait := checkWait(resp)
if !wait {
return retryAfter, false
}
retryAfter.Wait = time.Duration(seconds) * time.Second
retryAfter.Reason = getRetryReason(r.attempts, seconds, resp, err)
return retryAfter, true
}
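// For example, with maxRetries set to 2 the third call to NextRetry returns
// (retryAfter, false) regardless of the response, because attempts (now 3) exceeds
// maxRetries.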
func (r *withRetry) BeforeNextRetry(ctx context.Context, backoff BackoffManager, retryAfter *RetryAfter, url string, body io.Reader) error {
// Ensure the response body is fully read and closed before
// we reconnect, so that we reuse the same TCP connection.
if ctx.Err() != nil {
return ctx.Err()
}
if seeker, ok := body.(io.Seeker); ok && body != nil {
if _, err := seeker.Seek(0, 0); err != nil {
return fmt.Errorf("can't Seek() back to beginning of body for %T", r)
}
}
klog.V(4).Infof("Got a Retry-After %s response for attempt %d to %v", retryAfter.Wait, retryAfter.Attempt, url)
if backoff != nil {
backoff.Sleep(retryAfter.Wait)
}
return nil
}
// checkWait returns true along with a number of seconds if
// the server instructed us to wait before retrying.
func checkWait(resp *http.Response) (int, bool) {
switch r := resp.StatusCode; {
// any 500 error code and 429 can trigger a wait
case r == http.StatusTooManyRequests, r >= 500:
default:
return 0, false
}
i, ok := retryAfterSeconds(resp)
return i, ok
}
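// For example, a 429 response carrying "Retry-After: 5" yields (5, true), whereas a
// 200 response, or a 503 without a parsable Retry-After header, yields (0, false).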
func getRetryReason(retries, seconds int, resp *http.Response, err error) string {
// priority and fairness sets the UID of the FlowSchema
// associated with a request in the following response Header.
const responseHeaderMatchedFlowSchemaUID = "X-Kubernetes-PF-FlowSchema-UID"
message := fmt.Sprintf("retries: %d, retry-after: %ds", retries, seconds)
switch {
case resp.StatusCode == http.StatusTooManyRequests:
// it is server-side throttling from priority and fairness
flowSchemaUID := resp.Header.Get(responseHeaderMatchedFlowSchemaUID)
return fmt.Sprintf("%s - retry-reason: due to server-side throttling, FlowSchema UID: %q", message, flowSchemaUID)
case err != nil:
// it's a retryable error
return fmt.Sprintf("%s - retry-reason: due to retryable error, error: %v", message, err)
default:
return fmt.Sprintf("%s - retry-reason: %d", message, resp.StatusCode)
}
}
func readAndCloseResponseBody(resp *http.Response) {
if resp == nil {
return
}
// Ensure the response body is fully read and closed
// before we reconnect, so that we reuse the same TCP
// connection.
const maxBodySlurpSize = 2 << 10
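// 2 << 10 is 2048 bytes; larger bodies are closed without being drained, giving up
// connection reuse rather than reading an arbitrarily large response.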
defer resp.Body.Close()
if resp.ContentLength <= maxBodySlurpSize {
io.Copy(ioutil.Discard, &io.LimitedReader{R: resp.Body, N: maxBodySlurpSize})
}
}
func retryAfterResponse() *http.Response {
return &http.Response{
StatusCode: http.StatusInternalServerError,
Header: http.Header{"Retry-After": []string{"1"}},
}
}
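// Note: this artificial 500 response with a one second Retry-After is what NextRetry
// substitutes for a retryable transport error, so such errors are retried after a
// short fixed wait.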