dep: lift kube dependency to v0.18.6

Signed-off-by: Humble Chirammal <hchiramm@redhat.com>
Authored by Humble Chirammal on 2020-07-24 19:50:51 +05:30; committed by mergify[bot]
parent be9e7cf956
commit 02b8cd0b4b
33 changed files with 412 additions and 211 deletions

56
go.mod
View File

@ -29,38 +29,38 @@ require (
google.golang.org/genproto v0.0.0-20200413115906-b5235f65be36 // indirect
google.golang.org/grpc v1.28.0
gopkg.in/square/go-jose.v2 v2.5.0 // indirect
k8s.io/api v0.18.0
k8s.io/apimachinery v0.18.0
k8s.io/client-go v0.18.0
k8s.io/cloud-provider v0.18.0
k8s.io/cri-api v0.18.0 // indirect
k8s.io/api v0.18.6
k8s.io/apimachinery v0.18.6
k8s.io/client-go v0.18.6
k8s.io/cloud-provider v0.18.6
k8s.io/cri-api v0.18.6 // indirect
k8s.io/klog/v2 v2.3.0
k8s.io/kubectl v0.18.0 // indirect
k8s.io/kubernetes v1.18.0
k8s.io/kubectl v0.18.6 // indirect
k8s.io/kubernetes v1.18.6
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89
)
replace (
google.golang.org/grpc => google.golang.org/grpc v1.26.0
k8s.io/api => k8s.io/api v0.18.0
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.18.0
k8s.io/apimachinery => k8s.io/apimachinery v0.18.0
k8s.io/apiserver => k8s.io/apiserver v0.18.0
k8s.io/cli-runtime => k8s.io/cli-runtime v0.18.0
k8s.io/client-go => k8s.io/client-go v0.18.0
k8s.io/cloud-provider => k8s.io/cloud-provider v0.18.0
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.18.0
k8s.io/code-generator => k8s.io/code-generator v0.18.0
k8s.io/component-base => k8s.io/component-base v0.18.0
k8s.io/cri-api => k8s.io/cri-api v0.18.0
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.18.0
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.18.0
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.18.0
k8s.io/kube-proxy => k8s.io/kube-proxy v0.18.0
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.18.0
k8s.io/kubectl => k8s.io/kubectl v0.18.0
k8s.io/kubelet => k8s.io/kubelet v0.18.0
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.18.0
k8s.io/metrics => k8s.io/metrics v0.18.0
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.18.0
k8s.io/api => k8s.io/api v0.18.6
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.18.6
k8s.io/apimachinery => k8s.io/apimachinery v0.18.6
k8s.io/apiserver => k8s.io/apiserver v0.18.6
k8s.io/cli-runtime => k8s.io/cli-runtime v0.18.6
k8s.io/client-go => k8s.io/client-go v0.18.6
k8s.io/cloud-provider => k8s.io/cloud-provider v0.18.6
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.18.6
k8s.io/code-generator => k8s.io/code-generator v0.18.6
k8s.io/component-base => k8s.io/component-base v0.18.6
k8s.io/cri-api => k8s.io/cri-api v0.18.6
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.18.6
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.18.6
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.18.6
k8s.io/kube-proxy => k8s.io/kube-proxy v0.18.6
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.18.6
k8s.io/kubectl => k8s.io/kubectl v0.18.6
k8s.io/kubelet => k8s.io/kubelet v0.18.6
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.18.6
k8s.io/metrics => k8s.io/metrics v0.18.6
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.18.6
)

67
go.sum
View File

@ -798,27 +798,26 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
k8s.io/api v0.18.0 h1:lwYk8Vt7rsVTwjRU6pzEsa9YNhThbmbocQlKvNBB4EQ=
k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8=
k8s.io/apiextensions-apiserver v0.18.0 h1:HN4/P8vpGZFvB5SOMuPPH2Wt9Y/ryX+KRvIyAkchu1Q=
k8s.io/apiextensions-apiserver v0.18.0/go.mod h1:18Cwn1Xws4xnWQNC00FLq1E350b9lUF+aOdIWDOZxgo=
k8s.io/apimachinery v0.18.0 h1:fuPfYpk3cs1Okp/515pAf0dNhL66+8zk8RLbSX+EgAE=
k8s.io/apimachinery v0.18.0/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
k8s.io/apiserver v0.18.0 h1:ELAWpGWC6XdbRLi5lwAbEbvksD7hkXxPdxaJsdpist4=
k8s.io/apiserver v0.18.0/go.mod h1:3S2O6FeBBd6XTo0njUrLxiqk8GNy6wWOftjhJcXYnjw=
k8s.io/cli-runtime v0.18.0/go.mod h1:1eXfmBsIJosjn9LjEBUd2WVPoPAY9XGTqTFcPMIBsUQ=
k8s.io/client-go v0.18.0 h1:yqKw4cTUQraZK3fcVCMeSa+lqKwcjZ5wtcOIPnxQno4=
k8s.io/client-go v0.18.0/go.mod h1:uQSYDYs4WhVZ9i6AIoEZuwUggLVEF64HOD37boKAtF8=
k8s.io/cloud-provider v0.18.0 h1:Ri82BTdMutzIoiBWz/IS5o7I/+fCgRbO09iPWNfrZEM=
k8s.io/cloud-provider v0.18.0/go.mod h1:ZBq1FhoJ+XoQ8JYBYoyx81LS3JV0RAW/UmHf/6w9E6k=
k8s.io/cluster-bootstrap v0.18.0/go.mod h1:xSe+bOZ3asS/ciT91ESQYGhjOql43aBETfvbCzNvad8=
k8s.io/code-generator v0.18.0/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
k8s.io/component-base v0.18.0 h1:I+lP0fNfsEdTDpHaL61bCAqTZLoiWjEEP304Mo5ZQgE=
k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c=
k8s.io/cri-api v0.18.0 h1:ryVwCnvQ2OxtQHe56o85xZ9MkIoW4j0FrjgJG7x/Amw=
k8s.io/cri-api v0.18.0/go.mod h1:OJtpjDvfsKoLGhvcc0qfygved0S0dGX56IJzPbqTG1s=
k8s.io/csi-translation-lib v0.18.0 h1:tmQWO6UIXUFboUqlsxT27yCJS//RvC7BZvCDdTRyPyU=
k8s.io/csi-translation-lib v0.18.0/go.mod h1:iF8TE4ACSaPqN1qxmiIjvcU1A8VgkOrpcFGD7Z0hVu0=
k8s.io/api v0.18.6 h1:osqrAXbOQjkKIWDTjrqxWQ3w0GkKb1KA1XkUGHHYpeE=
k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI=
k8s.io/apiextensions-apiserver v0.18.6/go.mod h1:lv89S7fUysXjLZO7ke783xOwVTm6lKizADfvUM/SS/M=
k8s.io/apimachinery v0.18.6 h1:RtFHnfGNfd1N0LeSrKCUznz5xtUP1elRGvHJbL3Ntag=
k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
k8s.io/apiserver v0.18.6 h1:HcWwcOfhj4Yv6y2igP4ZUuovyPjVLGoZcG0Tsph4Mxo=
k8s.io/apiserver v0.18.6/go.mod h1:Zt2XvTHuaZjBz6EFYzpp+X4hTmgWGy8AthNVnTdm3Wg=
k8s.io/cli-runtime v0.18.6/go.mod h1:+G/WTNqHgUv636e5y7rhOQ7epUbRXnwmPnhOhD6t9uM=
k8s.io/client-go v0.18.6 h1:I+oWqJbibLSGsZj8Xs8F0aWVXJVIoUHWaaJV3kUN/Zw=
k8s.io/client-go v0.18.6/go.mod h1:/fwtGLjYMS1MaM5oi+eXhKwG+1UHidUEXRh6cNsdO0Q=
k8s.io/cloud-provider v0.18.6 h1:olYNA3/gQoy7XiSlYP16HFMK2jBWIVWHo0DXTtSvPuo=
k8s.io/cloud-provider v0.18.6/go.mod h1:QnPLLdFkvtx1dEyVMaPUdzVWB+ECzEf+PA3DXwIr8bo=
k8s.io/cluster-bootstrap v0.18.6/go.mod h1:lnM1CXtPImlEBTh5874ZI+ofZzdIy1t2JV9Y+NxvojU=
k8s.io/code-generator v0.18.6/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c=
k8s.io/component-base v0.18.6 h1:Wd6cHGwJN2qpufnirVOB3oMhyhbioGsKEi5HeDBsV+s=
k8s.io/component-base v0.18.6/go.mod h1:knSVsibPR5K6EW2XOjEHik6sdU5nCvKMrzMt2D4In14=
k8s.io/cri-api v0.18.6 h1:dxhb+Ii0qThCgl3ZR+LO3wAy8RVzvppYVtyLOUC0fyI=
k8s.io/cri-api v0.18.6/go.mod h1:OJtpjDvfsKoLGhvcc0qfygved0S0dGX56IJzPbqTG1s=
k8s.io/csi-translation-lib v0.18.6 h1:RNtZr7+SScf0QXf2I5HC09fjDKsobCpBxN7ZHrXGK7U=
k8s.io/csi-translation-lib v0.18.6/go.mod h1:w13PRDbRWol3Z9lM3RjxRd5vi/R9wog1DQHAbzzuKOI=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/heapster v1.2.0-beta.1/go.mod h1:h1uhptVXMwC8xtZBYsPXKVi8fpdlYkTs6k949KozGrM=
@ -828,22 +827,26 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.3.0 h1:WmkrnW7fdrm0/DMClc+HIxtftvxVIPAhlVwMQo5yLco=
k8s.io/klog/v2 v2.3.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/kube-aggregator v0.18.0/go.mod h1:ateewQ5QbjMZF/dihEFXwaEwoA4v/mayRvzfmvb6eqI=
k8s.io/kube-controller-manager v0.18.0/go.mod h1:pIRGUrSo+skWzwr5pgWNbgiFWEGSotbamGQpR/gKd5U=
k8s.io/kube-aggregator v0.18.6/go.mod h1:MKm8inLHdeiXQJCl6UdmgMosRrqJgyxO2obTXOkey/s=
k8s.io/kube-controller-manager v0.18.6/go.mod h1:T+Ayh47y1IrvwDSUAh4QT/aIrRcKWlvgdqV5PHrMwNs=
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM=
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
k8s.io/kube-proxy v0.18.0/go.mod h1:st3Gcg9wYAd1sn6UMeAs5AHN3R0NOItfB5P6qObKrr8=
k8s.io/kube-scheduler v0.18.0 h1:dwVfXf1AWfGgNUHJdNvxRpk8qGnVQqv+KshALC3aWkg=
k8s.io/kube-scheduler v0.18.0/go.mod h1:GFaNT5Z5/zPZsjXmkGihac2qsT+0u2KIHDgXdFfPHPc=
k8s.io/kubectl v0.18.0 h1:hu52Ndq/d099YW+3sS3VARxFz61Wheiq8K9S7oa82Dk=
k8s.io/kubectl v0.18.0/go.mod h1:LOkWx9Z5DXMEg5KtOjHhRiC1fqJPLyCr3KtQgEolCkU=
k8s.io/kubelet v0.18.0/go.mod h1:1VULM2m7c/ePlIeNOVVK+kkprayDr1RPf1T8oaNaHuQ=
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY=
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
k8s.io/kube-proxy v0.18.6/go.mod h1:r3ScLxYTuskh8l2dDfAPdrFK3QnWIMsZI/+Bq5kkmWc=
k8s.io/kube-scheduler v0.18.6 h1:m6p3dclIffw3LgClcg8D16JOCYZl4h0uwaqpuTQgnb4=
k8s.io/kube-scheduler v0.18.6/go.mod h1:J+GApeR/QkU6eYonXir0i7+rcUVWzZPZbNHqjq4FpoQ=
k8s.io/kubectl v0.18.6 h1:IFPNuLPkZ59vSGQzynXY8XGz9yuOSRpkJupnobdYvO4=
k8s.io/kubectl v0.18.6/go.mod h1:3TLzFOrF9h4mlRPAvdNkDbs5NWspN4e0EnPnEB41CGo=
k8s.io/kubelet v0.18.6/go.mod h1:5e0PJYialWMWZgsYWJqI6zVW58y+MaQvmOQwEGFF4Xc=
k8s.io/kubernetes v1.18.0 h1:rVe+edi5GwutPQJ4KIZq1Nk506nmnfyz/KOZVCLv7Yo=
k8s.io/kubernetes v1.18.0/go.mod h1:z8xjOOO1Ljz+TaHpOxVGC7cxtF32TesIamoQ+BZrVS0=
k8s.io/legacy-cloud-providers v0.18.0/go.mod h1:4Bc9CdZg8wl0mskyhnaXa8DdqLpTUfPEMkw3FZok+H8=
k8s.io/metrics v0.18.0/go.mod h1:8aYTW18koXqjLVKL7Ds05RPMX9ipJZI3mywYvBOxXd4=
k8s.io/kubernetes v1.18.6 h1:2rkR3ffvd5YVyPYU4LAUDCKoKQZtjuuj8ga15mbv96o=
k8s.io/kubernetes v1.18.6/go.mod h1:Efg82S+Ti02A/Mww53bxroc7IgzX2bgPsf6hT8gAs3M=
k8s.io/legacy-cloud-providers v0.18.6/go.mod h1:0bU6t0dTOd0YkcByIdjx7WD4ihApa+aUrTgVJpqciZU=
k8s.io/metrics v0.18.6/go.mod h1:iAwGeabusQNO3duHDM7BBExTUB8L+iq8PM7N9EtQw6g=
k8s.io/repo-infra v0.0.1-alpha.1/go.mod h1:wO1t9WaB99V80ljbeENTnayuEEwNZt7gECYh/CEyOJ8=
k8s.io/sample-apiserver v0.18.0/go.mod h1:1RKw7QEixom4PIw/vjUvDgl2QQbuTXbeCUHLlNCzOjg=
k8s.io/sample-apiserver v0.18.6/go.mod h1:NSRGjwumFclVpq8zewaqGVwiyIR7DQbLAE6wQZ0uljI=
k8s.io/system-validators v1.0.4/go.mod h1:HgSgTg4NAGNoYYjKsUyk52gdNi2PVDswQ9Iyn66R7NI=
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU=
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=

View File

@ -873,6 +873,9 @@ const (
// FieldManagerConflict is used to report when another client claims to manage this field,
// It should only be returned for a request using server-side apply.
CauseTypeFieldManagerConflict CauseType = "FieldManagerConflict"
// CauseTypeResourceVersionTooLarge is used to report that the requested resource version
// is newer than the data observed by the API server, so the request cannot be served.
CauseTypeResourceVersionTooLarge CauseType = "ResourceVersionTooLarge"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

View File

@ -178,7 +178,7 @@ func ValidateManagedFields(fieldsList []metav1.ManagedFieldsEntry, fldPath *fiel
default:
allErrs = append(allErrs, field.Invalid(fldPath.Child("operation"), fields.Operation, "must be `Apply` or `Update`"))
}
if fields.FieldsType != "FieldsV1" {
if len(fields.FieldsType) > 0 && fields.FieldsType != "FieldsV1" {
allErrs = append(allErrs, field.Invalid(fldPath.Child("fieldsType"), fields.FieldsType, "must be `FieldsV1`"))
}
}

View File

@ -66,11 +66,36 @@ func Unmarshal(data []byte, v interface{}) error {
// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
return convertSliceNumbers(*v, 0)
case *interface{}:
// Build a decoder from the given data
decoder := json.NewDecoder(bytes.NewBuffer(data))
// Preserve numbers, rather than casting to float64 automatically
decoder.UseNumber()
// Run the decode
if err := decoder.Decode(v); err != nil {
return err
}
// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
return convertInterfaceNumbers(v, 0)
default:
return json.Unmarshal(data, v)
}
}
func convertInterfaceNumbers(v *interface{}, depth int) error {
var err error
switch v2 := (*v).(type) {
case json.Number:
*v, err = convertNumber(v2)
case map[string]interface{}:
err = convertMapNumbers(v2, depth+1)
case []interface{}:
err = convertSliceNumbers(v2, depth+1)
}
return err
}
// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64.
// values which are map[string]interface{} or []interface{} are recursively visited
func convertMapNumbers(m map[string]interface{}, depth int) error {

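The new *interface{} branch in Unmarshal mirrors the existing map and slice cases: it decodes through json.Decoder.UseNumber() and then post-processes json.Number values into int64 or float64. A minimal sketch of the observable effect, assuming the vendored helper package k8s.io/apimachinery/pkg/util/json:

package main

import (
	"fmt"

	utiljson "k8s.io/apimachinery/pkg/util/json"
)

func main() {
	data := []byte(`{"replicas": 3, "ratio": 0.5}`)

	// Decoding into a bare *interface{} now takes the json.Number path, so
	// whole numbers come back as int64 instead of float64.
	var v interface{}
	if err := utiljson.Unmarshal(data, &v); err != nil {
		panic(err)
	}
	m := v.(map[string]interface{})
	fmt.Printf("%T %v\n", m["replicas"], m["replicas"]) // int64 3
	fmt.Printf("%T %v\n", m["ratio"], m["ratio"])       // float64 0.5
}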
View File

@ -55,6 +55,12 @@ func JoinPreservingTrailingSlash(elem ...string) string {
return result
}
// IsTimeout returns true if the given error is a network timeout error
func IsTimeout(err error) bool {
neterr, ok := err.(net.Error)
return ok && neterr != nil && neterr.Timeout()
}
// IsProbableEOF returns true if the given error resembles a connection termination
// scenario that would justify assuming that the watch is empty.
// These errors are what the Go http stack returns back to us which are general
@ -440,7 +446,7 @@ redirectLoop:
// Only follow redirects to the same host. Otherwise, propagate the redirect response back.
if requireSameHostRedirects && location.Hostname() != originalLocation.Hostname() {
break redirectLoop
return nil, nil, fmt.Errorf("hostname mismatch: expected %s, found %s", originalLocation.Hostname(), location.Hostname())
}
// Reset the connection.

View File

@ -286,8 +286,9 @@ func contextForChannel(parentCh <-chan struct{}) (context.Context, context.Cance
}
// BackoffManager manages backoff with a particular scheme based on its underlying implementation. It provides
// an interface to return a timer for backoff, and caller shall backoff until Timer.C returns. If the second Backoff()
// is called before the timer from the first Backoff() call finishes, the first timer will NOT be drained.
// an interface to return a timer for backoff, and caller shall backoff until Timer.C() drains. If the second Backoff()
// is called before the timer from the first Backoff() call finishes, the first timer will NOT be drained and result in
// undetermined behavior.
// The BackoffManager is supposed to be called in a single-threaded environment.
type BackoffManager interface {
Backoff() clock.Timer
@ -317,7 +318,7 @@ func NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration time.Du
Steps: math.MaxInt32,
Cap: maxBackoff,
},
backoffTimer: c.NewTimer(0),
backoffTimer: nil,
initialBackoff: initBackoff,
lastBackoffStart: c.Now(),
backoffResetDuration: resetDuration,
@ -334,9 +335,14 @@ func (b *exponentialBackoffManagerImpl) getNextBackoff() time.Duration {
return b.backoff.Step()
}
// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for backoff.
// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for exponential backoff.
// The returned timer must be drained before calling Backoff() the second time
func (b *exponentialBackoffManagerImpl) Backoff() clock.Timer {
if b.backoffTimer == nil {
b.backoffTimer = b.clock.NewTimer(b.getNextBackoff())
} else {
b.backoffTimer.Reset(b.getNextBackoff())
}
return b.backoffTimer
}
@ -354,7 +360,7 @@ func NewJitteredBackoffManager(duration time.Duration, jitter float64, c clock.C
clock: c,
duration: duration,
jitter: jitter,
backoffTimer: c.NewTimer(0),
backoffTimer: nil,
}
}
@ -366,8 +372,15 @@ func (j *jitteredBackoffManagerImpl) getNextBackoff() time.Duration {
return jitteredPeriod
}
// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for jittered backoff.
// The returned timer must be drained before calling Backoff() the second time
func (j *jitteredBackoffManagerImpl) Backoff() clock.Timer {
j.backoffTimer.Reset(j.getNextBackoff())
backoff := j.getNextBackoff()
if j.backoffTimer == nil {
j.backoffTimer = j.clock.NewTimer(backoff)
} else {
j.backoffTimer.Reset(backoff)
}
return j.backoffTimer
}
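Both backoff managers now create their timer lazily on the first Backoff() call rather than starting a zero-duration timer in the constructor, which avoids a spurious immediate firing. A minimal usage sketch, assuming wait.NewExponentialBackoffManager from k8s.io/apimachinery; the durations are illustrative only:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Arguments: initBackoff, maxBackoff, resetDuration, backoffFactor, jitter, clock.
	bm := wait.NewExponentialBackoffManager(
		100*time.Millisecond, 5*time.Second, 30*time.Second, 2.0, 0.1, clock.RealClock{})

	for attempt := 0; attempt < 3; attempt++ {
		timer := bm.Backoff()
		// Drain the timer before the next Backoff() call: the manager reuses a
		// single timer, and an undrained channel leads to undetermined behavior.
		<-timer.C()
		fmt.Println("retrying, attempt", attempt)
	}
}

In client-go this is typically driven through wait.BackoffUntil, which blocks on the returned timer in the same way.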

View File

@ -113,7 +113,7 @@ func (sw *StreamWatcher) receive() {
case io.ErrUnexpectedEOF:
klog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err)
default:
if net.IsProbableEOF(err) {
if net.IsProbableEOF(err) || net.IsTimeout(err) {
klog.V(5).Infof("Unable to decode an event from the watch stream: %v", err)
} else {
sw.result <- Event{

View File

@ -177,7 +177,12 @@ var tooLargeResourceVersionCauseMsg = "Too large resource version"
// a minimum resource version that is larger than the largest currently available resource version for a requested resource.
func NewTooLargeResourceVersionError(minimumResourceVersion, currentRevision uint64, retrySeconds int) error {
err := errors.NewTimeoutError(fmt.Sprintf("Too large resource version: %d, current: %d", minimumResourceVersion, currentRevision), retrySeconds)
err.ErrStatus.Details.Causes = []metav1.StatusCause{{Message: tooLargeResourceVersionCauseMsg}}
err.ErrStatus.Details.Causes = []metav1.StatusCause{
{
Type: metav1.CauseTypeResourceVersionTooLarge,
Message: tooLargeResourceVersionCauseMsg,
},
}
return err
}
@ -186,15 +191,5 @@ func IsTooLargeResourceVersion(err error) bool {
if !errors.IsTimeout(err) {
return false
}
switch t := err.(type) {
case errors.APIStatus:
if d := t.Status().Details; d != nil {
for _, cause := range d.Causes {
if cause.Message == tooLargeResourceVersionCauseMsg {
return true
}
}
}
}
return false
return errors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge)
}
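With the typed cause attached above, detecting this condition no longer depends on matching the error message. A hedged client-side sketch using only apimachinery helpers present in v0.18.6; the error constructed in main is illustrative, mimicking what NewTooLargeResourceVersionError produces:

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// isResourceVersionTooLarge is a hypothetical helper a client might use.
func isResourceVersionTooLarge(err error) bool {
	// Match on the typed status cause instead of the human-readable message.
	return apierrors.IsTimeout(err) &&
		apierrors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge)
}

func main() {
	err := apierrors.NewTimeoutError("Too large resource version: 42, current: 10", 1)
	err.ErrStatus.Details.Causes = []metav1.StatusCause{
		{Type: metav1.CauseTypeResourceVersionTooLarge, Message: "Too large resource version"},
	}
	fmt.Println(isResourceVersionTooLarge(err)) // true
}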

View File

@ -655,7 +655,7 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) {
if err != nil {
// The watch stream mechanism handles many common partial data errors, so closed
// connections can be retried in many cases.
if net.IsProbableEOF(err) {
if net.IsProbableEOF(err) || net.IsTimeout(err) {
return watch.NewEmptyWatch(), nil
}
return nil, err

View File

@ -82,9 +82,9 @@ type Reflector struct {
// observed when doing a sync with the underlying store
// it is thread safe, but not synchronized with the underlying store
lastSyncResourceVersion string
// isLastSyncResourceVersionGone is true if the previous list or watch request with lastSyncResourceVersion
// failed with an HTTP 410 (Gone) status code.
isLastSyncResourceVersionGone bool
// isLastSyncResourceVersionUnavailable is true if the previous list or watch request with
// lastSyncResourceVersion failed with an "expired" or "too large resource version" error.
isLastSyncResourceVersionUnavailable bool
// lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion
lastSyncResourceVersionMutex sync.RWMutex
// WatchListPageSize is the requested chunk size of initial and resync watch lists.
@ -256,13 +256,14 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
}
list, paginatedResult, err = pager.List(context.Background(), options)
if isExpiredError(err) {
r.setIsLastSyncResourceVersionExpired(true)
// Retry immediately if the resource version used to list is expired.
if isExpiredError(err) || isTooLargeResourceVersionError(err) {
r.setIsLastSyncResourceVersionUnavailable(true)
// Retry immediately if the resource version used to list is unavailable.
// The pager already falls back to full list if paginated list calls fail due to an "Expired" error on
// continuation pages, but the pager might not be enabled, or the full list might fail because the
// resource version it is listing at is expired, so we need to fallback to resourceVersion="" in all
// to recover and ensure the reflector makes forward progress.
// continuation pages, but the pager might not be enabled, the full list might fail because the
// resource version it is listing at is expired or the cache may not yet be synced to the provided
// resource version. So we need to fallback to resourceVersion="" in all to recover and ensure
// the reflector makes forward progress.
list, paginatedResult, err = pager.List(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()})
}
close(listCh)
@ -292,7 +293,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
r.paginatedResult = true
}
r.setIsLastSyncResourceVersionExpired(false) // list was successful
r.setIsLastSyncResourceVersionUnavailable(false) // list was successful
initTrace.Step("Objects listed")
listMetaInterface, err := meta.ListAccessor(list)
if err != nil {
@ -364,6 +365,8 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
AllowWatchBookmarks: true,
}
// start the clock before sending the request, since some proxies won't flush headers until after the first watch event is sent
start := r.clock.Now()
w, err := r.listerWatcher.Watch(options)
if err != nil {
switch {
@ -390,11 +393,11 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
return nil
}
if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil {
if err := r.watchHandler(start, w, &resourceVersion, resyncerrc, stopCh); err != nil {
if err != errorStopRequested {
switch {
case isExpiredError(err):
// Don't set LastSyncResourceVersionExpired - LIST call with ResourceVersion=RV already
// Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already
// has a semantic that it returns data at least as fresh as provided RV.
// So first try to LIST with setting RV to resource version of last observed object.
klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err)
@ -417,8 +420,7 @@ func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) err
}
// watchHandler watches w and keeps *resourceVersion up to date.
func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, errc chan error, stopCh <-chan struct{}) error {
start := r.clock.Now()
func (r *Reflector) watchHandler(start time.Time, w watch.Interface, resourceVersion *string, errc chan error, stopCh <-chan struct{}) error {
eventCount := 0
// Stopping the watcher should be idempotent and if we return from this function there's no way
@ -518,9 +520,9 @@ func (r *Reflector) relistResourceVersion() string {
r.lastSyncResourceVersionMutex.RLock()
defer r.lastSyncResourceVersionMutex.RUnlock()
if r.isLastSyncResourceVersionGone {
if r.isLastSyncResourceVersionUnavailable {
// Since this reflector makes paginated list requests, and all paginated list requests skip the watch cache
// if the lastSyncResourceVersion is expired, we set ResourceVersion="" and list again to re-establish reflector
// if the lastSyncResourceVersion is unavailable, we set ResourceVersion="" and list again to re-establish reflector
// to the latest available ResourceVersion, using a consistent read from etcd.
return ""
}
@ -532,12 +534,12 @@ func (r *Reflector) relistResourceVersion() string {
return r.lastSyncResourceVersion
}
// setIsLastSyncResourceVersionExpired sets if the last list or watch request with lastSyncResourceVersion returned a
// expired error: HTTP 410 (Gone) Status Code.
func (r *Reflector) setIsLastSyncResourceVersionExpired(isExpired bool) {
// setIsLastSyncResourceVersionUnavailable sets if the last list or watch request with lastSyncResourceVersion returned
// "expired" or "too large resource version" error.
func (r *Reflector) setIsLastSyncResourceVersionUnavailable(isUnavailable bool) {
r.lastSyncResourceVersionMutex.Lock()
defer r.lastSyncResourceVersionMutex.Unlock()
r.isLastSyncResourceVersionGone = isExpired
r.isLastSyncResourceVersionUnavailable = isUnavailable
}
func isExpiredError(err error) bool {
@ -547,3 +549,7 @@ func isExpiredError(err error) bool {
// check when we fully drop support for Kubernetes 1.17 servers from reflectors.
return apierrors.IsResourceExpired(err) || apierrors.IsGone(err)
}
func isTooLargeResourceVersionError(err error) bool {
return apierrors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge)
}

View File

@ -35,7 +35,7 @@ import (
var (
// ClusterDefaults has the same behavior as the old EnvVar and DefaultCluster fields
// DEPRECATED will be replaced
ClusterDefaults = clientcmdapi.Cluster{Server: os.Getenv("KUBERNETES_MASTER")}
ClusterDefaults = clientcmdapi.Cluster{Server: getDefaultServer()}
// DefaultClientConfig represents the legacy behavior of this package for defaulting
// DEPRECATED will be replace
DefaultClientConfig = DirectClientConfig{*clientcmdapi.NewConfig(), "", &ConfigOverrides{
@ -43,6 +43,15 @@ var (
}, nil, NewDefaultClientConfigLoadingRules(), promptedCredentials{}}
)
// getDefaultServer returns a default setting for DefaultClientConfig
// DEPRECATED
func getDefaultServer() string {
if server := os.Getenv("KUBERNETES_MASTER"); len(server) > 0 {
return server
}
return "http://localhost:8080"
}
// ClientConfig is used to make it easy to get an api server client
type ClientConfig interface {
// RawConfig returns the merged result of all overrides

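The practical effect of the clientcmd change is that ClusterDefaults.Server falls back to the historical insecure default instead of an empty string when KUBERNETES_MASTER is not set. A tiny sketch to observe it (what prints depends on the environment at process start):

package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Prints KUBERNETES_MASTER if it was set when the package initialized,
	// otherwise the new fallback "http://localhost:8080".
	fmt.Println(clientcmd.ClusterDefaults.Server)
}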
View File

@ -120,7 +120,7 @@ func (rw *RetryWatcher) doReceive() (bool, time.Duration) {
default:
msg := "Watch failed: %v"
if net.IsProbableEOF(err) {
if net.IsProbableEOF(err) || net.IsTimeout(err) {
klog.V(5).Infof(msg, err)
// Retry
return false, 0

12
vendor/k8s.io/cloud-provider/go.mod generated vendored
View File

@ -5,9 +5,9 @@ module k8s.io/cloud-provider
go 1.13
require (
k8s.io/api v0.18.0
k8s.io/apimachinery v0.18.0
k8s.io/client-go v0.18.0
k8s.io/api v0.18.6
k8s.io/apimachinery v0.18.6
k8s.io/client-go v0.18.6
k8s.io/klog v1.0.0
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89
)
@ -15,7 +15,7 @@ require (
replace (
golang.org/x/sys => golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a // pinned to release-branch.go1.13
golang.org/x/tools => golang.org/x/tools v0.0.0-20190821162956-65e3620a7ae7 // pinned to release-branch.go1.13
k8s.io/api => k8s.io/api v0.18.0
k8s.io/apimachinery => k8s.io/apimachinery v0.18.0
k8s.io/client-go => k8s.io/client-go v0.18.0
k8s.io/api => k8s.io/api v0.18.6
k8s.io/apimachinery => k8s.io/apimachinery v0.18.6
k8s.io/client-go => k8s.io/client-go v0.18.6
)

10
vendor/k8s.io/cloud-provider/go.sum generated vendored
View File

@ -167,16 +167,16 @@ gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8=
k8s.io/apimachinery v0.18.0/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
k8s.io/client-go v0.18.0/go.mod h1:uQSYDYs4WhVZ9i6AIoEZuwUggLVEF64HOD37boKAtF8=
k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI=
k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
k8s.io/client-go v0.18.6/go.mod h1:/fwtGLjYMS1MaM5oi+eXhKwG+1UHidUEXRh6cNsdO0Q=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM=
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY=
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU=
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=

View File

@ -2,6 +2,6 @@
Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kubernetes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes.
This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/csi-api](https://git.k8s.io/kubernetes/staging/src/k8s.io/csi-api) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot).
This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/csi-translation-lib](https://git.k8s.io/kubernetes/staging/src/k8s.io/csi-translation-lib) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot).
Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/sig-architecture/staging.md) for more information.

View File

@ -5,16 +5,18 @@ module k8s.io/csi-translation-lib
go 1.13
require (
k8s.io/api v0.18.0
k8s.io/apimachinery v0.18.0
k8s.io/cloud-provider v0.18.0
github.com/stretchr/testify v1.4.0
k8s.io/api v0.18.6
k8s.io/apimachinery v0.18.6
k8s.io/cloud-provider v0.18.6
k8s.io/klog v1.0.0
)
replace (
golang.org/x/sys => golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a // pinned to release-branch.go1.13
golang.org/x/tools => golang.org/x/tools v0.0.0-20190821162956-65e3620a7ae7 // pinned to release-branch.go1.13
k8s.io/api => k8s.io/api v0.18.0
k8s.io/apimachinery => k8s.io/apimachinery v0.18.0
k8s.io/client-go => k8s.io/client-go v0.18.0
k8s.io/cloud-provider => k8s.io/cloud-provider v0.18.0
k8s.io/api => k8s.io/api v0.18.6
k8s.io/apimachinery => k8s.io/apimachinery v0.18.6
k8s.io/client-go => k8s.io/client-go v0.18.6
k8s.io/cloud-provider => k8s.io/cloud-provider v0.18.6
)

View File

@ -151,16 +151,16 @@ gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8=
k8s.io/apimachinery v0.18.0/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
k8s.io/client-go v0.18.0/go.mod h1:uQSYDYs4WhVZ9i6AIoEZuwUggLVEF64HOD37boKAtF8=
k8s.io/cloud-provider v0.18.0/go.mod h1:ZBq1FhoJ+XoQ8JYBYoyx81LS3JV0RAW/UmHf/6w9E6k=
k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI=
k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
k8s.io/client-go v0.18.6/go.mod h1:/fwtGLjYMS1MaM5oi+eXhKwG+1UHidUEXRh6cNsdO0Q=
k8s.io/cloud-provider v0.18.6/go.mod h1:QnPLLdFkvtx1dEyVMaPUdzVWB+ECzEf+PA3DXwIr8bo=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=

View File

@ -110,22 +110,23 @@ func (t *azureDiskCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume)
return nil, fmt.Errorf("pv is nil or Azure Disk source not defined on pv")
}
azureSource := pv.Spec.PersistentVolumeSource.AzureDisk
var (
azureSource = pv.Spec.PersistentVolumeSource.AzureDisk
// refer to https://github.com/kubernetes-sigs/azuredisk-csi-driver/blob/master/docs/driver-parameters.md
csiSource := &v1.CSIPersistentVolumeSource{
csiSource = &v1.CSIPersistentVolumeSource{
Driver: AzureDiskDriverName,
VolumeHandle: azureSource.DataDiskURI,
ReadOnly: *azureSource.ReadOnly,
FSType: *azureSource.FSType,
VolumeAttributes: map[string]string{azureDiskKind: "Managed"},
VolumeHandle: azureSource.DataDiskURI,
}
)
if azureSource.CachingMode != nil {
csiSource.VolumeAttributes[azureDiskCachingMode] = string(*azureSource.CachingMode)
}
if azureSource.FSType != nil {
csiSource.FSType = *azureSource.FSType
csiSource.VolumeAttributes[azureDiskFSType] = *azureSource.FSType
}
@ -133,9 +134,12 @@ func (t *azureDiskCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume)
csiSource.VolumeAttributes[azureDiskKind] = string(*azureSource.Kind)
}
if azureSource.ReadOnly != nil {
csiSource.ReadOnly = *azureSource.ReadOnly
}
pv.Spec.PersistentVolumeSource.AzureDisk = nil
pv.Spec.PersistentVolumeSource.CSI = csiSource
pv.Spec.AccessModes = backwardCompatibleAccessModes(pv.Spec.AccessModes)
return pv, nil
}

View File

@ -18,11 +18,13 @@ package plugins
import (
"fmt"
"regexp"
"strings"
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog"
)
const (
@ -32,14 +34,19 @@ const (
AzureFileInTreePluginName = "kubernetes.io/azure-file"
separator = "#"
volumeIDTemplate = "%s#%s#%s"
volumeIDTemplate = "%s#%s#%s#%s"
// Parameter names defined in azure file CSI driver, refer to
// https://github.com/kubernetes-sigs/azurefile-csi-driver/blob/master/docs/driver-parameters.md
azureFileShareName = "shareName"
secretNameTemplate = "azure-storage-account-%s-secret"
defaultSecretNamespace = "default"
)
var _ InTreePlugin = &azureFileCSITranslator{}
var secretNameFormatRE = regexp.MustCompile(`azure-storage-account-(.+)-secret`)
// azureFileCSITranslator handles translation of PV spec from In-tree
// Azure File to CSI Azure File and vice versa
type azureFileCSITranslator struct{}
@ -58,12 +65,18 @@ func (t *azureFileCSITranslator) TranslateInTreeStorageClassToCSI(sc *storage.St
// and converts the AzureFile source to a CSIPersistentVolumeSource
func (t *azureFileCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Volume) (*v1.PersistentVolume, error) {
if volume == nil || volume.AzureFile == nil {
return nil, fmt.Errorf("volume is nil or AWS EBS not defined on volume")
return nil, fmt.Errorf("volume is nil or Azure File not defined on volume")
}
azureSource := volume.AzureFile
accountName, err := getStorageAccountName(azureSource.SecretName)
if err != nil {
klog.Warningf("getStorageAccountName(%s) returned with error: %v", azureSource.SecretName, err)
accountName = azureSource.SecretName
}
pv := &v1.PersistentVolume{
var (
pv = &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
// Must be unique per disk as it is used as the unique part of the
// staging path
@ -72,18 +85,21 @@ func (t *azureFileCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Vol
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
CSI: &v1.CSIPersistentVolumeSource{
VolumeHandle: fmt.Sprintf(volumeIDTemplate, "", azureSource.SecretName, azureSource.ShareName),
Driver: AzureFileDriverName,
VolumeHandle: fmt.Sprintf(volumeIDTemplate, "", accountName, azureSource.ShareName, ""),
ReadOnly: azureSource.ReadOnly,
VolumeAttributes: map[string]string{azureFileShareName: azureSource.ShareName},
NodePublishSecretRef: &v1.SecretReference{
Name: azureSource.ShareName,
Namespace: "default",
NodeStageSecretRef: &v1.SecretReference{
Name: azureSource.SecretName,
Namespace: defaultSecretNamespace,
},
},
},
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany},
},
}
)
return pv, nil
}
@ -95,23 +111,33 @@ func (t *azureFileCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume)
}
azureSource := pv.Spec.PersistentVolumeSource.AzureFile
accountName, err := getStorageAccountName(azureSource.SecretName)
if err != nil {
klog.Warningf("getStorageAccountName(%s) returned with error: %v", azureSource.SecretName, err)
accountName = azureSource.SecretName
}
volumeID := fmt.Sprintf(volumeIDTemplate, "", accountName, azureSource.ShareName, "")
volumeID := fmt.Sprintf(volumeIDTemplate, "", azureSource.SecretName, azureSource.ShareName)
var (
// refer to https://github.com/kubernetes-sigs/azurefile-csi-driver/blob/master/docs/driver-parameters.md
csiSource := &v1.CSIPersistentVolumeSource{
VolumeHandle: volumeID,
csiSource = &v1.CSIPersistentVolumeSource{
Driver: AzureFileDriverName,
NodeStageSecretRef: &v1.SecretReference{
Name: azureSource.SecretName,
Namespace: defaultSecretNamespace,
},
ReadOnly: azureSource.ReadOnly,
VolumeAttributes: map[string]string{azureFileShareName: azureSource.ShareName},
VolumeHandle: volumeID,
}
)
csiSource.NodePublishSecretRef = &v1.SecretReference{
Name: azureSource.ShareName,
Namespace: *azureSource.SecretNamespace,
if azureSource.SecretNamespace != nil {
csiSource.NodeStageSecretRef.Namespace = *azureSource.SecretNamespace
}
pv.Spec.PersistentVolumeSource.AzureFile = nil
pv.Spec.PersistentVolumeSource.CSI = csiSource
pv.Spec.AccessModes = backwardCompatibleAccessModes(pv.Spec.AccessModes)
return pv, nil
}
@ -129,22 +155,21 @@ func (t *azureFileCSITranslator) TranslateCSIPVToInTree(pv *v1.PersistentVolume)
ReadOnly: csiSource.ReadOnly,
}
if csiSource.NodePublishSecretRef != nil && csiSource.NodePublishSecretRef.Name != "" {
azureSource.SecretName = csiSource.NodePublishSecretRef.Name
azureSource.SecretNamespace = &csiSource.NodePublishSecretRef.Namespace
if csiSource.NodeStageSecretRef != nil && csiSource.NodeStageSecretRef.Name != "" {
azureSource.SecretName = csiSource.NodeStageSecretRef.Name
azureSource.SecretNamespace = &csiSource.NodeStageSecretRef.Namespace
if csiSource.VolumeAttributes != nil {
if shareName, ok := csiSource.VolumeAttributes[azureFileShareName]; ok {
azureSource.ShareName = shareName
}
}
} else {
_, _, fileShareName, err := getFileShareInfo(csiSource.VolumeHandle)
_, storageAccount, fileShareName, _, err := getFileShareInfo(csiSource.VolumeHandle)
if err != nil {
return nil, err
}
azureSource.ShareName = fileShareName
// to-do: for dynamic provision scenario in CSI, it uses cluster's identity to get storage account key
// secret for the file share is not created, we may create a secret here
azureSource.SecretName = fmt.Sprintf(secretNameTemplate, storageAccount)
}
pv.Spec.CSI = nil
@ -182,12 +207,25 @@ func (t *azureFileCSITranslator) RepairVolumeHandle(volumeHandle, nodeID string)
}
// get file share info according to volume id, e.g.
// input: "rg#f5713de20cde511e8ba4900#pvc-file-dynamic-17e43f84-f474-11e8-acd0-000d3a00df41"
// output: rg, f5713de20cde511e8ba4900, pvc-file-dynamic-17e43f84-f474-11e8-acd0-000d3a00df41
func getFileShareInfo(id string) (string, string, string, error) {
// input: "rg#f5713de20cde511e8ba4900#pvc-file-dynamic-17e43f84-f474-11e8-acd0-000d3a00df41#diskname.vhd"
// output: rg, f5713de20cde511e8ba4900, pvc-file-dynamic-17e43f84-f474-11e8-acd0-000d3a00df41, diskname.vhd
func getFileShareInfo(id string) (string, string, string, string, error) {
segments := strings.Split(id, separator)
if len(segments) < 3 {
return "", "", "", fmt.Errorf("error parsing volume id: %q, should at least contain two #", id)
return "", "", "", "", fmt.Errorf("error parsing volume id: %q, should at least contain two #", id)
}
return segments[0], segments[1], segments[2], nil
var diskName string
if len(segments) > 3 {
diskName = segments[3]
}
return segments[0], segments[1], segments[2], diskName, nil
}
// get storage account name from secret name
func getStorageAccountName(secretName string) (string, error) {
matches := secretNameFormatRE.FindStringSubmatch(secretName)
if len(matches) != 2 {
return "", fmt.Errorf("could not get account name from %s, correct format: %s", secretName, secretNameFormatRE)
}
return matches[1], nil
}
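getFileShareInfo and getStorageAccountName are unexported in csi-translation-lib, so the snippet below only re-states the parsing rules for illustration: volume handles now carry four '#'-separated segments (resource group, storage account, share name, optional disk name), and the storage account is recovered from secret names of the form azure-storage-account-<account>-secret.

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Re-stated pattern; the real one lives as an unexported var in the plugin package.
var secretNameRE = regexp.MustCompile(`azure-storage-account-(.+)-secret`)

func accountFromSecret(secretName string) (string, bool) {
	m := secretNameRE.FindStringSubmatch(secretName)
	if len(m) != 2 {
		return "", false
	}
	return m[1], true
}

func main() {
	// resourceGroup#account#shareName#diskName
	id := "rg#f5713de20cde511e8ba4900#pvc-file-dynamic-17e43f84#disk.vhd"
	fmt.Println(strings.Split(id, "#")) // [rg f5713de20cde511e8ba4900 pvc-file-dynamic-17e43f84 disk.vhd]

	account, ok := accountFromSecret("azure-storage-account-f5713de20cde511e8ba4900-secret")
	fmt.Println(account, ok) // f5713de20cde511e8ba4900 true
}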

View File

@ -10,6 +10,7 @@ go_library(
srcs = [
"annotations.go",
"doc.go",
"helpers.go",
"register.go",
"types.go",
"zz_generated.deepcopy.go",

View File

@ -0,0 +1,58 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoscaling
// DropRoundTripHorizontalPodAutoscalerAnnotations removes any annotations used to serialize round-tripped fields from later API versions,
// and returns false if no changes were made and the original input object was returned.
// It should always be called when converting internal -> external versions, prior
// to setting any of the custom annotations:
//
// annotations, copiedAnnotations := DropRoundTripHorizontalPodAutoscalerAnnotations(externalObj.Annotations)
// externalObj.Annotations = annotations
//
// if internal.SomeField != nil {
// if !copiedAnnotations {
// externalObj.Annotations = DeepCopyStringMap(externalObj.Annotations)
// copiedAnnotations = true
// }
// externalObj.Annotations[...] = json.Marshal(...)
// }
func DropRoundTripHorizontalPodAutoscalerAnnotations(in map[string]string) (out map[string]string, copied bool) {
_, hasMetricsSpecs := in[MetricSpecsAnnotation]
_, hasBehaviorSpecs := in[BehaviorSpecsAnnotation]
_, hasMetricsStatuses := in[MetricStatusesAnnotation]
_, hasConditions := in[HorizontalPodAutoscalerConditionsAnnotation]
if hasMetricsSpecs || hasBehaviorSpecs || hasMetricsStatuses || hasConditions {
out = DeepCopyStringMap(in)
delete(out, MetricSpecsAnnotation)
delete(out, BehaviorSpecsAnnotation)
delete(out, MetricStatusesAnnotation)
delete(out, HorizontalPodAutoscalerConditionsAnnotation)
return out, true
}
return in, false
}
// DeepCopyStringMap returns a copy of the input map.
// If input is nil, an empty map is returned.
func DeepCopyStringMap(in map[string]string) map[string]string {
out := make(map[string]string, len(in))
for k, v := range in {
out[k] = v
}
return out
}

View File

@ -4250,7 +4250,7 @@ type PodLogOptions struct {
// If this value is in the future, no logs will be returned.
// Only one of sinceSeconds or sinceTime may be specified.
SinceTime *metav1.Time
// If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
// If true, add an RFC 3339 timestamp with 9 digits of fractional seconds at the beginning of every line
// of log output.
Timestamps bool
// If set, the number of lines from the end of the logs to show. If not specified,

View File

@ -29,4 +29,9 @@ const (
SystemReservedEnforcementKey = "system-reserved"
KubeReservedEnforcementKey = "kube-reserved"
NodeAllocatableNoneKey = "none"
// fixed width version of time.RFC3339Nano
RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
// variable width RFC3339 time format for lenient parsing of strings into timestamps
RFC3339NanoLenient = "2006-01-02T15:04:05.999999999Z07:00"
)

View File

@ -43,10 +43,10 @@ func NewTimestamp() *Timestamp {
return &Timestamp{time.Now()}
}
// ConvertToTimestamp takes a string, parses it using the RFC3339Nano layout,
// ConvertToTimestamp takes a string, parses it using the RFC3339NanoLenient layout,
// and converts it to a Timestamp object.
func ConvertToTimestamp(timeString string) *Timestamp {
parsed, _ := time.Parse(time.RFC3339Nano, timeString)
parsed, _ := time.Parse(RFC3339NanoLenient, timeString)
return &Timestamp{parsed}
}
@ -55,10 +55,10 @@ func (t *Timestamp) Get() time.Time {
return t.time
}
// GetString returns the time in the string format using the RFC3339Nano
// GetString returns the time in the string format using the RFC3339NanoFixed
// layout.
func (t *Timestamp) GetString() string {
return t.time.Format(time.RFC3339Nano)
return t.time.Format(RFC3339NanoFixed)
}
// A type to help sort container statuses based on container names.

View File

@ -117,6 +117,7 @@ func calculateResourceAllocatableRequest(nodeInfo *schedulernodeinfo.NodeInfo, p
// calculatePodResourceRequest returns the total non-zero requests. If Overhead is defined for the pod and the
// PodOverhead feature is enabled, the Overhead is added to the result.
// podResourceRequest = max(sum(podSpec.Containers), podSpec.InitContainers) + overHead
func calculatePodResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
var podRequest int64
for i := range pod.Spec.Containers {
@ -125,11 +126,20 @@ func calculatePodResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
podRequest += value
}
for i := range pod.Spec.InitContainers {
initContainer := &pod.Spec.InitContainers[i]
value := schedutil.GetNonzeroRequestForResource(resource, &initContainer.Resources.Requests)
if podRequest < value {
podRequest = value
}
}
// If Overhead is being utilized, add to the total requests for the pod
if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
if quantity, found := pod.Spec.Overhead[resource]; found {
podRequest += quantity.Value()
}
}
return podRequest
}
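The rule stated in the comment — podResourceRequest = max(sum(containers), max(initContainers)) + overhead — can be sanity-checked with a toy calculation. The helper below is a re-statement with made-up milliCPU values, not the scheduler's actual types:

package main

import "fmt"

// podCPURequest re-states the scheduler's rule over plain millicore values.
func podCPURequest(containers, initContainers []int64, overhead int64) int64 {
	var sum int64
	for _, c := range containers {
		sum += c
	}
	req := sum
	for _, ic := range initContainers {
		if req < ic {
			req = ic // each init container runs alone, so take the max
		}
	}
	return req + overhead // overhead only counts when the PodOverhead gate is on
}

func main() {
	// containers 200m+100m, one 400m init container, 50m overhead:
	// max(300, 400) + 50 = 450m
	fmt.Println(podCPURequest([]int64{200, 100}, []int64{400}, 50))
}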

View File

@ -173,7 +173,10 @@ func (r *Resource) Add(rl v1.ResourceList) {
case v1.ResourcePods:
r.AllowedPodNumber += int(rQuant.Value())
case v1.ResourceEphemeralStorage:
if utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
// if the local storage capacity isolation feature gate is disabled, pods request 0 disk.
r.EphemeralStorage += rQuant.Value()
}
default:
if v1helper.IsScalarResourceName(rName) {
r.AddScalar(rName, rQuant.Value())
@ -565,21 +568,32 @@ func (n *NodeInfo) resetSlicesIfEmpty() {
}
}
// resourceRequest = max(sum(podSpec.Containers), podSpec.InitContainers) + overHead
func calculateResource(pod *v1.Pod) (res Resource, non0CPU int64, non0Mem int64) {
resPtr := &res
for _, c := range pod.Spec.Containers {
resPtr.Add(c.Resources.Requests)
non0CPUReq, non0MemReq := schedutil.GetNonzeroRequests(&c.Resources.Requests)
non0CPU += non0CPUReq
non0Mem += non0MemReq
// No non-zero resources for GPUs or opaque resources.
}
for _, ic := range pod.Spec.InitContainers {
resPtr.SetMaxResource(ic.Resources.Requests)
non0CPUReq, non0MemReq := schedutil.GetNonzeroRequests(&ic.Resources.Requests)
if non0CPU < non0CPUReq {
non0CPU = non0CPUReq
}
if non0Mem < non0MemReq {
non0Mem = non0MemReq
}
}
// If Overhead is being utilized, add to the total requests for the pod
if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
resPtr.Add(pod.Spec.Overhead)
if _, found := pod.Spec.Overhead[v1.ResourceCPU]; found {
non0CPU += pod.Spec.Overhead.Cpu().MilliValue()
}

View File

@ -40,10 +40,12 @@ go_library(
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/features:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/kube-scheduler/extender/v1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],

View File

@ -18,7 +18,9 @@ package util
import (
v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
)
// For each of these resources, a pod that doesn't request the resource explicitly
@ -60,6 +62,11 @@ func GetNonzeroRequestForResource(resource v1.ResourceName, requests *v1.Resourc
}
return requests.Memory().Value()
case v1.ResourceEphemeralStorage:
// if the local storage capacity isolation feature gate is disabled, pods request 0 disk.
if !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
return 0
}
quantity, found := (*requests)[v1.ResourceEphemeralStorage]
if !found {
return 0

View File

@ -351,6 +351,8 @@ type AttachDetachVolumeHost interface {
// CSIDriverLister returns the informer lister for the CSIDriver API Object
CSIDriverLister() storagelistersv1.CSIDriverLister
// VolumeAttachmentLister returns the informer lister for the VolumeAttachment API Object
VolumeAttachmentLister() storagelistersv1.VolumeAttachmentLister
// IsAttachDetachController is an interface marker to strictly tie AttachDetachVolumeHost
// to the attachDetachController
IsAttachDetachController() bool

View File

@ -270,7 +270,8 @@
{
"SelectorRegexp": "k8s[.]io/kubernetes/third_party/",
"AllowedPrefixes": [
"k8s.io/kubernetes/third_party/forked/golang/expansion"
"k8s.io/kubernetes/third_party/forked/golang/expansion",
"k8s.io/kubernetes/third_party/forked/ipvs"
],
"ForbiddenPrefixes": []
},

View File

@ -38,8 +38,7 @@ type RegistryList struct {
GoogleContainerRegistry string `yaml:"googleContainerRegistry"`
PrivateRegistry string `yaml:"privateRegistry"`
SampleRegistry string `yaml:"sampleRegistry"`
QuayK8sCSI string `yaml:"quayK8sCSI"`
QuayIncubator string `yaml:"quayIncubator"`
K8sCSI string `yaml:"k8sCSI"`
}
// Config holds an images registry, name, and version
@ -78,8 +77,7 @@ func initReg() RegistryList {
GoogleContainerRegistry: "gcr.io/google-containers",
PrivateRegistry: "gcr.io/k8s-authenticated-test",
SampleRegistry: "gcr.io/google-samples",
QuayK8sCSI: "quay.io/k8scsi",
QuayIncubator: "quay.io/kubernetes_incubator",
K8sCSI: "gcr.io/k8s-staging-csi",
}
repoList := os.Getenv("KUBE_TEST_REPO_LIST")
if repoList == "" {
@ -109,8 +107,7 @@ var (
gcrReleaseRegistry = registry.GcrReleaseRegistry
googleContainerRegistry = registry.GoogleContainerRegistry
invalidRegistry = registry.InvalidRegistry
quayK8sCSI = registry.QuayK8sCSI
quayIncubator = registry.QuayIncubator
k8sCSI = registry.K8sCSI
// PrivateRegistry is an image repository that requires authentication
PrivateRegistry = registry.PrivateRegistry
sampleRegistry = registry.SampleRegistry
@ -227,7 +224,7 @@ func initImageConfigs() map[int]Config {
configs[Mounttest] = Config{e2eRegistry, "mounttest", "1.0"}
configs[MounttestUser] = Config{e2eRegistry, "mounttest-user", "1.0"}
configs[Nautilus] = Config{e2eRegistry, "nautilus", "1.0"}
configs[NFSProvisioner] = Config{quayIncubator, "nfs-provisioner", "v2.2.2"}
configs[NFSProvisioner] = Config{k8sCSI, "nfs-provisioner", "v2.2.2"}
configs[Nginx] = Config{dockerLibraryRegistry, "nginx", "1.14-alpine"}
configs[NginxNew] = Config{dockerLibraryRegistry, "nginx", "1.15-alpine"}
configs[Nonewprivs] = Config{e2eRegistry, "nonewprivs", "1.0"}
@ -293,8 +290,8 @@ func ReplaceRegistryInImageURL(imageURL string) (string, error) {
registryAndUser = gcrReleaseRegistry
case "docker.io/library":
registryAndUser = dockerLibraryRegistry
case "quay.io/k8scsi":
registryAndUser = quayK8sCSI
case "gcr.io/k8s-staging-csi":
registryAndUser = k8sCSI
default:
if countParts == 1 {
// We assume we found an image from docker hub library

24
vendor/modules.txt vendored
View File

@ -303,7 +303,7 @@ gopkg.in/square/go-jose.v2/jwt
gopkg.in/tomb.v1
# gopkg.in/yaml.v2 v2.2.8
gopkg.in/yaml.v2
# k8s.io/api v0.18.0 => k8s.io/api v0.18.0
# k8s.io/api v0.18.6 => k8s.io/api v0.18.6
k8s.io/api/admission/v1
k8s.io/api/admission/v1beta1
k8s.io/api/admissionregistration/v1
@ -346,7 +346,7 @@ k8s.io/api/settings/v1alpha1
k8s.io/api/storage/v1
k8s.io/api/storage/v1alpha1
k8s.io/api/storage/v1beta1
# k8s.io/apimachinery v0.18.0 => k8s.io/apimachinery v0.18.0
# k8s.io/apimachinery v0.18.6 => k8s.io/apimachinery v0.18.6
k8s.io/apimachinery/pkg/api/equality
k8s.io/apimachinery/pkg/api/errors
k8s.io/apimachinery/pkg/api/meta
@ -401,7 +401,7 @@ k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/json
k8s.io/apimachinery/third_party/forked/golang/netutil
k8s.io/apimachinery/third_party/forked/golang/reflect
# k8s.io/apiserver v0.18.0 => k8s.io/apiserver v0.18.0
# k8s.io/apiserver v0.18.6 => k8s.io/apiserver v0.18.6
k8s.io/apiserver/pkg/admission
k8s.io/apiserver/pkg/admission/configuration
k8s.io/apiserver/pkg/admission/initializer
@ -441,7 +441,7 @@ k8s.io/apiserver/pkg/storage/etcd3/metrics
k8s.io/apiserver/pkg/storage/value
k8s.io/apiserver/pkg/util/feature
k8s.io/apiserver/pkg/util/webhook
# k8s.io/client-go v0.18.0 => k8s.io/client-go v0.18.0
# k8s.io/client-go v0.18.6 => k8s.io/client-go v0.18.6
k8s.io/client-go/discovery
k8s.io/client-go/discovery/cached/memory
k8s.io/client-go/dynamic
@ -622,11 +622,11 @@ k8s.io/client-go/util/homedir
k8s.io/client-go/util/keyutil
k8s.io/client-go/util/retry
k8s.io/client-go/util/workqueue
# k8s.io/cloud-provider v0.18.0 => k8s.io/cloud-provider v0.18.0
# k8s.io/cloud-provider v0.18.6 => k8s.io/cloud-provider v0.18.6
k8s.io/cloud-provider
k8s.io/cloud-provider/volume
k8s.io/cloud-provider/volume/helpers
# k8s.io/component-base v0.18.0 => k8s.io/component-base v0.18.0
# k8s.io/component-base v0.18.6 => k8s.io/component-base v0.18.6
k8s.io/component-base/cli/flag
k8s.io/component-base/config
k8s.io/component-base/featuregate
@ -635,22 +635,22 @@ k8s.io/component-base/metrics/legacyregistry
k8s.io/component-base/metrics/prometheus/ratelimiter
k8s.io/component-base/metrics/testutil
k8s.io/component-base/version
# k8s.io/cri-api v0.18.0 => k8s.io/cri-api v0.18.0
# k8s.io/cri-api v0.18.6 => k8s.io/cri-api v0.18.6
k8s.io/cri-api/pkg/apis/runtime/v1alpha2
# k8s.io/csi-translation-lib v0.18.0 => k8s.io/csi-translation-lib v0.18.0
# k8s.io/csi-translation-lib v0.18.6 => k8s.io/csi-translation-lib v0.18.6
k8s.io/csi-translation-lib
k8s.io/csi-translation-lib/plugins
# k8s.io/klog v1.0.0
k8s.io/klog
# k8s.io/klog/v2 v2.3.0
k8s.io/klog/v2
# k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c
# k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6
k8s.io/kube-openapi/pkg/util/proto
# k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.18.0
# k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.18.6
k8s.io/kube-scheduler/extender/v1
# k8s.io/kubectl v0.18.0 => k8s.io/kubectl v0.18.0
# k8s.io/kubectl v0.18.6 => k8s.io/kubectl v0.18.6
k8s.io/kubectl/pkg/scale
# k8s.io/kubernetes v1.18.0
# k8s.io/kubernetes v1.18.6
k8s.io/kubernetes/pkg/api/legacyscheme
k8s.io/kubernetes/pkg/api/service
k8s.io/kubernetes/pkg/api/v1/pod