<!-- BEGIN MUNGE: GENERATED_TOC -->

- [v1.13.1](#v1131)
  - [Downloads for v1.13.1](#downloads-for-v1131)
    - [Client Binaries](#client-binaries)
    - [Server Binaries](#server-binaries)
    - [Node Binaries](#node-binaries)
  - [Changelog since v1.13.0](#changelog-since-v1130)
    - [Other notable changes](#other-notable-changes)
- [v1.13.0](#v1130)
  - [Downloads for v1.13.0](#downloads-for-v1130)
    - [Client Binaries](#client-binaries-1)
    - [Server Binaries](#server-binaries-1)
    - [Node Binaries](#node-binaries-1)
- [Kubernetes 1.13 Release Notes](#kubernetes-113-release-notes)
  - [Security Content](#security-content)
  - [Urgent Upgrade Notes](#urgent-upgrade-notes)
    - [(No, really, you MUST do this before you upgrade)](#no-really-you-must-do-this-before-you-upgrade)
  - [Known Issues](#known-issues)
  - [Deprecations](#deprecations)
  - [Major Themes](#major-themes)
    - [SIG API Machinery](#sig-api-machinery)
    - [SIG Auth](#sig-auth)
    - [SIG AWS](#sig-aws)
    - [SIG Azure](#sig-azure)
    - [SIG Big Data](#sig-big-data)
    - [SIG CLI](#sig-cli)
    - [SIG Cloud Provider](#sig-cloud-provider)
    - [SIG Cluster Lifecycle](#sig-cluster-lifecycle)
    - [SIG IBM Cloud](#sig-ibm-cloud)
    - [SIG Multicluster](#sig-multicluster)
    - [SIG Network](#sig-network)
    - [SIG Node](#sig-node)
    - [SIG Openstack](#sig-openstack)
    - [SIG Scalability](#sig-scalability)
    - [SIG Scheduling](#sig-scheduling)
    - [SIG Service Catalog](#sig-service-catalog)
    - [SIG Storage](#sig-storage)
    - [SIG UI](#sig-ui)
    - [SIG VMWare](#sig-vmware)
    - [SIG Windows](#sig-windows)
  - [New Features](#new-features)
  - [Release Notes From SIGs](#release-notes-from-sigs)
    - [SIG API Machinery](#sig-api-machinery-1)
    - [SIG Auth](#sig-auth-1)
    - [SIG Autoscaling](#sig-autoscaling)
    - [SIG AWS](#sig-aws-1)
    - [SIG Azure](#sig-azure-1)
    - [SIG CLI](#sig-cli-1)
    - [SIG Cloud Provider](#sig-cloud-provider-1)
    - [SIG Cluster Lifecycle](#sig-cluster-lifecycle-1)
    - [SIG GCP](#sig-gcp)
    - [SIG Network](#sig-network-1)
    - [SIG Node](#sig-node-1)
    - [SIG OpenStack](#sig-openstack-1)
    - [SIG Release](#sig-release)
    - [SIG Scheduling](#sig-scheduling-1)
    - [SIG Storage](#sig-storage-1)
    - [SIG Windows](#sig-windows-1)
  - [External Dependencies](#external-dependencies)
- [v1.13.0-rc.2](#v1130-rc2)
  - [Downloads for v1.13.0-rc.2](#downloads-for-v1130-rc2)
    - [Client Binaries](#client-binaries-2)
    - [Server Binaries](#server-binaries-2)
    - [Node Binaries](#node-binaries-2)
  - [Changelog since v1.13.0-rc.1](#changelog-since-v1130-rc1)
    - [Other notable changes](#other-notable-changes-1)
- [v1.13.0-rc.1](#v1130-rc1)
  - [Downloads for v1.13.0-rc.1](#downloads-for-v1130-rc1)
    - [Client Binaries](#client-binaries-3)
    - [Server Binaries](#server-binaries-3)
    - [Node Binaries](#node-binaries-3)
  - [Changelog since v1.13.0-beta.2](#changelog-since-v1130-beta2)
    - [Other notable changes](#other-notable-changes-2)
- [v1.13.0-beta.2](#v1130-beta2)
  - [Downloads for v1.13.0-beta.2](#downloads-for-v1130-beta2)
    - [Client Binaries](#client-binaries-4)
    - [Server Binaries](#server-binaries-4)
    - [Node Binaries](#node-binaries-4)
  - [Changelog since v1.13.0-beta.1](#changelog-since-v1130-beta1)
    - [Other notable changes](#other-notable-changes-3)
- [v1.13.0-beta.1](#v1130-beta1)
  - [Downloads for v1.13.0-beta.1](#downloads-for-v1130-beta1)
    - [Client Binaries](#client-binaries-5)
    - [Server Binaries](#server-binaries-5)
    - [Node Binaries](#node-binaries-5)
  - [Changelog since v1.13.0-alpha.3](#changelog-since-v1130-alpha3)
    - [Action Required](#action-required)
    - [Other notable changes](#other-notable-changes-4)
- [v1.13.0-alpha.3](#v1130-alpha3)
  - [Downloads for v1.13.0-alpha.3](#downloads-for-v1130-alpha3)
    - [Client Binaries](#client-binaries-6)
    - [Server Binaries](#server-binaries-6)
    - [Node Binaries](#node-binaries-6)
  - [Changelog since v1.13.0-alpha.2](#changelog-since-v1130-alpha2)
    - [Other notable changes](#other-notable-changes-5)
- [v1.13.0-alpha.2](#v1130-alpha2)
  - [Downloads for v1.13.0-alpha.2](#downloads-for-v1130-alpha2)
    - [Client Binaries](#client-binaries-7)
    - [Server Binaries](#server-binaries-7)
    - [Node Binaries](#node-binaries-7)
  - [Changelog since v1.13.0-alpha.1](#changelog-since-v1130-alpha1)
    - [Other notable changes](#other-notable-changes-6)
- [v1.13.0-alpha.1](#v1130-alpha1)
  - [Downloads for v1.13.0-alpha.1](#downloads-for-v1130-alpha1)
    - [Client Binaries](#client-binaries-8)
    - [Server Binaries](#server-binaries-8)
    - [Node Binaries](#node-binaries-8)
  - [Changelog since v1.12.0](#changelog-since-v1120)
    - [Action Required](#action-required-1)
    - [Other notable changes](#other-notable-changes-7)

<!-- END MUNGE: GENERATED_TOC -->
<!-- NEW RELEASE NOTES ENTRY -->
# v1.13.1

[Documentation](https://docs.k8s.io)

## Downloads for v1.13.1
filename | sha512 hash
-------- | -----------
[kubernetes.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes.tar.gz) | `de3858357b2b4444bccc0599c7d0edd3e6ec1a80267ef96883ebcfb06c518ce467dd8720b48084644677a42b8e3ffad9a7d4745b40170ce9dfe5b43310979be1`
[kubernetes-src.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-src.tar.gz) | `7f0a8dbd3c7397cc5a5bc0297eb24b8e734c3c7b78e48fc794c525377c3895f4fd84fd0a2fa70c5513cc47ee5a174c22bab54796abc5a8f2b30687642c819a68`
### Client Binaries
filename | sha512 hash
-------- | -----------
[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-darwin-386.tar.gz) | `371028dba7a28ec3c8f10b861448cb1574dce25d32d847af254b76b7f158aa4fcda695972e2a08440faa4e16077f8021b07115d0da897bef79c33e702f3be95e`
[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-darwin-amd64.tar.gz) | `6aa7025308e9fb1eb4415e504e8aa9c7a0a20b09c500cb48df82bbd04443101664b2614fb284875b9670d4bb11e8f1a10190eaf1d54f81f3a9526053958b0802`
[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-linux-386.tar.gz) | `6453670bb61b4f5f7fe8ae78804864ecd52682b32592f6956faf3d2220884a64fb22ae2e668b63f28ea8fd354c50aa90ce61c60be327fb0b5fcfe2c7835ef559`
[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-linux-amd64.tar.gz) | `ca00442f50b5d5627357dce97c90c17cb0126d746b887afdab2d4db9e0826532469fd1ee62f40eb6923761618f46752d10993578ca19c8b92c3a2aeb5102a318`
[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-linux-arm.tar.gz) | `5fa170cbe56b8f5d103f520e2493f911c5eb59b51a6afdbaa9c08196943f1235e533f0384ce7c01c73a020c6889cf8f03cc3642912d0953c74d1098e4b21f3a0`
[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-linux-arm64.tar.gz) | `710343ad067f0d642c43cd26871828275645b08b4f4c86bd555865318d8fe08b7f0a720174c04d58acffcb26faf563636dc13eef66a2813eac68bb8b994908f4`
[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-linux-ppc64le.tar.gz) | `0fa7ab255f0cba3adc754337c6184e6ec464aa5a4d6dd4d38aad8a0e2430a0044f4ed1ffcd7cc7c863190d3cda6b84abd12ca7536139d665ad61fe7704e63d30`
[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-linux-s390x.tar.gz) | `749a8dce5b81e2edbd315841acac64a0e5d17bb1ead8173560b6a4ccc28604bc8254051297ab51cb5df845495bd75a45137827b3386e3962295fec8601563eaa`
[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-windows-386.tar.gz) | `cd4732fbe569009c426f963318d05ddcc7c63dc27ec9d2bf9c60d716195e3676aa5b0e6ccbde6298f621450d365d41a910ce3ced89bf2ae6d3e81ee2fed0bb16`
[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-windows-amd64.tar.gz) | `40f5b5d221b3a611511690d316539dc8fb3f4513e4f9eb141bffa17c9ddeee875a462f5bd45e62ce7c7535310fc3e48e3441614700ee9877584c5948ddbef19f`
### Server Binaries
filename | sha512 hash
-------- | -----------
[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-server-linux-amd64.tar.gz) | `e0e48825c5fe33a3f82b1b74847d9bfb8c5716c4313c5e4e6f46be0580e20a1e396a669b8ca446cfa581e3eb75698813249bbfcfc79c8a90793880eb5c177921`
[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-server-linux-arm.tar.gz) | `7ff4856e7959cf14eba0e1ab274c0bf0d3193391e7034a936697f0c4813e81d8dda4a019d3185677bee9d1345a6433db3fd6e55f644a0f73d076e0b2014ed172`
[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-server-linux-arm64.tar.gz) | `b8c2356002e675bd3de5ee9c2337a12e2a1bbfa2478f8e3b91065a578dfa8d50f596fd606d9f0232b06b8263867a7ca5cc7c04150718b8e40b49ae7d46001c30`
[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-server-linux-ppc64le.tar.gz) | `5d3a15b1241d849d8954894aa7f3fb12606f9966f73fc36aa15152038fc385153b0f0e967cc0bf410a5d5894d0269e54eac581d8e79003904d7bc29b33e98684`
[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-server-linux-s390x.tar.gz) | `78a9cccaf9d737b519db0866c2e80c472c7136bc723910d08649ece1c420ae7f6e56e610d65c436c56ccef8360c4da0f70e75d0cf47c0c8e739f5138cdc7b0d2`
### Node Binaries
filename | sha512 hash
-------- | -----------
[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-node-linux-amd64.tar.gz) | `3a7881a52885bebe5958f02dc54194cc8c330576b7cf5935189df4f0b754b958917b104e1d3358c0bc9277f13a8eef2176284548d664f27a36baa389fbcc7bea`
[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-node-linux-arm.tar.gz) | `d0bfcff3ef7c0aa36005e7b111685438ebd0ea61d48dc68a7bd06eea3782b6eb224f9b651d80c955afa162f766c8b682976db43238562c293d6552cdadf9e934`
[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-node-linux-arm64.tar.gz) | `2e23bd00661aceb30fa37e24ab71315755bd93dfcc5ff361d78445a8e9ff99e7b3a56641112af3184e8b107545fba6573a6368a82bd0ce475c81cb53fd44da3b`
[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-node-linux-ppc64le.tar.gz) | `8d0fdb743c700d662886636fe67b52202cf9e6e57c2d7de5961b8189d8c03c91fda1d68c47033286efcc582e78be40846e2b1f5c589a0b94794fa2ce3c1ebfee`
[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-node-linux-s390x.tar.gz) | `70445038b4db62c3fc99540f5ddbb881387018244242f182332b8eaa7159ce1aa8929145010ab2befd4e101d39c24c61e430928235434c7d7eb54f113860a83a`
[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-node-windows-amd64.tar.gz) | `a87ad43f5a6b8f66d1bbd64f9c91e8bcbdf4adc8de0ec3cd559adaa8c14a6fe078ffdf090e52627c0522b79209fcc37bf822b323895dd47b18c20026cb25e9f5`
## Changelog since v1.13.0
### Other notable changes
* Fix overlapping filenames in diff if multiple resources have the same name. ([#71923](https://github.com/kubernetes/kubernetes/pull/71923), [@apelisse](https://github.com/apelisse))
* Disable proxy to loopback and linklocal ([#71980](https://github.com/kubernetes/kubernetes/pull/71980), [@micahhausler](https://github.com/micahhausler))
* kube-scheduler: restores ability to run without authentication configuration lookup permissions ([#71755](https://github.com/kubernetes/kubernetes/pull/71755), [@liggitt](https://github.com/liggitt))
* client-go: restores behavior of populating the BearerToken field in rest.Config objects constructed from kubeconfig files containing tokenFile config, or from in-cluster configuration. An additional BearerTokenFile field is now populated to enable constructed clients to periodically refresh tokens. ([#71713](https://github.com/kubernetes/kubernetes/pull/71713), [@liggitt](https://github.com/liggitt))
* apply: fix detection of non-dry-run enabled servers ([#71854](https://github.com/kubernetes/kubernetes/pull/71854), [@apelisse](https://github.com/apelisse))
* Scheduler only activates unschedulable pods if node's scheduling related properties change. ([#71551](https://github.com/kubernetes/kubernetes/pull/71551), [@mlmhl](https://github.com/mlmhl))
* Fixes pod deletion when cleaning old cronjobs ([#71802](https://github.com/kubernetes/kubernetes/pull/71802), [@soltysh](https://github.com/soltysh))
* Fix issue: VM SKU restriction policy does not work in Azure disk attach/detach ([#71941](https://github.com/kubernetes/kubernetes/pull/71941), [@andyzhangx](https://github.com/andyzhangx))
* Include CRD for BGPConfigurations, needed for calico 2.x to 3.x upgrade. ([#71868](https://github.com/kubernetes/kubernetes/pull/71868), [@satyasm](https://github.com/satyasm))
* UDP connections now support graceful termination in IPVS mode ([#71515](https://github.com/kubernetes/kubernetes/pull/71515), [@lbernail](https://github.com/lbernail))
* kubeadm: use kubeconfig flag instead of kubeconfig-dir on init phase bootstrap-token ([#71803](https://github.com/kubernetes/kubernetes/pull/71803), [@yagonobre](https://github.com/yagonobre))
* On GCI, NPD starts to monitor kubelet, docker, containerd crashlooping, read-only filesystem and corrupt docker overlay2 issues. ([#71522](https://github.com/kubernetes/kubernetes/pull/71522), [@wangzhen127](https://github.com/wangzhen127))
* Fixes an issue where Portworx volumes cannot be mounted if 9001 port is already in use on the host and users remap 9001 to another port. ([#70392](https://github.com/kubernetes/kubernetes/pull/70392), [@harsh-px](https://github.com/harsh-px))
* Only use the first IP address obtained from instance metadata. This is because Azure CNI sets up a list of IP addresses in instance metadata, while only the first one is the Node's IP. ([#71736](https://github.com/kubernetes/kubernetes/pull/71736), [@feiskyer](https://github.com/feiskyer))
* kube-controller-manager: fixed an issue displaying help for the deprecated insecure --port flag ([#71601](https://github.com/kubernetes/kubernetes/pull/71601), [@liggitt](https://github.com/liggitt))
* Update Cluster Autoscaler version in gce manifests to 1.13.1 (https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.13.1) ([#71842](https://github.com/kubernetes/kubernetes/pull/71842), [@losipiuk](https://github.com/losipiuk))
* kubectl: fixes regression in --sort-by behavior ([#71805](https://github.com/kubernetes/kubernetes/pull/71805), [@liggitt](https://github.com/liggitt))
* Fixes apiserver nil pointer panics when requesting v2beta1 autoscaling object metrics ([#71744](https://github.com/kubernetes/kubernetes/pull/71744), [@yue9944882](https://github.com/yue9944882))
* Fix scheduling starvation of pods in cluster with large number of unschedulable pods. ([#71488](https://github.com/kubernetes/kubernetes/pull/71488), [@bsalamat](https://github.com/bsalamat))
# v1.13.0

[Documentation](https://docs.k8s.io)

## Downloads for v1.13.0
filename | sha512 hash
-------- | -----------
[kubernetes.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes.tar.gz) | `7b6a81c9f1b852b1e889c1b62281569a4b8853c79e5675b0910d941dfa7863c97f244f6d607aae3faf60bccd596dedb9d136b7fffeae199876e780904fd9f31e`
[kubernetes-src.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-src.tar.gz) | `844b9fbba21374dd190c8f12dd0e5b3303dd2cd7ad25f241d6f7e46f74adf6987afad021553521d4f479c19d87aa8d4d5be77ac7a6715d31a9187a5bab3b397b`
### Client Binaries
filename | sha512 hash
-------- | -----------
[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-darwin-386.tar.gz) | `0c010351acb660a75122feb876c9887d46ec2cb466872dd073b7f5b26fdadd96888a350e01606f2ae43606a5a4ab2d9309441f4357cee924b19688f9b02c55dc`
[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-darwin-amd64.tar.gz) | `c2c40bd202900124f4e9458b067a1e1fc040030dc84ce9bcc6a5beb263de05892c16f3bdafb8d854e343e71f086207f390fd0b60f6e32e770c73294b053da6e4`
[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-linux-386.tar.gz) | `5f5449be103b103d72a4e2b1028ab014cf7f74781166327f2ae284e4f5ecb539f6b60f36b8f7c7be0ae43dfb30661b2672dd93a1fa7e26d6c67498672674bf12`
[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-linux-amd64.tar.gz) | `61a6cd3b1fb34507e0b762a45da09d88e34921985970a2ba594e0e5af737d94c966434b4e9f8e84fb73a0aeb5fa3e557344cd2eb902bf73c67d4b4bff33c6831`
[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-linux-arm.tar.gz) | `dd5591e2b88c347759a138c4d2436a0f5252341d0e8c9fbab16b8f151e2744cbdd0c8583555a451425bc471f11b688ce568d9245caf8a278cbac2b343fdead89`
[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-linux-arm64.tar.gz) | `894ed30261598ebf3485f3575e95f85e3c353f4d834bf9a6ea53b265427704b43fba5403fbc4d522b3f02afb08e6afaae200af1fe57996291a7c74398ec2fe17`
[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-linux-ppc64le.tar.gz) | `6c26c807fc730ea736fda75dc57ac73395ba78bb828fffeee18b385be550d8f3ba2bbc27a52a8f15bcbbe68218c7945d9fb725e6759c117422bc0a632c110670`
[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-linux-s390x.tar.gz) | `41e6e972de77c0bde22fdd779ea64e731b60f32e97e78a024f33fc3e33a3b364b7f77ece7d3c64ad85b7f8fe7c8fc6d6892098a3362d1fe01ebf3d551fe2bf37`
[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-windows-386.tar.gz) | `442229e5030452901b924a94e7a879d4085597a4f201a5b3fc5ac9806cab5830c836cfa7a33e8f1693fe2e8badc4047bf227d7fb00c537fb1fb4cb7639de5455`
[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-windows-amd64.tar.gz) | `a11a8e8e732e7292781b9cb1de6e3e41683f95fb3fefc2b1a7b5fb1f064a0d80c0833876d931675135778457d81de9ed2e81caee4b3eb27d9f23c7b722b17442`
### Server Binaries
filename | sha512 hash
-------- | -----------
[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-server-linux-amd64.tar.gz) | `a8e3d457e5bcc1c09eeb66111e8dd049d6ba048c3c0fa90a61814291afdcde93f1c6dbb07beef090d1d8a9958402ff843e9af23ae9f069c17c0a7c6ce4034686`
[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-server-linux-arm.tar.gz) | `4e17494767000256775e4dd33c0a9b2d152bd4b5fba9f343b6dfeb5746ff34e400a8e0aaf2153476453225ef57e4bb1ae3635416ab18f9e4dabf4e5cc82f8aaa`
[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-server-linux-arm64.tar.gz) | `0ddd0cf0ff56cebfa89efb1972cc2bc6916e824c2af56cfd330ac5638c8918eaf3c60d05714b220dbf4f896160eded123beeba42f5be55fe434a43d04508d86a`
[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-server-linux-ppc64le.tar.gz) | `b93828560224e812ed21b57fea5458fa8560745cfec96fc1677b258393c00e208ad9b99467b575e74e01699ffd75f03f5793675032e7306cba7208c1afb53c8d`
[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-server-linux-s390x.tar.gz) | `154d565329d5ba52cdb7c3d43d8854b7a9b8e34803c4df6b3e6ae74c1a6e255c78e6559b7546b9158df0e3f7931bbdaf43407d95cd875c79f5cce960bb9882dd`
### Node Binaries
filename | sha512 hash
-------- | -----------
[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-node-linux-amd64.tar.gz) | `9d18ba5f0c3b09edcf29397a496a1e908f4906087be3792989285630d7bcbaf6cd3bdd7b07dace439823885acc808637190f5eaa240b7b4580acf277b67bb553`
[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-node-linux-arm.tar.gz) | `959b04ff7b8690413e01bffeabaab2119794dedf06b7aae1743e49988f797cb7e6ff12e1a91af2d4c5f664414f3aa4bd9020521c6a21c1196c194d12a6f7fe08`
[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-node-linux-arm64.tar.gz) | `b5c18e8c9e28cf276067c871446720d86b6f162e22c3a5e9343cdbc6857baa6961d09a6908b6acd1bbd132c2e2e526377676babf77b8d3bfb36f8711827c105a`
[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-node-linux-ppc64le.tar.gz) | `63e3504d3b115fdf3396968afafd1107b98e5a1a15b7c042a87f5a9cffbdc274f7b06b07ce90eb51876cfffd57cf7f20180bad7e9f9762af577e51f4f13d2f7a`
[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-node-linux-s390x.tar.gz) | `21c5c2721febf7fddeada9569f3ecbd059267e5d2cc325d98fb74faf1ae9e9e15899750225a1fc7c25feef96e7705b1456cb489f4882b9eb10e78bd0f590d019`
[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-node-windows-amd64.tar.gz) | `3e73d3ecff14b4c85a71bb6cf91b1ab7d9c3075c64bd5ce6863562ab17bf808b0cbc33ddd25346d25040649c1ad89745796afd218190886b54f1d8acc17896e4`
# Kubernetes 1.13 Release Notes
## Security Content
- CVE-2018-1002105, a critical security issue in the Kubernetes API Server, is resolved in v1.13.0 (and in [v1.10.11](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.10.md/#v11011), [v1.11.5](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.11.md/#v1115), and [v1.12.3](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.12.md/#v1123)). We recommend all clusters running previous versions update to one of these releases immediately. See issue [#71411](https://github.com/kubernetes/kubernetes/issues/71411) for details.
## Urgent Upgrade Notes
### (No, really, you MUST do this before you upgrade)
Before upgrading to Kubernetes 1.13, you must keep the following in mind:
- kube-apiserver
- The deprecated `etcd2` storage backend has been removed. Before upgrading a kube-apiserver using `--storage-backend=etcd2`, etcd v2 data must be migrated to the v3 storage backend, and kube-apiserver invocations changed to use `--storage-backend=etcd3`. Please consult the installation procedure used to set up etcd for specific migration instructions. Backups prior to upgrade are always a good practice, but since the etcd2 to etcd3 migration is not reversible, an etcd backup prior to migration is essential.
- The deprecated `--etcd-quorum-read` flag has been removed. Quorum reads are now always enabled when fetching data from etcd. Remove the `--etcd-quorum-read` flag from kube-apiserver invocations before upgrading.
- kube-controller-manager
- The deprecated `--insecure-experimental-approve-all-kubelet-csrs-for-group` flag has been removed.
- kubelet
- The deprecated `--google-json-key` flag has been removed. Remove the `--google-json-key` flag from kubelet invocations before upgrading. ([#69354](https://github.com/kubernetes/kubernetes/pull/69354), [@yujuhong](https://github.com/yujuhong))
- DaemonSet pods now make use of scheduling features that require kubelets to be at 1.11 or above. Ensure all kubelets in the cluster are at 1.11 or above before upgrading kube-controller-manager to 1.13.
- The schema for the alpha `CSINodeInfo` CRD has been split into `spec` and `status` fields, and new fields `status.available` and `status.volumePluginMechanism` added. Clusters using the previous alpha schema must delete and recreate the CRD using the new schema. ([#70515](https://github.com/kubernetes/kubernetes/pull/70515), [@davidz627](https://github.com/davidz627))
- kube-scheduler dropped support for configuration files with apiVersion `componentconfig/v1alpha1`. Ensure kube-scheduler is configured using command-line flags or a configuration file with apiVersion `kubescheduler.config.k8s.io/v1alpha1` before upgrading to 1.13.
- kubectl
- The deprecated command `run-container` has been removed. Invocations should use `kubectl run` instead ([#70728](https://github.com/kubernetes/kubernetes/pull/70728), [@Pingan2017](https://github.com/Pingan2017))
- client-go releases will no longer have bootstrap (k8s.io/client-go/tools/bootstrap) related code. Any reference to it will break. Please redirect all references to k8s.io/bootstrap instead. ([#67356](https://github.com/kubernetes/kubernetes/pull/67356), [@yliaog](https://github.com/yliaog))
- Kubernetes cannot distinguish between GCE Zonal PDs and Regional PDs with the same name. To work around this issue, precreate PDs with unique names. PDs that are dynamically provisioned do not encounter this issue. ([#70716](https://github.com/kubernetes/kubernetes/pull/70716), [@msau42](https://github.com/msau42))
## Known Issues
- If kubelet plugin registration for a driver fails, kubelet will not retry. The driver must delete and recreate the driver registration socket in order to force kubelet to attempt registration again. Restarting only the driver container may not be sufficient to trigger recreation of the socket, instead a pod restart may be required. ([#71487](https://github.com/kubernetes/kubernetes/issues/71487))
- In some cases, a Flex volume resize may leave a PVC with erroneous Resizing condition even after volume has been successfully expanded. Users may choose to delete the condition, but it is not required. ([#71470](https://github.com/kubernetes/kubernetes/issues/71470))
- The CSI driver-registrar external sidecar container v1.0.0-rc2 is known to take up to 1 minute to start in some cases. We expect this issue to be resolved in a future release of the sidecar container. For verification, please see the release notes of future releases of the external sidecar container. ([#76](https://github.com/kubernetes-csi/driver-registrar/issues/76))
- When using IPv6 only, be sure to use `proxy-mode=iptables`, as `proxy-mode=ipvs` is known to not work. ([#68437](https://github.com/kubernetes/kubernetes/issues/68437))
## Deprecations
- kube-apiserver
- The `--service-account-api-audiences` flag is deprecated in favor of `--api-audiences`. The old flag is accepted with a warning but will be removed in a future release. ([#70105](https://github.com/kubernetes/kubernetes/pull/70105), [@mikedanese](https://github.com/mikedanese))
- The `--experimental-encryption-provider-config` flag is deprecated in favor of `--encryption-provider-config`. The old flag is accepted with a warning but will be removed in 1.14. ([#71206](https://github.com/kubernetes/kubernetes/pull/71206), [@stlaz](https://github.com/stlaz))
- As part of graduating the etcd encryption feature to beta, the configuration file referenced by `--encryption-provider-config` now uses `kind: EncryptionConfiguration` and `apiVersion: apiserver.config.k8s.io/v1`. Support for `kind: EncryptionConfig` and `apiVersion: v1` is deprecated and will be removed in a future release (see the configuration sketch after this list). ([#67383](https://github.com/kubernetes/kubernetes/pull/67383), [@stlaz](https://github.com/stlaz))
- The `--deserialization-cache-size` flag is deprecated, and will be removed in a future release. The flag is inactive since the etcd2 storage backend was removed. ([#69842](https://github.com/kubernetes/kubernetes/pull/69842), [@liggitt](https://github.com/liggitt))
- The `Node` authorization mode no longer allows kubelets to delete their Node API objects (prior to 1.11, in rare circumstances related to cloudprovider node ID changes, kubelets would attempt to delete/recreate their Node object at startup) ([#71021](https://github.com/kubernetes/kubernetes/pull/71021), [@liggitt](https://github.com/liggitt))
- The built-in `system:csi-external-provisioner` and `system:csi-external-attacher` cluster roles are deprecated and will not be auto-created in a future release. CSI deployments should provide their own RBAC role definitions with required permissions. ([#69868](https://github.com/kubernetes/kubernetes/pull/69868), [@pohly]( https://github.com/pohly))
- The built-in `system:aws-cloud-provider` cluster role is deprecated and will not be auto-created in a future release. Deployments using the AWS cloud provider should grant required permissions to the `aws-cloud-provider` service account in the `kube-system` namespace as part of deployment. ([#66635](https://github.com/kubernetes/kubernetes/pull/66635), [@wgliang](https://github.com/wgliang))
- kubelet
- Use of the beta plugin registration directory `{kubelet_root_dir}/plugins/` for registration of external drivers via the kubelet plugin registration protocol is deprecated in favor of `{kubelet_root_dir}/plugins_registry/`. Support for the old directory is planned to be removed in v1.15. Device plugin and CSI storage drivers should switch to the new directory prior to v1.15. Only CSI storage drivers that support 0.x versions of the CSI API are allowed in the old directory. ([#70494](https://github.com/kubernetes/kubernetes/pull/70494) by [@RenaudWasTaken](https://github.com/RenaudWasTaken) and [#71314](https://github.com/kubernetes/kubernetes/pull/71314) by [@saad-ali](https://github.com/saad-ali))
- With the release of the CSI 1.0 API, support for CSI drivers using 0.3 and older releases of the CSI API is deprecated, and is planned to be removed in Kubernetes v1.15. CSI drivers should be updated to support the CSI 1.0 API, and deployed in the new kubelet plugin registration directory (`{kubelet_root_dir}/plugins_registry/`) once all nodes in the cluster are at 1.13 or higher ([#71020](https://github.com/kubernetes/kubernetes/pull/71020) and [#71314](https://github.com/kubernetes/kubernetes/pull/71314), both by [@saad-ali](https://github.com/saad-ali))
- Use of the `--node-labels` flag to set labels under the `kubernetes.io/` and `k8s.io/` prefix will be subject to restriction by the `NodeRestriction` admission plugin in future releases. [See admission plugin documentation](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#noderestriction) for allowed labels. ([#68267](https://github.com/kubernetes/kubernetes/pull/68267), [@liggitt](https://github.com/liggitt))
- kube-scheduler
- The alpha critical pod annotation (`scheduler.alpha.kubernetes.io/critical-pod`) is deprecated. Pod priority should be used instead to mark pods as critical. ([#70298](https://github.com/kubernetes/kubernetes/pull/70298), [@bsalamat](https://github.com/bsalamat))
- The following features are now GA, and the associated feature gates are deprecated and will be removed in a future release:
- CSIPersistentVolume
- GCERegionalPersistentDisk
- KubeletPluginsWatcher
- VolumeScheduling
- kubeadm
- The DynamicKubeletConfig feature gate is deprecated. The functionality is still accessible by using the kubeadm alpha kubelet enable-dynamic command.
- The command `kubeadm config print-defaults` is deprecated in favor of `kubeadm config print init-defaults` and `kubeadm config print join-defaults` ([#69617](https://github.com/kubernetes/kubernetes/pull/69617), [@rosti](https://github.com/rosti))
- Support for the `v1alpha3` configuration file format is deprecated and will be removed in 1.14. Use `kubeadm config migrate` to migrate `v1alpha3` configuration files to `v1beta1`, which provides improvements in image repository management, addons configuration, and other areas. The documentation for `v1beta1` can be found here: https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1
- The `node.status.volumes.attached.devicePath` field is deprecated for CSI volumes and will not be set in future releases ([#71095](https://github.com/kubernetes/kubernetes/pull/71095), [@msau42](https://github.com/msau42))
- kubectl
- The `kubectl convert` command is deprecated and will be removed in a future release ([#70820](https://github.com/kubernetes/kubernetes/pull/70820), [@seans3](https://github.com/seans3))
- Support for passing unknown provider names to the E2E test binaries is deprecated and will be removed in a future release. Use `--provider=skeleton` (no ssh access) or `--provider=local` (local cluster with ssh) instead. ([#70141](https://github.com/kubernetes/kubernetes/pull/70141), [@pohly](https://github.com/pohly))
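
As a point of reference for the `--encryption-provider-config` deprecation note above, here is a minimal sketch of the beta `EncryptionConfiguration` format; the resource list, provider choice, and key value are illustrative placeholders rather than recommended defaults:

```yaml
# Example file passed to kube-apiserver via --encryption-provider-config (values are placeholders).
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
  - resources:
      - secrets                      # encrypt Secret objects at rest in etcd
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: <base64-encoded 32-byte key>   # placeholder; generate and base64-encode your own key
      - identity: {}                 # fallback so data written before encryption was enabled stays readable
```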
## Major Themes
### SIG API Machinery
For the 1.13 release, SIG API Machinery is happy to announce that the [dry-run functionality](https://kubernetes.io/docs/reference/using-api/api-concepts/#dry-run) is now beta.
### SIG Auth
With this release we've made several important enhancements to core SIG Auth areas. In the authorization category, we've further reduced Kubelet privileges by [restricting node self-updates of labels to a whitelisted selection and by disallowing kubelets from deleting their Node API object](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#noderestriction). In authentication, we added alpha-level support for automounting improved service account tokens through projected volumes. We also enabled [audience validation in TokenReview](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.13/#tokenreview-v1-authentication-k8s-io) for the new tokens for improved scoping. Under audit logging, the new alpha-level "dynamic audit configuration" adds support for [dynamically registering webhooks to receive a stream of audit events](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/#dynamic-backend). Finally, we've enhanced secrets protection by graduating [etcd encryption](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/) out of experimental.
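
To make the authentication changes above concrete, here is a minimal, hypothetical pod sketch that mounts an audience- and time-bound service account token through a projected volume (the audience, paths, and image are placeholders, and the relevant alpha feature gates must be enabled):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: projected-token-demo
spec:
  containers:
    - name: app
      image: k8s.gcr.io/pause:3.1        # placeholder image for the example
      volumeMounts:
        - name: api-token
          mountPath: /var/run/secrets/tokens
  volumes:
    - name: api-token
      projected:
        sources:
          - serviceAccountToken:
              path: api-token            # file name under the mount path
              audience: my-audience      # validated via TokenReview audience checks
              expirationSeconds: 3600    # token is rotated before it expires
```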
### SIG AWS
In v1.13 we worked on tighter integrations of Kubernetes API objects with AWS services. These include three out-of-tree alpha feature releases:
1) Alpha for AWS ALB (Application Load Balancer) integration to Kubernetes Ingress resources.
2) Alpha for CSI specification 0.3 integration to AWS EBS (Elastic Block Store).
3) Alpha for the cloudprovider-aws cloud controller manager binary. Additionally, we added [aws-k8s-tester](https://github.com/kubernetes/test-infra/issues/9814), a deployer interface for kubetest, to the test-infra repository. This plugin allowed us to integrate Prow with the three subprojects defined above in order to provide CI signal for all three features. The CI signal is visible [here](https://testgrid.k8s.io/) under SIG-AWS.

For detailed release notes on the three alpha features from SIG AWS, please refer to the following changelogs:
- [aws-alb-ingress-controller v1.0.0](https://github.com/kubernetes-sigs/aws-alb-ingress-controller/releases/tag/v1.0.0)
- [aws-ebs-csi-driver v0.1](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/master/CHANGELOG-0.1.md)
- [cloudprovider-aws external v0.1.0](https://github.com/kubernetes/cloud-provider-aws/blob/master/changelogs/CHANGELOG-0.1.md)
### SIG Azure
For 1.13 SIG Azure was focused on adding additional Azure Disk support for Ultra SSD, Standard SSD, and Premium Azure Files. Azure Availability Zones and cross resource group nodes were also moved from Alpha to Beta in 1.13.
### SIG Big Data
During the 1.13 release cycle, SIG Big Data has been focused on community engagements relating to 3rd-party project integrations with Kubernetes. There have been no impacts on the 1.13 release.
### SIG CLI
Over the course of the 1.13 release, SIG CLI mostly focused on stabilizing the items we’ve been working on over the past releases, such as server-side printing and its support in kubectl, as well as finishing [kubectl diff, which is based on the server-side dry-run feature](https://kubernetes.io/docs/concepts/overview/object-management-kubectl/#how-to-create-objects). We’ve continued separating kubectl code to prepare for its extraction out of the main repository. Finally, thanks to the awesome support and feedback from the community, we’ve managed to promote the new [plugin mechanism to Beta](https://kubernetes.io/docs/tasks/extend-kubectl/kubectl-plugins/).
### SIG Cloud Provider
For v1.13, SIG Cloud Provider has been focused on stabilizing the common APIs and interfaces consumed by cloud providers today. This involved auditing the cloud provider APIs for anything that should be deprecated, as well as adding changes where necessary. In addition, SIG Cloud Provider has begun exploratory work around having a “cloud provider” e2e test suite which can be used to test common cloud provider functionality with resources such as nodes and load balancers.

We are also continuing our long-running effort to extract all the existing cloud providers that live in k8s.io/kubernetes into their own respective repos. Along with this migration, we are slowly transitioning users to use the cloud-controller-manager for any cloud provider features instead of the kube-controller-manager.
### SIG Cluster Lifecycle
For 1.13 SIG Cluster Lifecycle is pleased to announce the long awaited promotion of kubeadm to stable GA, and the promotion of kubeadm’s configuration API to `v1beta1`.
In this release the SIG again focused on further improving the user experience on cluster creation and also fixing a number of bugs and other assorted improvements.
Some notable changes in kubeadm since Kubernetes 1.12:
- kubeadm’s configuration API is now `v1beta1`. The new configuration format provides improvements in image repository management, addons configuration, and other areas. We encourage `v1alpha3` users to migrate to this configuration API using `kubeadm config migrate`, as `v1alpha3` will be removed in 1.14. The documentation for `v1beta1` can be found here: https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 (see the minimal configuration sketch after this list).
- kubeadm has graduated `kubeadm alpha phase` commands to `kubeadm init phase`. This means that the phases of creating a control-plane node are now tightly integrated as part of the `init` command. Alpha features, not yet ready for GA, are still kept under `kubeadm alpha`, and we appreciate feedback on them.
- `kubeadm init` and `kubeadm init phase` now have a `--image-repository` flag, improving support for environments with limited access to the official Kubernetes repository.
- The DynamicKubeletConfig and SelfHosting functionality was moved outside of `kubeadm init` and feature gates, and is now exposed under `kubeadm alpha`.
- `kubeadm init phase certs` now supports the `--csr-only` option, simplifying custom CA creation.
- `kubeadm join --experimental-control-plane` now automatically adds a new etcd member for `local etcd` mode, further simplifying required tasks for HA cluster setup.
- Improvements were made to `kubeadm reset` related to cleaning etcd and notifying the user about the state of iptables.
- kubeadm commands now print warnings if input YAML documents contain unknown or duplicate fields.
- kubeadm now properly recognizes Docker 18.09.0 and newer, but still treats 18.06 as the default supported version.
- kubeadm now automatically sets the `--pod-infra-container-image` flag when starting the kubelet.
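
As referenced in the first item of the list above, here is a minimal, hypothetical sketch of a `v1beta1` kubeadm configuration; every value shown is a placeholder to adapt, not a recommended default:

```yaml
# kubeadm-config.yaml -- illustrative example; used with: kubeadm init --config kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
bootstrapTokens:
  - token: "abcdef.0123456789abcdef"   # example token only
nodeRegistration:
  name: control-plane-1
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.13.0
imageRepository: registry.example.com/kubernetes   # mirror for restricted environments
networking:
  podSubnet: 10.244.0.0/16
```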
### SIG IBM Cloud
The IBM Cloud SIG was focused on defining its charter and working towards moving its cloud provider code to an external repository, with a goal of having this work done by the end of the Kubernetes 1.14 release cycle. In the SIG meetings, we also made sure to share updates on the latest Kubernetes developments in the IBM Cloud, such as the availability of Kubernetes v1.12.2 in the IBM Cloud Kubernetes Service (IKS). The SIG updates were provided in the Kubernetes community weekly call and at KubeCon China 2018.
### SIG Multicluster
Moving Federation v2 from Alpha towards Beta has been the focus of our effort over the past quarter. To this end we engaged with end users, and successfully enlisted additional contributors from companies including IBM, Amadeus, Cisco and others. Federation v2 provides a suite of decoupled APIs and reusable components for building multi-cluster control planes. We plan to start releasing Beta components in late 2018. In addition, further minor updates were made to our cluster-registry and multi-cluster ingress sub-projects.
### SIG Network
For 1.13, the areas of focus were IPv6, DNS improvements, and some smaller items:

- CoreDNS is now the default cluster DNS, passing all of the scale/resource usage tests.
- The node-local DNS cache feature is available in Alpha. This feature deploys a lightweight DNS caching DaemonSet that avoids conntrack and converts queries from UDP to more reliable TCP.
- The PodReady++ feature now has `kubectl` CLI support.

Progress was made towards finalizing the IPv6 dual-stack support KEP and support for topological routing of services.
### SIG Node
SIG Node focused on stability and performance improvements in the 1.13 release. A new alpha feature is introduced to improve the mechanism that nodes heartbeat back to the control plane. The `NodeLease` feature results in the node using a `Lease` resource in the `kube-node-lease` namespace that is renewed periodically. The `NodeStatus` that was used previously to heartbeat back to the control plane is only updated when it changes. This reduces load on the control plane for large clusters. The Kubelet plugin registration mechanism, which enables automatic discovery of external plugins (including CSI and device plugins) has been promoted to stable in this release (introduced as alpha in 1.11 and promoted to beta in 1.12).
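
As a rough illustration of the `NodeLease` mechanism, each node's kubelet renews a `Lease` object in the `kube-node-lease` namespace along these lines (field values are illustrative; the object is created and updated by the kubelet, not by users):

```yaml
apiVersion: coordination.k8s.io/v1beta1
kind: Lease
metadata:
  name: node-1                               # matches the Node name
  namespace: kube-node-lease
spec:
  holderIdentity: node-1                     # the kubelet holding the lease
  leaseDurationSeconds: 40                   # how long the heartbeat is considered valid
  renewTime: "2018-12-03T10:00:00.000000Z"   # updated on every heartbeat
```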
### SIG Openstack
The major theme for the SIG OpenStack release is the work in progress on removing the in-tree provider. This work, being done in conjunction with SIG Cloud Provider, is focused on moving internal APIs that the OpenStack provider (and other providers) depend upon to staging, to guarantee API stability. This work also included abstracting the in-tree Cinder API and refactoring code to the external Cinder provider to remove additional Cinder volume provider code.

Additional work was also done to implement an OpenStack driver for the Cluster API effort led by SIG Cluster Lifecycle. For the external Cloud-Provider-OpenStack code, the SIG largely focused on bug fixes and updates to match K8s 1.13 development.
### SIG Scalability
SIG Scalability has mostly focused on stability and deflaking our tests, on investing in a framework for writing scalability tests (ClusterLoader v2) with a goal of migrating all tests to it by the end of 2018, and on work towards extending the definition of Kubernetes scalability by providing more and better user-friendly SLIs/SLOs.
### SIG Scheduling
SIG Scheduling has mostly focused on stability in 1.13 and has postponed some of the major features to the next versions. There are still two notable changes: 1. TaintBasedEviction is moved to Beta and will be enabled by default. With this feature enabled, condition taints are automatically added to the nodes, and pods can add tolerations for them if needed. 2. The pod critical annotation is deprecated. Pods should use pod priority instead of the annotation.
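
To illustrate the TaintBasedEviction change, a pod that wants to tolerate the `node.kubernetes.io/not-ready` condition taint for a bounded time can declare something like the following sketch (the timeout and image are placeholders):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: toleration-demo
spec:
  containers:
    - name: app
      image: k8s.gcr.io/pause:3.1       # placeholder image for the example
  tolerations:
    - key: node.kubernetes.io/not-ready
      operator: Exists
      effect: NoExecute
      tolerationSeconds: 300            # evict only after the node stays NotReady for 5 minutes
```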
It is worth noting again that kube-scheduler will use apiVersion `kubescheduler.config.k8s.io/v1alpha1` instead of `componentconfig/v1alpha1` in its configuration files in 1.13.
### SIG Service Catalog
The Service Plan Defaults feature is still under active development.
We continue to improve the UX for the svcat CLI, specifically filling in gaps for the new Namespaced Service Broker feature.
### SIG Storage
Over the last year, SIG Storage has been focused on adding support for the Container Storage Interface (CSI) to Kubernetes. The specification recently moved to 1.0, and on the heels of this achievement, Kubernetes v1.13 moves CSI support for PersistentVolumes to GA.

With CSI the Kubernetes volume layer becomes truly extensible, allowing third-party storage developers to write drivers making their storage systems available in Kubernetes without having to touch the core code.

CSI was first introduced as alpha in Kubernetes v1.9 and moved to beta in Kubernetes v1.10.

You can find a list of sample and production drivers in the [CSI Documentation](https://kubernetes.io/docs/concepts/storage/volumes/#csi).

SIG Storage also moves support for Block Volumes to beta (introduced as alpha in v1.9) and support for Topology Aware Volume Scheduling to stable (introduced as alpha in v1.9 and promoted to beta in 1.10).
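
For illustration, a PersistentVolume backed by a CSI driver looks roughly like the following sketch; the driver name, volume handle, and capacity are placeholders for whatever a real driver provisions:

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: csi-example-pv
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  csi:
    driver: csi.example.com          # name registered by the CSI driver
    volumeHandle: vol-0123456789     # driver-specific volume identifier
    fsType: ext4
```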
### SIG UI
The migration to the newest version of Angular is still under active development, as it is the most important item on the roadmap at the moment. We are getting closer to the new release. We continue fixing bugs and adding other improvements.
### SIG VMWare
The major focus for SIG VMware for this release is the work on moving internal APIs that the vSphere provider depends upon to staging, to guarantee API stability. This work is being done in conjunction with SIG Cloud Provider and includes the creation of a brand new vsphere-csi plugin to replace the current in-tree volume functionality.

Additional work was also done to implement a vSphere provider for the Cluster API effort led by SIG Cluster Lifecycle. For the out-of-tree vSphere cloud provider, the SIG largely focused on bug fixes and updates to match K8s 1.13 development.
### SIG Windows
SIG Windows focused on improving the reliability of Windows and Kubernetes support.
## New Features
- kubelet: When the node lease feature is enabled, the kubelet reports node status to the API server only if there is some change or it hasn't reported during the last report interval. ([#69753](https://github.com/kubernetes/kubernetes/pull/69753), [@wangzhen127](https://github.com/wangzhen127))
- vSphereVolume implements Raw Block Volume Support ([#68761](https://github.com/kubernetes/kubernetes/pull/68761), [@fanzhangio](https://github.com/fanzhangio))
- CRD supports multi-version Schema, Subresources and AdditionalPrintColumns (NOTE that CRDs created prior to 1.13 populated the top-level additionalPrinterColumns field by default. To apply an update that changes to per-version additionalPrinterColumns, the top-level additionalPrinterColumns field must be explicitly set to null). ([#70211](https://github.com/kubernetes/kubernetes/pull/70211), [@roycaihw](https://github.com/roycaihw))
- New addon in addon manager that automatically installs CSI CRDs if CSIDriverRegistry or CSINodeInfo feature gates are true. ([#70193](https://github.com/kubernetes/kubernetes/pull/70193), [@saad-ali](https://github.com/saad-ali))
- Delegated authorization can now allow unrestricted access for `system:masters` like the main kube-apiserver ([#70671](https://github.com/kubernetes/kubernetes/pull/70671), [@deads2k](https://github.com/deads2k))
- Added dns capabilities for Windows CNI plugins: ([#67435](https://github.com/kubernetes/kubernetes/pull/67435), [@feiskyer](https://github.com/feiskyer))
- kube-apiserver: `--audit-webhook-version` and `--audit-log-version` now default to `audit.k8s.io/v1` if unspecified ([#70476](https://github.com/kubernetes/kubernetes/pull/70476), [@charrywanganthony](https://github.com/charrywanganthony))
- kubeadm: timeoutForControlPlane is introduced as part of the API server configuration; it controls the timeout for waiting for the control plane to come up. The default value is 4 minutes. ([#70480](https://github.com/kubernetes/kubernetes/pull/70480), [@rosti](https://github.com/rosti))
- `--api-audiences` now defaults to the `--service-account-issuer` if the issuer is provided but the API audience is not. ([#70308](https://github.com/kubernetes/kubernetes/pull/70308), [@mikedanese](https://github.com/mikedanese))
- Added support for projected volume in describe function ([#70158](https://github.com/kubernetes/kubernetes/pull/70158), [@WanLinghao](https://github.com/WanLinghao))
- kubeadm now automatically creates a new stacked etcd member when joining a new control plane node (does not apply to external etcd) ([#69486](https://github.com/kubernetes/kubernetes/pull/69486), [@fabriziopandini](https://github.com/fabriziopandini))
- Display the usage of ephemeral-storage when using `kubectl describe node` ([#70268](https://github.com/kubernetes/kubernetes/pull/70268), [@Pingan2017](https://github.com/Pingan2017))
- Added functionality to enable br_netfilter and ip_forward for Debian packages to improve kubeadm support for CRI runtimes besides Docker. ([#70152](https://github.com/kubernetes/kubernetes/pull/70152), [@ashwanikhemani](https://github.com/ashwanikhemani))
- Added regions ap-northeast-3 and eu-west-3 to the list of well known AWS regions. ([#70252](https://github.com/kubernetes/kubernetes/pull/70252), [@nckturner](https://github.com/nckturner))
- kubeadm: Implemented a preflight check to ensure that the number of CPUs is above the required minimum. ([#70048](https://github.com/kubernetes/kubernetes/pull/70048), [@bart0sh](https://github.com/bart0sh))
- CoreDNS is now the default DNS server in kube-up deployments. ([#69883](https://github.com/kubernetes/kubernetes/pull/69883), [@chrisohaver](https://github.com/chrisohaver))
- Opt out of chowning and chmoding from kubectl cp. ([#69573](https://github.com/kubernetes/kubernetes/pull/69573), [@bjhaid](https://github.com/bjhaid))
- Failed to provision volume with StorageClass "azurefile-premium": failed to create share andy-mg1121-dynamic-pvc-1a7b2813-d1b7-11e8-9e96-000d3a03e16b in account f7228f99bcde411e8ba4900: failed to create file share, err: storage: service returned error: StatusCode=400, ErrorCode=InvalidHeaderValue, ErrorMessage=The value for one of the HTTP headers is not in the correct format. ([#69718](https://github.com/kubernetes/kubernetes/pull/69718), [@andyzhangx](https://github.com/andyzhangx))
- `TaintBasedEvictions` feature is promoted to beta. ([#69824](https://github.com/kubernetes/kubernetes/pull/69824), [@Huang-Wei](https://github.com/Huang-Wei))
- Fixed https://github.com/kubernetes/client-go/issues/478 by adding support for JSON Patch in client-go/dynamic/fake ([#69330](https://github.com/kubernetes/kubernetes/pull/69330), [@vaikas-google](https://github.com/vaikas-google))
- Dry-run is promoted to Beta and will be enabled by default. ([#69644](https://github.com/kubernetes/kubernetes/pull/69644), [@apelisse](https://github.com/apelisse))
- `kubectl get priorityclass` now prints value column by default. ([#69431](https://github.com/kubernetes/kubernetes/pull/69431), [@Huang-Wei](https://github.com/Huang-Wei))
- Added a new container based image for running e2e tests ([#69368](https://github.com/kubernetes/kubernetes/pull/69368), [@dims](https://github.com/dims))
- The `LC_ALL` and `LC_MESSAGES` env vars can now be used to set desired locale for `kubectl` while keeping `LANG` unchanged. ([#69500](https://github.com/kubernetes/kubernetes/pull/69500), [@m1kola](https://github.com/m1kola))
- NodeLifecycleController: Now node lease renewal is treated as the heartbeat signal from the node, in addition to NodeStatus Update. ([#69241](https://github.com/kubernetes/kubernetes/pull/69241), [@wangzhen127](https://github.com/wangzhen127))
- Added dynamic shared informers to write generic, non-generated controllers ([#69308](https://github.com/kubernetes/kubernetes/pull/69308), [@p0lyn0mial](https://github.com/p0lyn0mial))
- Upgraded to etcd 3.3 client ([#69322](https://github.com/kubernetes/kubernetes/pull/69322), [@jpbetz](https://github.com/jpbetz))
- It is now possible to use named ports in the `kubectl port-forward` command ([#69477](https://github.com/kubernetes/kubernetes/pull/69477), [@m1kola](https://github.com/m1kola))
- `kubectl wait` now supports condition value checks other than true using `--for condition=available=false` ([#69295](https://github.com/kubernetes/kubernetes/pull/69295), [@deads2k](https://github.com/deads2k))
- Updated defaultbackend image to 1.5. Users should concentrate on updating scripts to the new version. ([#69120](https://github.com/kubernetes/kubernetes/pull/69120), [@aledbf](https://github.com/aledbf))
- Bumped Dashboard version to v1.10.0 ([#68450](https://github.com/kubernetes/kubernetes/pull/68450), [@jeefy](https://github.com/jeefy))
- Added env variables to control CPU requests of kube-controller-manager and kube-scheduler. ([#68823](https://github.com/kubernetes/kubernetes/pull/68823), [@loburm](https://github.com/loburm))
- PodSecurityPolicy objects now support a `MayRunAs` rule for `fsGroup` and `supplementalGroups` options. This allows specifying ranges of allowed GIDs for pods/containers without forcing a default GID the way `MustRunAs` does. This means that a container to which such a policy applies won't use any fsGroup/supplementalGroup GID if not explicitly specified, yet a specified GID must still fall in the GID range according to the policy (see the policy sketch after this list). ([#65135](https://github.com/kubernetes/kubernetes/pull/65135), [@stlaz](https://github.com/stlaz))
- Upgrade Stackdriver Logging Agent addon image to 0.6-1.6.0-1 to use Fluentd v1.2. This provides nanoseconds timestamp granularity for logs. ([#70954](https://github.com/kubernetes/kubernetes/pull/70954), [@qingling128](https://github.com/qingling128))
- When the BoundServiceAccountTokenVolumes Alpha feature is enabled, ServiceAccount volumes now use a projected volume source and their names have the prefix "kube-api-access". ([#69848](https://github.com/kubernetes/kubernetes/pull/69848), [@mikedanese](https://github.com/mikedanese))
- Raw block volume support is promoted to beta, and enabled by default. This is accessible via the `volumeDevices` container field in pod specs, and the `volumeMode` field in persistent volume and persistent volume claim definitions (see the sketch after this list). ([#71167](https://github.com/kubernetes/kubernetes/pull/71167), [@msau42](https://github.com/msau42))
- TokenReview now supports audience validation of tokens with audiences other than the kube-apiserver. ([#62692](https://github.com/kubernetes/kubernetes/pull/62692), [@mikedanese](https://github.com/mikedanese))
|
||||
- StatefulSet is supported in `kubectl autoscale` command ([#71103](https://github.com/kubernetes/kubernetes/pull/71103), [@Pingan2017](https://github.com/Pingan2017))
|
||||
- Kubernetes v1.13 moves support for Container Storage Interface to GA. As part of this move Kubernetes now supports CSI v1.0.0 and deprecates support for CSI 0.3 and older releases. Older CSI drivers must be updated to CSI 1.0 and moved to the new kubelet plugin registration directory in order to work with Kubernetes 1.15+. ([#71020](https://github.com/kubernetes/kubernetes/pull/71020), [@saad-ali](https://github.com/saad-ali))
|
||||
- Added option to create CSRs instead of certificates for kubeadm init phase certs and kubeadm alpha certs renew ([#70809](https://github.com/kubernetes/kubernetes/pull/70809), [@liztio](https://github.com/liztio))
|
||||
- Added a kubelet socket which serves an grpc service containing the devices used by containers on the node. ([#70508](https://github.com/kubernetes/kubernetes/pull/70508), [@dashpole](https://github.com/dashpole))
|
||||
- Added DynamicAuditing feature which allows for the configuration of audit webhooks through the use of an AuditSink API object. ([#67257](https://github.com/kubernetes/kubernetes/pull/67257), [@pbarker](https://github.com/pbarker))
|
||||
- The kube-apiserver's healthz now takes in an optional query parameter which allows you to disable health checks from causing healthz failures. ([#70676](https://github.com/kubernetes/kubernetes/pull/70676), [@logicalhan](https://github.com/logicalhan))
|
||||
- Introduced support for running a nodelocal dns cache. It is disabled by default, can be enabled by setting KUBE_ENABLE_NODELOCAL_DNS=true ([#70555](https://github.com/kubernetes/kubernetes/pull/70555), [@prameshj](https://github.com/prameshj))
|
||||
- Added readiness gates in extended output for pods ([#70775](https://github.com/kubernetes/kubernetes/pull/70775), [@freehan](https://github.com/freehan))
|
||||
- Added `Ready` column and improve human-readable output of Deployments and StatefulSets ([#70466](https://github.com/kubernetes/kubernetes/pull/70466), [@Pingan2017](https://github.com/Pingan2017))
|
||||
- Added `kubelet_container_log_size_bytes` metric representing the log file size of a container. ([#70749](https://github.com/kubernetes/kubernetes/pull/70749), [@brancz](https://github.com/brancz))
|
||||
- NodeLifecycleController: When node lease feature is enabled, node lease will be deleted when the corresponding node is deleted. ([#70034](https://github.com/kubernetes/kubernetes/pull/70034), [@wangzhen127](https://github.com/wangzhen127))
|
||||
- GCERegionalPersistentDisk feature is GA now! ([#70716](https://github.com/kubernetes/kubernetes/pull/70716), [@jingxu97](https://github.com/jingxu97))
|
||||
- Added secure port 10259 to the kube-scheduler (enabled by default) and deprecate old insecure port 10251. Without further flags self-signed certs are created on startup in memory. ([#69663](https://github.com/kubernetes/kubernetes/pull/69663), [@sttts](https://github.com/sttts))
|
||||
|
||||
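A minimal sketch of a few of the new CLI behaviors above, assuming placeholder resource names (`my-deploy`, `web`, `mypod`):

```sh
# Wait for a condition value other than "true".
kubectl wait --for=condition=available=false deployment/my-deploy --timeout=60s

# Forward a local port to a named container port instead of a port number.
kubectl port-forward pod/mypod 8080:http

# Autoscale a StatefulSet.
kubectl autoscale statefulset web --min=2 --max=5 --cpu-percent=80

# Run kubectl with a different message locale while leaving LANG untouched.
LC_MESSAGES=de_DE.UTF-8 kubectl get pods
```

And a minimal sketch of the beta raw block volume support; the storage class name `fast` is an assumption, not part of the release notes:

```sh
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: block-pvc
spec:
  accessModes: ["ReadWriteOnce"]
  volumeMode: Block              # request a raw block device instead of a filesystem
  storageClassName: fast         # assumed storage class that supports block mode
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: Pod
metadata:
  name: block-consumer
spec:
  containers:
  - name: app
    image: busybox
    command: ["sleep", "3600"]
    volumeDevices:               # block devices are listed here, not under volumeMounts
    - name: data
      devicePath: /dev/xvda
  volumes:
  - name: data
    persistentVolumeClaim:
      claimName: block-pvc
EOF
```
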
## Release Notes From SIGs

### SIG API Machinery

- The OwnerReferencesPermissionEnforcement admission plugin now checks authorization for the correct scope (namespaced or cluster-scoped) of the owner resource type. Previously, it always checked permissions at the same scope as the child resource. ([#70389](https://github.com/kubernetes/kubernetes/pull/70389), [@caesarxuchao](https://github.com/caesarxuchao))
- The OpenAPI spec now correctly marks the delete request's body parameter as optional ([#70032](https://github.com/kubernetes/kubernetes/pull/70032), [@iamneha](https://github.com/iamneha))
- The rules for incrementing `metadata.generation` of custom resources changed (see the sketch after this list): ([#69059](https://github.com/kubernetes/kubernetes/pull/69059), [@caesarxuchao](https://github.com/caesarxuchao))
  - If the custom resource participates in the spec/status convention, the metadata.generation of the CR increments when there is any change, except for changes to the metadata or changes to the status.
  - If the custom resource does not participate in the spec/status convention, the metadata.generation of the CR increments when there is any change to the CR, except for changes to the metadata.
  - A custom resource is considered to participate in the spec/status convention if and only if the "CustomResourceSubresources" feature gate is turned on and the CRD has `.spec.subresources.status={}`.
- Fixed patch/update operations on multi-version custom resources ([#70087](https://github.com/kubernetes/kubernetes/pull/70087), [@liggitt](https://github.com/liggitt))
- Reduced memory utilization of admission webhook metrics by removing resource-related labels. ([#69895](https://github.com/kubernetes/kubernetes/pull/69895), [@jpbetz](https://github.com/jpbetz))
- The kubelet can now parse a PEM file containing both the TLS certificate and key in arbitrary order. Previously the key was required to come first. ([#69536](https://github.com/kubernetes/kubernetes/pull/69536), [@awly](https://github.com/awly))
- Code-gen: Removed lowercasing for project imports ([#68484](https://github.com/kubernetes/kubernetes/pull/68484), [@jsturtevant](https://github.com/jsturtevant))
- Fixed client cert setup in delegating authentication logic ([#69430](https://github.com/kubernetes/kubernetes/pull/69430), [@DirectXMan12](https://github.com/DirectXMan12))
- The OpenAPI spec and API reference now reflect the dryRun query parameter for POST/PUT/PATCH operations ([#69359](https://github.com/kubernetes/kubernetes/pull/69359), [@roycaihw](https://github.com/roycaihw))
- Fixed the sample-apiserver so that its BanFlunder admission plugin can be used. ([#68417](https://github.com/kubernetes/kubernetes/pull/68417), [@MikeSpreitzer](https://github.com/MikeSpreitzer))
- APIService availability problems caused by networking glitches are now corrected faster ([#68678](https://github.com/kubernetes/kubernetes/pull/68678), [@deads2k](https://github.com/deads2k))
- Fixed an issue with stuck connections handling error responses ([#71412](https://github.com/kubernetes/kubernetes/pull/71412), [@liggitt](https://github.com/liggitt))
- apiserver: fixed handling and logging of panics in REST handlers ([#71076](https://github.com/kubernetes/kubernetes/pull/71076), [@liggitt](https://github.com/liggitt))
- kube-controller-manager no longer removes ownerReferences from ResourceQuota objects ([#70035](https://github.com/kubernetes/kubernetes/pull/70035), [@liggitt](https://github.com/liggitt))
- "unfinished_work_microseconds" is added to the workqueue metrics; it can be used to detect stuck worker threads. (kube-controller-manager runs many workqueues.) ([#70884](https://github.com/kubernetes/kubernetes/pull/70884), [@lavalamp](https://github.com/lavalamp))
- Timeouts set in ListOptions for clients are now also respected locally ([#70998](https://github.com/kubernetes/kubernetes/pull/70998), [@deads2k](https://github.com/deads2k))
- Added support for CRD conversion webhooks ([#67006](https://github.com/kubernetes/kubernetes/pull/67006), [@mbohlool](https://github.com/mbohlool))
- client-go: fixed sending oversized data frames to spdystreams in remotecommand.NewSPDYExecutor ([#70999](https://github.com/kubernetes/kubernetes/pull/70999), [@liggitt](https://github.com/liggitt))
- Fixed missing flags in `-controller-manager --help`. ([#71298](https://github.com/kubernetes/kubernetes/pull/71298), [@stewart-yu](https://github.com/stewart-yu))
- Fixed missing flags in `kube-apiserver --help`. ([#70204](https://github.com/kubernetes/kubernetes/pull/70204), [@imjching](https://github.com/imjching))
- The caBundle and service fields in admission webhook API objects now correctly indicate they are optional ([#70138](https://github.com/kubernetes/kubernetes/pull/70138), [@liggitt](https://github.com/liggitt))
- Fixed an issue with stuck connections handling error responses ([#71419](https://github.com/kubernetes/kubernetes/pull/71419), [@liggitt](https://github.com/liggitt))
- kube-controller-manager and cloud-controller-manager now hold generated serving certificates in-memory unless a writeable location is specified with --cert-dir ([#69884](https://github.com/kubernetes/kubernetes/pull/69884), [@liggitt](https://github.com/liggitt))
- The CCM server will no longer listen insecurely if a secure port is specified ([#68982](https://github.com/kubernetes/kubernetes/pull/68982), [@aruneli](https://github.com/aruneli))
- List operations against the API now return internal server errors instead of partially complete lists when a value cannot be transformed from storage. The updated behavior is consistent with all other operations that require transforming data from storage, such as watch and get. ([#69399](https://github.com/kubernetes/kubernetes/pull/69399), [@mikedanese](https://github.com/mikedanese))

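A minimal sketch of a CRD that opts in to the spec/status convention described above; the group and kind are illustrative only:

```sh
kubectl apply -f - <<'EOF'
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: widgets.example.com
spec:
  group: example.com
  version: v1
  scope: Namespaced
  names:
    kind: Widget
    plural: widgets
    singular: widget
  subresources:
    status: {}   # with the status subresource enabled, status-only updates do not bump metadata.generation
EOF
```
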
### SIG Auth

- The API Server can be configured to reject requests that cannot be audit-logged. ([#65763](https://github.com/kubernetes/kubernetes/pull/65763), [@x13n](https://github.com/x13n))
- Go clients created from a kubeconfig that specifies a TokenFile now periodically reload the token from the specified file. ([#70606](https://github.com/kubernetes/kubernetes/pull/70606), [@mikedanese](https://github.com/mikedanese))
- When `--rotate-server-certificates` is enabled, the kubelet will no longer request a new certificate on startup if the current certificate on disk is satisfactory. ([#69991](https://github.com/kubernetes/kubernetes/pull/69991), [@agunnerson-ibm](https://github.com/agunnerson-ibm))
- Added a dynamic audit configuration API ([#67547](https://github.com/kubernetes/kubernetes/pull/67547), [@pbarker](https://github.com/pbarker))
- Added the ability to control the primary GID of containers through the Pod Spec and PodSecurityPolicy ([#67802](https://github.com/kubernetes/kubernetes/pull/67802), [@krmayankk](https://github.com/krmayankk))
- kube-apiserver: the `NodeRestriction` admission plugin now prevents kubelets from modifying `Node` labels prefixed with `node-restriction.kubernetes.io/`. The `node-restriction.kubernetes.io/` label prefix is reserved for cluster administrators to use for labeling `Node` objects to target workloads to nodes in a way that kubelets cannot modify or spoof; see the example after this list. ([#68267](https://github.com/kubernetes/kubernetes/pull/68267), [@liggitt](https://github.com/liggitt))

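A short sketch of the reserved label prefix in use; the node name and label key are placeholders:

```sh
# Only cluster administrators can manage labels under this prefix; the
# NodeRestriction admission plugin blocks kubelets from setting or changing them.
kubectl label node worker-1 node-restriction.kubernetes.io/pci-dss=true

# Workloads can then target such nodes with a pod-spec nodeSelector:
#   nodeSelector:
#     node-restriction.kubernetes.io/pci-dss: "true"
```
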
### SIG Autoscaling

- Updated the Cluster Autoscaler version to 1.13.0. See the [Release Notes](https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.13.0) for more information. ([#71513](https://github.com/kubernetes/kubernetes/pull/71513), [@losipiuk](https://github.com/losipiuk))

### SIG AWS

- `service.beta.kubernetes.io/aws-load-balancer-internal` now supports `true` and `false` values; previously it only supported non-empty strings ([#69436](https://github.com/kubernetes/kubernetes/pull/69436), [@mcrute](https://github.com/mcrute))
- Added the `service.beta.kubernetes.io/aws-load-balancer-security-groups` annotation: when present, the AWS ELB is assigned only the security groups specified in the annotation (does not add `0.0.0.0/0`); see the example after this list. ([#62774](https://github.com/kubernetes/kubernetes/pull/62774), [@Raffo](https://github.com/Raffo))

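A minimal sketch of a Service using these annotations; the security group ID and selector are placeholders:

```sh
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Service
metadata:
  name: internal-lb
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-internal: "true"
    service.beta.kubernetes.io/aws-load-balancer-security-groups: "sg-0123456789abcdef0"
spec:
  type: LoadBalancer
  selector:
    app: web
  ports:
  - port: 80
    targetPort: 8080
EOF
```
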
### SIG Azure

- Ensured orphaned public IPs on Azure are deleted when a service is recreated with the same name. ([#70463](https://github.com/kubernetes/kubernetes/pull/70463), [@feiskyer](https://github.com/feiskyer))
- Improved Azure instance metadata handling by adding caches. ([#70353](https://github.com/kubernetes/kubernetes/pull/70353), [@feiskyer](https://github.com/feiskyer))
- Corrected the check for non-Azure-managed nodes with the Azure cloud provider ([#70135](https://github.com/kubernetes/kubernetes/pull/70135), [@marc-sensenich](https://github.com/marc-sensenich))
- Fixed an issue where Azure disk attach/detach could fail forever ([#71377](https://github.com/kubernetes/kubernetes/pull/71377), [@andyzhangx](https://github.com/andyzhangx))
- DisksAreAttached → getNodeDataDisks → GetDataDisks → getVirtualMachine → vmCache.Get ([#71495](https://github.com/kubernetes/kubernetes/pull/71495), [@andyzhangx](https://github.com/andyzhangx))

### SIG CLI

- `kubectl apply` can now change a deployment strategy from rollout to recreate without explicitly clearing the rollout-related fields ([#70436](https://github.com/kubernetes/kubernetes/pull/70436), [@liggitt](https://github.com/liggitt))
- The `kubectl plugin list` command now displays discovered plugin paths in the same order as they are found in a user's PATH variable. ([#70443](https://github.com/kubernetes/kubernetes/pull/70443), [@juanvallejo](https://github.com/juanvallejo))
- `kubectl get` no longer exits before printing all of its results if an error is found ([#70311](https://github.com/kubernetes/kubernetes/pull/70311), [@juanvallejo](https://github.com/juanvallejo))
- Fixed a runtime error occurring when sorting the output of `kubectl get` with empty results ([#70740](https://github.com/kubernetes/kubernetes/pull/70740), [@mfpierre](https://github.com/mfpierre))
- kubectl: support multiple arguments for cordon/uncordon and drain ([#68655](https://github.com/kubernetes/kubernetes/pull/68655), [@goodluckbot](https://github.com/goodluckbot))
- Fixed the ability for admin/edit/view users to see controller revisions, needed for kubectl rollout commands ([#70699](https://github.com/kubernetes/kubernetes/pull/70699), [@liggitt](https://github.com/liggitt))
- `kubectl rollout undo` now returns errors when attempting to roll back a deployment to a non-existent revision ([#70039](https://github.com/kubernetes/kubernetes/pull/70039), [@liggitt](https://github.com/liggitt))
- kubectl run now generates apps/v1 deployments by default ([#71006](https://github.com/kubernetes/kubernetes/pull/71006), [@liggitt](https://github.com/liggitt))
- The "kubectl cp" command now supports path shortcuts (../) in remote paths; see the examples after this list. ([#65189](https://github.com/kubernetes/kubernetes/pull/65189), [@juanvallejo](https://github.com/juanvallejo))
- Fixed dry-run output in kubectl apply --prune ([#69344](https://github.com/kubernetes/kubernetes/pull/69344), [@zegl](https://github.com/zegl))
- `kubectl wait` now handles a watch that returns an error (as opposed to closing) by printing the error and retrying the watch. ([#69389](https://github.com/kubernetes/kubernetes/pull/69389), [@smarterclayton](https://github.com/smarterclayton))

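A short sketch of the `kubectl cp` and cordon/drain changes; pod and node names are placeholders:

```sh
# Relative path segments (../) are now accepted in the remote part of kubectl cp.
kubectl cp mypod:/var/log/../etc/hostname ./hostname

# cordon, uncordon and drain now accept multiple nodes in one invocation.
kubectl cordon node-1 node-2 node-3
kubectl drain node-1 node-2 --ignore-daemonsets
```
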
### SIG Cloud Provider

- Added a deprecation warning for all cloud providers ([#69171](https://github.com/kubernetes/kubernetes/pull/69171), [@andrewsykim](https://github.com/andrewsykim))

### SIG Cluster Lifecycle

- kubeadm: Updated the version of CoreDNS to 1.2.6 ([#70796](https://github.com/kubernetes/kubernetes/pull/70796), [@detiber](https://github.com/detiber))
- kubeadm: Validate kubeconfig files in case of external CA mode. ([#70537](https://github.com/kubernetes/kubernetes/pull/70537), [@yagonobre](https://github.com/yagonobre))
- kubeadm: The writable config file option for extra volumes is renamed to readOnly with a reversed meaning, with readOnly defaulted to false (as in pod specs). ([#70495](https://github.com/kubernetes/kubernetes/pull/70495), [@rosti](https://github.com/rosti))
- kubeadm: Multiple API server endpoints support upon join is removed as it is now redundant. ([#69812](https://github.com/kubernetes/kubernetes/pull/69812), [@rosti](https://github.com/rosti))
- `kubeadm reset` now cleans up the custom etcd data path ([#70003](https://github.com/kubernetes/kubernetes/pull/70003), [@yagonobre](https://github.com/yagonobre))
- kubeadm: Fixed unnecessary upgrades caused by the undefined order of Volumes and VolumeMounts in manifests ([#70027](https://github.com/kubernetes/kubernetes/pull/70027), [@bart0sh](https://github.com/bart0sh))
- kubeadm: Fixed node join taints. ([#69846](https://github.com/kubernetes/kubernetes/pull/69846), [@andrewrynhard](https://github.com/andrewrynhard))
- Fixed cluster autoscaler addon permissions so it can access batch/job. ([#69858](https://github.com/kubernetes/kubernetes/pull/69858), [@losipiuk](https://github.com/losipiuk))
- kubeadm: JoinConfiguration now houses the discovery options in a nested Discovery structure, which in turn has a couple of other nested structures to house more specific options (BootstrapTokenDiscovery and FileDiscovery) ([#67763](https://github.com/kubernetes/kubernetes/pull/67763), [@rosti](https://github.com/rosti))
- kubeadm: Fixed a possible scenario where kubeadm can pull much newer control-plane images ([#69301](https://github.com/kubernetes/kubernetes/pull/69301), [@neolit123](https://github.com/neolit123))
- kubeadm now allows mixing of init/cluster and join configuration in a single YAML file (although a warning gets printed in this case). ([#69426](https://github.com/kubernetes/kubernetes/pull/69426), [@rosti](https://github.com/rosti))
- kubeadm: Added a `v1beta1` API. ([#69289](https://github.com/kubernetes/kubernetes/pull/69289), [@fabriziopandini](https://github.com/fabriziopandini))
- kubeadm init correctly uses `--node-name` and `--cri-socket` when the `--config` option is also used ([#71323](https://github.com/kubernetes/kubernetes/pull/71323), [@bart0sh](https://github.com/bart0sh))
- kubeadm: Always pass spec.nodeName as `--hostname-override` for kube-proxy ([#71283](https://github.com/kubernetes/kubernetes/pull/71283), [@Klaven](https://github.com/Klaven))
- `kubeadm join` correctly uses `--node-name` and `--cri-socket` when the `--config` option is also used ([#71270](https://github.com/kubernetes/kubernetes/pull/71270), [@bart0sh](https://github.com/bart0sh))
- kubeadm now supports the `--image-repository` flag for customizing what registry to pull images from; see the example after this list ([#71135](https://github.com/kubernetes/kubernetes/pull/71135), [@luxas](https://github.com/luxas))
- kubeadm: Use `advertise-client-urls` instead of `listen-client-urls` as the `etcd-servers` option for the apiserver. ([#69827](https://github.com/kubernetes/kubernetes/pull/69827), [@tomkukral](https://github.com/tomkukral))
- Kubeadm now respects the custom image registry configuration across joins and upgrades. Kubeadm passes the custom registry to the kubelet for a custom pause container. ([#70603](https://github.com/kubernetes/kubernetes/pull/70603), [@chuckha](https://github.com/chuckha))
- `kubeadm reset` now outputs instructions about manual iptables rules cleanup. ([#70874](https://github.com/kubernetes/kubernetes/pull/70874), [@rdodev](https://github.com/rdodev))
- kubeadm: remove the AuditPolicyConfiguration feature gate ([#70807](https://github.com/kubernetes/kubernetes/pull/70807), [@Klaven](https://github.com/Klaven))
- kubeadm pre-pulls the etcd image only if external etcd is not used ([#70743](https://github.com/kubernetes/kubernetes/pull/70743), [@bart0sh](https://github.com/bart0sh))
- kubeadm: UnifiedControlPlaneImage is replaced by the UseHyperKubeImage boolean value. ([#70793](https://github.com/kubernetes/kubernetes/pull/70793), [@rosti](https://github.com/rosti))
- For kube-up and derived configurations, CoreDNS will honor master taints, for consistency with kube-dns behavior. ([#70868](https://github.com/kubernetes/kubernetes/pull/70868), [@justinsb](https://github.com/justinsb))
- Recognize newer docker versions without the -ce/-ee suffix: 18.09.0 ([#71001](https://github.com/kubernetes/kubernetes/pull/71001), [@thomas-riccardi](https://github.com/thomas-riccardi))
- Any external provider should be aware the cloud-provider interface should be imported from: ([#68310](https://github.com/kubernetes/kubernetes/pull/68310), [@cheftako](https://github.com/cheftako))
- Fixed a 'kubeadm upgrade' infinite loop waiting for pod restart ([#69886](https://github.com/kubernetes/kubernetes/pull/69886), [@bart0sh](https://github.com/bart0sh))
- Bumped addon-manager to v8.8 ([#69337](https://github.com/kubernetes/kubernetes/pull/69337), [@MrHohn](https://github.com/MrHohn))
- GCE: Filter out spammy audit logs from cluster autoscaler. ([#70696](https://github.com/kubernetes/kubernetes/pull/70696), [@loburm](https://github.com/loburm))
- GCE: Enabled the audit logging truncating backend by default. ([#68288](https://github.com/kubernetes/kubernetes/pull/68288), [@loburm](https://github.com/loburm))
- Bumped cluster-proportional-autoscaler to 1.3.0 ([#69338](https://github.com/kubernetes/kubernetes/pull/69338), [@MrHohn](https://github.com/MrHohn))
- Updated defaultbackend to v1.5 ([#69334](https://github.com/kubernetes/kubernetes/pull/69334), [@bowei](https://github.com/bowei))

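A minimal sketch of the new kubeadm options; the registry host is a placeholder, and the CSR flag names below are assumptions based on the CSR support described above (check `kubeadm alpha certs renew --help` on your version):

```sh
# Pull control-plane images from a private mirror instead of the default registry.
kubeadm init --image-repository registry.example.com/kubernetes

# Generate CSRs instead of self-signed certificates so an external CA can sign them.
# Flag names are assumed, not taken from the release notes.
kubeadm alpha certs renew all --csr-only --csr-dir /tmp/csrs
```
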
### SIG GCP

- Added tolerations for Stackdriver Logging and Metadata Agents. ([#69737](https://github.com/kubernetes/kubernetes/pull/69737), [@qingling128](https://github.com/qingling128))
- Enabled insertId generation, and updated the Stackdriver Logging Agent image to 0.5-1.5.36-1-k8s. This helps reduce log duplication and guarantees log order. ([#68920](https://github.com/kubernetes/kubernetes/pull/68920), [@qingling128](https://github.com/qingling128))
- Updated crictl to v1.12.0 ([#69033](https://github.com/kubernetes/kubernetes/pull/69033), [@feiskyer](https://github.com/feiskyer))

### SIG Network

- Corrected the family type (inet6) for ipsets in ipv6-only clusters ([#68436](https://github.com/kubernetes/kubernetes/pull/68436), [@uablrek](https://github.com/uablrek))
- The kube-proxy argument `hostname-override` can be used to override the hostname defined in the configuration file ([#69340](https://github.com/kubernetes/kubernetes/pull/69340), [@stevesloka](https://github.com/stevesloka))
- CoreDNS correctly implements the DNS spec for Services with externalNames that look like IP addresses. Kube-dns does not follow the spec for the same case, resulting in a behavior change when moving from kube-dns to CoreDNS. See: [coredns/coredns#2324](https://github.com/coredns/coredns/issues/2324)
- The IPVS proxier now sets net/ipv4/vs/conn_reuse_mode to 0 by default, which significantly improves IPVS proxier performance; see the note after this list. ([#71114](https://github.com/kubernetes/kubernetes/pull/71114), [@Lion-Wei](https://github.com/Lion-Wei))
- CoreDNS is now version 1.2.6 ([#70799](https://github.com/kubernetes/kubernetes/pull/70799), [@rajansandeep](https://github.com/rajansandeep))
- Addon configuration is introduced in the kubeadm config API, while the CoreDNS feature flag is now deprecated. ([#70024](https://github.com/kubernetes/kubernetes/pull/70024), [@fabriziopandini](https://github.com/fabriziopandini))

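A quick way to verify the new IPVS default on a node running kube-proxy in IPVS mode (a sketch; `net.ipv4.vs.conn_reuse_mode` is the standard Linux IPVS sysctl):

```sh
# Expected to print 0 once kube-proxy (IPVS mode) has started on the node.
sysctl net.ipv4.vs.conn_reuse_mode
```
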
### SIG Node

- Fixed a bug in previous releases where a pod could be placed inside another pod's cgroup when specifying --cgroup-root ([#70678](https://github.com/kubernetes/kubernetes/pull/70678), [@dashpole](https://github.com/dashpole))
- Optimized the calculation of stats when only CPU and Memory stats are returned from the kubelet stats/summary HTTP endpoint. ([#68841](https://github.com/kubernetes/kubernetes/pull/68841), [@krzysztof-jastrzebski](https://github.com/krzysztof-jastrzebski))
- The kubelet now supports a `log-file` option to write logs directly to a specific file; see the sketch after this list ([#70917](https://github.com/kubernetes/kubernetes/pull/70917), [@dims](https://github.com/dims))
- Volumes are no longer detached if a mount is in progress ([#71145](https://github.com/kubernetes/kubernetes/pull/71145), [@gnufied](https://github.com/gnufied))
- The runtimeHandler field on the RuntimeClass resource now accepts the empty string. ([#69550](https://github.com/kubernetes/kubernetes/pull/69550), [@tallclair](https://github.com/tallclair))
- kube-apiserver: fixed the `procMount` field being incorrectly marked as required in the OpenAPI schema ([#69694](https://github.com/kubernetes/kubernetes/pull/69694), [@jessfraz](https://github.com/jessfraz))

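A minimal sketch of the new kubelet logging option; the log path is a placeholder and the trailing `...` stands for the rest of the usual kubelet flags:

```sh
# Write kubelet logs directly to a file instead of only stderr.
kubelet --log-file=/var/log/kubelet.log --logtostderr=false ...
```
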
### SIG OpenStack

- Fixed a cloud-controller-manager crash when using the OpenStack provider and the PersistentVolume initializing controller ([#70459](https://github.com/kubernetes/kubernetes/pull/70459), [@mvladev](https://github.com/mvladev))

### SIG Release

- debian-base is now used instead of busybox as the base image for server images ([#70245](https://github.com/kubernetes/kubernetes/pull/70245), [@ixdy](https://github.com/ixdy))
- Images for cloud-controller-manager, kube-apiserver, kube-controller-manager, and kube-scheduler now contain a minimal /etc/nsswitch.conf and should respect /etc/hosts for lookups ([#69238](https://github.com/kubernetes/kubernetes/pull/69238), [@BenTheElder](https://github.com/BenTheElder))

### SIG Scheduling

- Added metrics for volume scheduling operations ([#59529](https://github.com/kubernetes/kubernetes/pull/59529), [@wackxu](https://github.com/wackxu))
- Improved memory use and performance when processing large numbers of pods containing tolerations ([#65350](https://github.com/kubernetes/kubernetes/pull/65350), [@liggitt](https://github.com/liggitt))
- Fixed a bug in the scheduler that could cause it to go into an infinite loop when all nodes in a zone are removed. ([#69758](https://github.com/kubernetes/kubernetes/pull/69758), [@bsalamat](https://github.com/bsalamat))
- The pod binding cache is now cleared on bind errors to make sure a stale pod binding cache entry is not reused. ([#71212](https://github.com/kubernetes/kubernetes/pull/71212), [@cofyc](https://github.com/cofyc))
- Fixed a scheduler panic due to an internal cache inconsistency ([#71063](https://github.com/kubernetes/kubernetes/pull/71063), [@Huang-Wei](https://github.com/Huang-Wei))
- kube-scheduler is now reported unhealthy if leader election is deadlocked. ([#71085](https://github.com/kubernetes/kubernetes/pull/71085), [@bsalamat](https://github.com/bsalamat))
- Fixed a potential bug where the scheduler could preempt pods unnecessarily. ([#70898](https://github.com/kubernetes/kubernetes/pull/70898), [@Huang-Wei](https://github.com/Huang-Wei))

### SIG Storage

- Fixed CSI volume limits not showing up in the node's capacity and allocatable ([#70540](https://github.com/kubernetes/kubernetes/pull/70540), [@gnufied](https://github.com/gnufied))
- CSI drivers now have access to mountOptions defined on the storage class when attaching volumes. ([#67898](https://github.com/kubernetes/kubernetes/pull/67898), [@bswartz](https://github.com/bswartz))
- Changed the default Azure File mount permission to 0777 ([#69854](https://github.com/kubernetes/kubernetes/pull/69854), [@andyzhangx](https://github.com/andyzhangx))
- Fixed subpath in containerized kubelet. ([#69565](https://github.com/kubernetes/kubernetes/pull/69565), [@jsafrane](https://github.com/jsafrane))
- Fixed a panic on iSCSI volume tear down. ([#69140](https://github.com/kubernetes/kubernetes/pull/69140), [@jsafrane](https://github.com/jsafrane))
- The CSIPersistentVolume feature, i.e. PersistentVolumes with CSIPersistentVolumeSource, is GA. ([#69929](https://github.com/kubernetes/kubernetes/pull/69929), [@jsafrane](https://github.com/jsafrane))
- Fixed the CSIDriver API object to allow missing fields. ([#69331](https://github.com/kubernetes/kubernetes/pull/69331), [@jsafrane](https://github.com/jsafrane))
- Flex volume plugins now support expandvolume (to increase underlying volume capacity) and expandfs (resize filesystem) commands that Flex plugin authors can implement to support expanding in-use Flex PersistentVolumes ([#67851](https://github.com/kubernetes/kubernetes/pull/67851), [@aniket-s-kulkarni](https://github.com/aniket-s-kulkarni))
- Enabled the AttachVolumeLimit feature ([#69225](https://github.com/kubernetes/kubernetes/pull/69225), [@gnufied](https://github.com/gnufied))
- The default storage class annotation for the storage addons has been changed to use the GA variant ([#68345](https://github.com/kubernetes/kubernetes/pull/68345), [@smelchior](https://github.com/smelchior))
- GlusterFS PersistentVolume sources can now reference endpoints in any namespace using the `spec.glusterfs.endpointsNamespace` field. Ensure all kubelets are upgraded to 1.13+ before using this capability; see the sketch after this list. ([#60195](https://github.com/kubernetes/kubernetes/pull/60195), [@humblec](https://github.com/humblec))
- Fixed a GetVolumeLimits log flushing issue ([#69558](https://github.com/kubernetes/kubernetes/pull/69558), [@andyzhangx](https://github.com/andyzhangx))
- The `MountPropagation` feature is unconditionally enabled in v1.13, and can no longer be disabled. ([#68230](https://github.com/kubernetes/kubernetes/pull/68230), [@bertinatto](https://github.com/bertinatto))

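A minimal sketch of a GlusterFS PersistentVolume using the new field; the volume, endpoints and namespace names are placeholders:

```sh
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: PersistentVolume
metadata:
  name: gluster-pv
spec:
  capacity:
    storage: 5Gi
  accessModes: ["ReadWriteMany"]
  glusterfs:
    endpoints: glusterfs-cluster         # Endpoints object listing the gluster peers
    endpointsNamespace: storage-system   # new in 1.13: endpoints may live in another namespace
    path: myvolume
EOF
```
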
### SIG Windows

- `kubelet --system-reserved` and `--kube-reserved` are now supported on Windows nodes ([#69960](https://github.com/kubernetes/kubernetes/pull/69960), [@feiskyer](https://github.com/feiskyer))
- The Windows runtime endpoint is now switched to `npipe:////./pipe/dockershim` from `tcp://localhost:3735`. ([#69516](https://github.com/kubernetes/kubernetes/pull/69516), [@feiskyer](https://github.com/feiskyer))
- Fixed service issues with named targetPort for Windows ([#70076](https://github.com/kubernetes/kubernetes/pull/70076), [@feiskyer](https://github.com/feiskyer))
- Handle Windows named pipes in host mounts. ([#69484](https://github.com/kubernetes/kubernetes/pull/69484), [@ddebroy](https://github.com/ddebroy))
- Fixed an inconsistency in the Windows kernel proxy when updating HNS policy. ([#68923](https://github.com/kubernetes/kubernetes/pull/68923), [@delulu](https://github.com/delulu))

## External Dependencies

- The default etcd server is unchanged at v3.2.24 since Kubernetes 1.12. ([#68318](https://github.com/kubernetes/kubernetes/pull/68318))
- The list of validated docker versions remains unchanged at 1.11.1, 1.12.1, 1.13.1, 17.03, 17.06, 17.09, 18.06 since Kubernetes 1.12. ([#68495](https://github.com/kubernetes/kubernetes/pull/68495))
- The default Go version was updated to 1.11.2. ([#70665](https://github.com/kubernetes/kubernetes/pull/70665))
- The minimum supported Go version was updated to 1.11.2 ([#69386](https://github.com/kubernetes/kubernetes/pull/69386))
- CNI is unchanged at v0.6.0 since Kubernetes 1.10 ([#51250](https://github.com/kubernetes/kubernetes/pull/51250))
- CSI is updated to 1.0.0. Pre-1.0.0 API support is now deprecated. ([#71020](https://github.com/kubernetes/kubernetes/pull/71020))
- The dashboard add-on has been updated to v1.10.0. ([#68450](https://github.com/kubernetes/kubernetes/pull/68450))
- Heapster remains at v1.6.0-beta, but is now retired in Kubernetes 1.13 ([#67074](https://github.com/kubernetes/kubernetes/pull/67074))
- Cluster Autoscaler has been upgraded to v1.13.0 ([#71513](https://github.com/kubernetes/kubernetes/pull/71513))
- kube-dns is unchanged at v1.14.13 since Kubernetes 1.12 ([#68900](https://github.com/kubernetes/kubernetes/pull/68900))
- Influxdb is unchanged at v1.3.3 since Kubernetes 1.10 ([#53319](https://github.com/kubernetes/kubernetes/pull/53319))
- Grafana is unchanged at v4.4.3 since Kubernetes 1.10 ([#53319](https://github.com/kubernetes/kubernetes/pull/53319))
- Kibana has been upgraded to v6.3.2. ([#67582](https://github.com/kubernetes/kubernetes/pull/67582))
- cAdvisor has been updated to v0.32.0 ([#70964](https://github.com/kubernetes/kubernetes/pull/70964))
- fluentd-gcp-scaler has been updated to v0.5.0 ([#68837](https://github.com/kubernetes/kubernetes/pull/68837))
- Fluentd in fluentd-elasticsearch is unchanged at v1.2.4 since Kubernetes 1.11 ([#67434](https://github.com/kubernetes/kubernetes/pull/67434))
- fluentd-elasticsearch has been updated to v2.2.1 ([#68012](https://github.com/kubernetes/kubernetes/pull/68012))
- The fluent-plugin-kubernetes_metadata_filter plugin in fluentd-elasticsearch is unchanged at 2.0.0 since Kubernetes 1.12 ([#67544](https://github.com/kubernetes/kubernetes/pull/67544))
- fluentd-gcp has been updated to v3.2.0 ([#70954](https://github.com/kubernetes/kubernetes/pull/70954))
- OIDC authentication is unchanged at coreos/go-oidc v2 since Kubernetes 1.10 ([#58544](https://github.com/kubernetes/kubernetes/pull/58544))
- Calico was updated to v3.3.1 ([#70932](https://github.com/kubernetes/kubernetes/pull/70932))
- Upgraded crictl on GCE to v1.12.0 ([#69033](https://github.com/kubernetes/kubernetes/pull/69033))
- CoreDNS has been updated to v1.2.6 ([#70799](https://github.com/kubernetes/kubernetes/pull/70799))
- event-exporter has been updated to v0.2.3 ([#67691](https://github.com/kubernetes/kubernetes/pull/67691))
- Es-image remains unchanged at Elasticsearch 6.3.2 since Kubernetes 1.12 ([#67484](https://github.com/kubernetes/kubernetes/pull/67484))
- metrics-server remains unchanged at v0.3.1 since Kubernetes 1.12 ([#68746](https://github.com/kubernetes/kubernetes/pull/68746))
- GLBC remains unchanged at v1.2.3 since Kubernetes 1.12 ([#66793](https://github.com/kubernetes/kubernetes/pull/66793))
- Ingress-gce remains unchanged at v1.2.3 since Kubernetes 1.12 ([#66793](https://github.com/kubernetes/kubernetes/pull/66793))
- ip-masq-agent remains unchanged at v2.1.1 since Kubernetes 1.12 ([#67916](https://github.com/kubernetes/kubernetes/pull/67916))

# v1.13.0-rc.2

[Documentation](https://docs.k8s.io)

@ -164,9 +804,8 @@ filename | sha512 hash

### Other notable changes

* CVE-2018-1002105: Fix critical security issue in kube-apiserver upgrade request proxy handler ([#71411](https://github.com/kubernetes/kubernetes/issues/71411), [@liggitt](https://github.com/liggitt))
* Update Cluster Autoscaler version to 1.13.0-rc.2. Release notes: https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.13.0-rc.2 ([#71452](https://github.com/kubernetes/kubernetes/pull/71452), [@losipiuk](https://github.com/losipiuk))
* Fixes an issue with stuck connections handling error responses ([#71419](https://github.com/kubernetes/kubernetes/pull/71419), [@liggitt](https://github.com/liggitt))
* Fixes an issue with stuck connections handling error responses ([#71412](https://github.com/kubernetes/kubernetes/pull/71412), [@liggitt](https://github.com/liggitt))
* Upgrade Stackdriver Logging Agent addon image to 0.6-1.6.0-1 to use Fluentd v1.2. This provides nanosecond timestamp granularity for logs. ([#70954](https://github.com/kubernetes/kubernetes/pull/70954), [@qingling128](https://github.com/qingling128))
* fixes a runtime error occurring when sorting the output of `kubectl get` with empty results ([#70740](https://github.com/kubernetes/kubernetes/pull/70740), [@mfpierre](https://github.com/mfpierre))
* fix azure disk attach/detach failed forever issue ([#71377](https://github.com/kubernetes/kubernetes/pull/71377), [@andyzhangx](https://github.com/andyzhangx))

23
vendor/k8s.io/kubernetes/Godeps/Godeps.json
generated
vendored
@ -11,6 +11,7 @@
|
||||
"github.com/bazelbuild/bazel-gazelle/cmd/gazelle",
|
||||
"github.com/kubernetes/repo-infra/kazel",
|
||||
"k8s.io/kube-openapi/cmd/openapi-gen",
|
||||
"golang.org/x/lint/golint",
|
||||
"./..."
|
||||
],
|
||||
"Deps": [
|
||||
@ -1601,12 +1602,12 @@
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/go-openapi/jsonpointer",
|
||||
"Comment": "v0.17.2",
|
||||
"Comment": "v0.18.0",
|
||||
"Rev": "ef5f0afec364d3b9396b7b77b43dbe26bf1f8004"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/go-openapi/jsonreference",
|
||||
"Comment": "v0.17.2",
|
||||
"Comment": "v0.18.0",
|
||||
"Rev": "8483a886a90412cd6858df4ea3483dce9c8e35a3"
|
||||
},
|
||||
{
|
||||
@ -1636,7 +1637,7 @@
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/go-openapi/validate",
|
||||
"Comment": "v0.17.2",
|
||||
"Comment": "v0.18.0",
|
||||
"Rev": "d2eab7d93009e9215fc85b2faa2c2f2a98c2af48"
|
||||
},
|
||||
{
|
||||
@ -3422,6 +3423,14 @@
|
||||
"ImportPath": "golang.org/x/crypto/ssh/terminal",
|
||||
"Rev": "de0752318171da717af4ce24d0a2e8626afaeb11"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/lint",
|
||||
"Rev": "8f45f776aaf18cebc8d65861cc70c33c60471952"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/lint/golint",
|
||||
"Rev": "8f45f776aaf18cebc8d65861cc70c33c60471952"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/context",
|
||||
"Rev": "0ed95abb35c445290478a5348a7b38bb154135fd"
|
||||
@ -3586,6 +3595,14 @@
|
||||
"ImportPath": "golang.org/x/tools/go/ast/astutil",
|
||||
"Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/tools/go/gcexportdata",
|
||||
"Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/tools/go/gcimporter15",
|
||||
"Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/tools/go/vcs",
|
||||
"Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32"
|
||||
|
140
vendor/k8s.io/kubernetes/Godeps/LICENSES
generated
vendored
@ -98293,6 +98293,76 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
================================================================================
|
||||
|
||||
|
||||
================================================================================
|
||||
= vendor/golang.org/x/lint licensed under: =
|
||||
|
||||
Copyright (c) 2013 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
= vendor/golang.org/x/lint/LICENSE 4c728948788b1a02f33ae4e906546eef
|
||||
================================================================================
|
||||
|
||||
|
||||
================================================================================
|
||||
= vendor/golang.org/x/lint/golint licensed under: =
|
||||
|
||||
Copyright (c) 2013 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
= vendor/golang.org/x/lint/LICENSE 4c728948788b1a02f33ae4e906546eef
|
||||
================================================================================
|
||||
|
||||
|
||||
================================================================================
|
||||
= vendor/golang.org/x/net/context licensed under: =
|
||||
|
||||
@ -99728,6 +99798,76 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
================================================================================
|
||||
|
||||
|
||||
================================================================================
|
||||
= vendor/golang.org/x/tools/go/gcexportdata licensed under: =
|
||||
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
= vendor/golang.org/x/tools/LICENSE 5d4950ecb7b26d2c5e4e7b4e0dd74707
|
||||
================================================================================
|
||||
|
||||
|
||||
================================================================================
|
||||
= vendor/golang.org/x/tools/go/gcimporter15 licensed under: =
|
||||
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
= vendor/golang.org/x/tools/LICENSE 5d4950ecb7b26d2c5e4e7b4e0dd74707
|
||||
================================================================================
|
||||
|
||||
|
||||
================================================================================
|
||||
= vendor/golang.org/x/tools/go/vcs licensed under: =
|
||||
|
||||
|
2
vendor/k8s.io/kubernetes/api/openapi-spec/swagger.json
generated
vendored
@ -2,7 +2,7 @@
|
||||
"swagger": "2.0",
|
||||
"info": {
|
||||
"title": "Kubernetes",
|
||||
"version": "v1.13.0"
|
||||
"version": "v1.13.2"
|
||||
},
|
||||
"paths": {
|
||||
"/api/": {
|
||||
|
4
vendor/k8s.io/kubernetes/build/build-image/cross/Dockerfile
generated
vendored
@ -15,7 +15,7 @@
|
||||
# This file creates a standard build environment for building cross
|
||||
# platform go binary for the architecture kubernetes cares about.
|
||||
|
||||
FROM golang:1.11.2
|
||||
FROM golang:1.11.4
|
||||
|
||||
ENV GOARM 7
|
||||
ENV KUBE_DYNAMIC_CROSSPLATFORMS \
|
||||
@ -44,7 +44,7 @@ RUN apt-get update \
|
||||
# Use dynamic cgo linking for architectures other than amd64 for the server platforms
|
||||
# To install crossbuild essential for other architectures add the following repository.
|
||||
RUN echo "deb http://archive.ubuntu.com/ubuntu xenial main universe" > /etc/apt/sources.list.d/cgocrosscompiling.list \
|
||||
&& apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 40976EAF437D05B5 3B4FE6ACC0B21F32 \
|
||||
&& apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 40976EAF437D05B5 3B4FE6ACC0B21F32 \
|
||||
&& apt-get update \
|
||||
&& apt-get install -y build-essential \
|
||||
&& for platform in ${KUBE_DYNAMIC_CROSSPLATFORMS}; do apt-get install -y crossbuild-essential-${platform}; done \
|
||||
|
2
vendor/k8s.io/kubernetes/build/build-image/cross/VERSION
generated
vendored
@ -1 +1 @@
|
||||
v1.11.2-1
|
||||
v1.11.4-1
|
||||
|
6
vendor/k8s.io/kubernetes/build/root/WORKSPACE
generated
vendored
@ -4,8 +4,8 @@ load("//build:workspace.bzl", "CRI_TOOLS_VERSION")
|
||||
|
||||
http_archive(
|
||||
name = "io_bazel_rules_go",
|
||||
sha256 = "f87fa87475ea107b3c69196f39c82b7bbf58fe27c62a338684c20ca17d1d8613",
|
||||
urls = mirror("https://github.com/bazelbuild/rules_go/releases/download/0.16.2/rules_go-0.16.2.tar.gz"),
|
||||
sha256 = "7be7dc01f1e0afdba6c8eb2b43d2fa01c743be1b9273ab1eaf6c233df078d705",
|
||||
urls = mirror("https://github.com/bazelbuild/rules_go/releases/download/0.16.5/rules_go-0.16.5.tar.gz"),
|
||||
)
|
||||
|
||||
http_archive(
|
||||
@ -49,7 +49,7 @@ load("@io_bazel_rules_docker//docker:docker.bzl", "docker_pull", "docker_reposit
|
||||
go_rules_dependencies()
|
||||
|
||||
go_register_toolchains(
|
||||
go_version = "1.11.2",
|
||||
go_version = "1.11.4",
|
||||
)
|
||||
|
||||
docker_repositories()
|
||||
|
15
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/bgpconfigurations-crd.yaml
generated
vendored
Normal file
@ -0,0 +1,15 @@
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: bgpconfigurations.crd.projectcalico.org
|
||||
labels:
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
spec:
|
||||
scope: Cluster
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: BGPConfiguration
|
||||
plural: bgpconfigurations
|
||||
singular: bgpconfiguration
|
2
vendor/k8s.io/kubernetes/cluster/addons/dashboard/dashboard-controller.yaml
generated
vendored
@ -31,7 +31,7 @@ spec:
|
||||
priorityClassName: system-cluster-critical
|
||||
containers:
|
||||
- name: kubernetes-dashboard
|
||||
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.0
|
||||
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
|
12
vendor/k8s.io/kubernetes/cluster/gce/gci/configure-helper.sh
generated
vendored
@ -1089,6 +1089,11 @@ EOF
|
||||
}
|
||||
|
||||
function create-node-problem-detector-kubeconfig {
|
||||
local apiserver_address="${1}"
|
||||
if [[ -z "${apiserver_address}" ]]; then
|
||||
echo "Must provide API server address to create node-problem-detector kubeconfig file!"
|
||||
exit 1
|
||||
fi
|
||||
echo "Creating node-problem-detector kubeconfig file"
|
||||
mkdir -p /var/lib/node-problem-detector
|
||||
cat <<EOF >/var/lib/node-problem-detector/kubeconfig
|
||||
@ -1101,6 +1106,7 @@ users:
|
||||
clusters:
|
||||
- name: local
|
||||
cluster:
|
||||
server: https://${apiserver_address}
|
||||
certificate-authority-data: ${CA_CERT}
|
||||
contexts:
|
||||
- context:
|
||||
@ -1230,7 +1236,7 @@ function start-node-problem-detector {
|
||||
local -r km_config="${KUBE_HOME}/node-problem-detector/config/kernel-monitor.json"
|
||||
# TODO(random-liu): Handle this for alternative container runtime.
|
||||
local -r dm_config="${KUBE_HOME}/node-problem-detector/config/docker-monitor.json"
|
||||
local -r custom_km_config="${KUBE_HOME}/node-problem-detector/config/kernel-monitor-counter.json"
|
||||
local -r custom_km_config="${KUBE_HOME}/node-problem-detector/config/kernel-monitor-counter.json,${KUBE_HOME}/node-problem-detector/config/systemd-monitor-counter.json,${KUBE_HOME}/node-problem-detector/config/docker-monitor-counter.json"
|
||||
echo "Using node problem detector binary at ${npd_bin}"
|
||||
local flags="${NPD_TEST_LOG_LEVEL:-"--v=2"} ${NPD_TEST_ARGS:-}"
|
||||
flags+=" --logtostderr"
|
||||
@ -2622,7 +2628,7 @@ function setup-node-termination-handler-manifest {
|
||||
local -r nth_manifest="/etc/kubernetes/$1/$2/daemonset.yaml"
|
||||
if [[ -n "${NODE_TERMINATION_HANDLER_IMAGE}" ]]; then
|
||||
sed -i "s|image:.*|image: ${NODE_TERMINATION_HANDLER_IMAGE}|" "${nth_manifest}"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# Setups manifests for ingress controller and gce-specific policies for service controller.
|
||||
@ -2826,7 +2832,7 @@ function main() {
|
||||
create-kubeproxy-user-kubeconfig
|
||||
fi
|
||||
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
|
||||
create-node-problem-detector-kubeconfig
|
||||
create-node-problem-detector-kubeconfig ${KUBERNETES_MASTER_NAME}
|
||||
fi
|
||||
fi
|
||||
|
||||
|
4
vendor/k8s.io/kubernetes/cluster/gce/gci/configure.sh
generated
vendored
@ -26,8 +26,8 @@ set -o pipefail
|
||||
### Hardcoded constants
|
||||
DEFAULT_CNI_VERSION="v0.6.0"
|
||||
DEFAULT_CNI_SHA1="d595d3ded6499a64e8dac02466e2f5f2ce257c9f"
|
||||
DEFAULT_NPD_VERSION="v0.5.0"
|
||||
DEFAULT_NPD_SHA1="650ecfb2ae495175ee43706d0bd862a1ea7f1395"
|
||||
DEFAULT_NPD_VERSION="v0.6.0"
|
||||
DEFAULT_NPD_SHA1="a28e960a21bb74bc0ae09c267b6a340f30e5b3a6"
|
||||
DEFAULT_CRICTL_VERSION="v1.12.0"
|
||||
DEFAULT_CRICTL_SHA1="82ef8b44849f9da0589c87e9865d4716573eec7f"
|
||||
DEFAULT_MOUNTER_TAR_SHA="8003b798cf33c7f91320cd6ee5cec4fa22244571"
|
||||
|
2
vendor/k8s.io/kubernetes/cluster/gce/manifests/cluster-autoscaler.manifest
generated
vendored
@ -17,7 +17,7 @@
|
||||
"containers": [
|
||||
{
|
||||
"name": "cluster-autoscaler",
|
||||
"image": "k8s.gcr.io/cluster-autoscaler:v1.13.0",
|
||||
"image": "k8s.gcr.io/cluster-autoscaler:v1.13.1",
|
||||
"livenessProbe": {
|
||||
"httpGet": {
|
||||
"path": "/health-check",
|
||||
|
1
vendor/k8s.io/kubernetes/cmd/kube-scheduler/app/options/options.go
generated
vendored
@ -104,6 +104,7 @@ func NewOptions() (*Options, error) {
|
||||
},
|
||||
}
|
||||
|
||||
o.Authentication.TolerateInClusterLookupFailure = true
|
||||
o.Authentication.RemoteKubeConfigFileOptional = true
|
||||
o.Authorization.RemoteKubeConfigFileOptional = true
|
||||
o.Authorization.AlwaysAllowPaths = []string{"/healthz"}
|
||||
|
8
vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/join.go
generated
vendored
@ -316,7 +316,7 @@ func NewJoin(cfgPath string, defaultcfg *kubeadmapiv1beta1.JoinConfiguration, ig
|
||||
internalCfg.NodeRegistration.CRISocket = defaultcfg.NodeRegistration.CRISocket
|
||||
}
|
||||
|
||||
if defaultcfg.ControlPlane != nil {
|
||||
if internalCfg.ControlPlane != nil {
|
||||
if err := configutil.VerifyAPIServerBindAddress(internalCfg.ControlPlane.LocalAPIEndpoint.AdvertiseAddress); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -586,10 +586,12 @@ func (j *Join) PostInstallControlPlane(initConfiguration *kubeadmapi.InitConfigu
|
||||
func waitForTLSBootstrappedClient() error {
|
||||
fmt.Println("[tlsbootstrap] Waiting for the kubelet to perform the TLS Bootstrap...")
|
||||
|
||||
kubeletKubeConfig := filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.KubeletKubeConfigFileName)
|
||||
// Loop on every falsy return. Return with an error if raised. Exit successfully if true is returned.
|
||||
return wait.PollImmediate(kubeadmconstants.APICallRetryInterval, kubeadmconstants.TLSBootstrapTimeout, func() (bool, error) {
|
||||
_, err := os.Stat(kubeletKubeConfig)
|
||||
// Check that we can create a client set out of the kubelet kubeconfig. This ensures not
|
||||
// only that the kubeconfig file exists, but that other files required by it also exist (like
|
||||
// client certificate and key)
|
||||
_, err := kubeconfigutil.ClientSetFromFile(kubeadmconstants.GetKubeletKubeConfigPath())
|
||||
return (err == nil), nil
|
||||
})
|
||||
}
|
||||
|
9 vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/bootstraptoken.go generated vendored
@ -18,7 +18,6 @@ package phases
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
@ -26,7 +25,6 @@ import (
|
||||
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/options"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow"
|
||||
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
|
||||
clusterinfophase "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo"
|
||||
nodebootstraptokenphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node"
|
||||
"k8s.io/kubernetes/pkg/util/normalizer"
|
||||
@ -51,7 +49,7 @@ var (
|
||||
type bootstrapTokenData interface {
|
||||
Cfg() *kubeadmapi.InitConfiguration
|
||||
Client() (clientset.Interface, error)
|
||||
KubeConfigDir() string
|
||||
KubeConfigPath() string
|
||||
SkipTokenPrint() bool
|
||||
Tokens() []string
|
||||
}
|
||||
@ -66,7 +64,7 @@ func NewBootstrapTokenPhase() workflow.Phase {
|
||||
Long: bootstrapTokenLongDesc,
|
||||
InheritFlags: []string{
|
||||
options.CfgPath,
|
||||
options.KubeconfigDir,
|
||||
options.KubeconfigPath,
|
||||
options.SkipTokenPrint,
|
||||
},
|
||||
Run: runBoostrapToken,
|
||||
@ -113,8 +111,7 @@ func runBoostrapToken(c workflow.RunData) error {
|
||||
}
|
||||
|
||||
// Create the cluster-info ConfigMap with the associated RBAC rules
|
||||
adminKubeConfigPath := filepath.Join(data.KubeConfigDir(), kubeadmconstants.AdminKubeConfigFileName)
|
||||
if err := clusterinfophase.CreateBootstrapConfigMapIfNotExists(client, adminKubeConfigPath); err != nil {
|
||||
if err := clusterinfophase.CreateBootstrapConfigMapIfNotExists(client, data.KubeConfigPath()); err != nil {
|
||||
return errors.Wrap(err, "error creating bootstrap ConfigMap")
|
||||
}
|
||||
if err := clusterinfophase.CreateClusterInfoRBACRules(client); err != nil {
|
||||
|
3 vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/etcd/etcd.go generated vendored
@ -196,12 +196,13 @@ func NewFromCluster(client clientset.Interface, certificatesDir string) (*Client
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error syncing endpoints with etc")
|
||||
}
|
||||
klog.V(1).Infof("update etcd endpoints: %s", strings.Join(etcdClient.Endpoints, ","))
|
||||
|
||||
return etcdClient, nil
|
||||
}
|
||||
|
||||
// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
|
||||
func (c Client) Sync() error {
|
||||
func (c *Client) Sync() error {
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: c.Endpoints,
|
||||
DialTimeout: 20 * time.Second,
|
||||
|
1 vendor/k8s.io/kubernetes/hack/godep-save.sh generated vendored
@ -63,6 +63,7 @@ REQUIRED_BINS=(
|
||||
"github.com/bazelbuild/bazel-gazelle/cmd/gazelle"
|
||||
"github.com/kubernetes/repo-infra/kazel"
|
||||
"k8s.io/kube-openapi/cmd/openapi-gen"
|
||||
"golang.org/x/lint/golint"
|
||||
"./..."
|
||||
)
|
||||
|
||||
|
8 vendor/k8s.io/kubernetes/hack/jenkins/benchmark-dockerized.sh generated vendored
@ -39,9 +39,9 @@ retry go get github.com/cespare/prettybench
|
||||
export KUBE_RACE=" "
|
||||
# Disable coverage report
|
||||
export KUBE_COVER="n"
|
||||
export ARTIFACTS_DIR=${WORKSPACE}/_artifacts
|
||||
export ARTIFACTS=${ARTIFACTS:-"${WORKSPACE}/artifacts"}
|
||||
|
||||
mkdir -p "${ARTIFACTS_DIR}"
|
||||
mkdir -p "${ARTIFACTS}"
|
||||
cd /go/src/k8s.io/kubernetes
|
||||
|
||||
./hack/install-etcd.sh
|
||||
@ -49,5 +49,5 @@ cd /go/src/k8s.io/kubernetes
|
||||
# Run the benchmark tests and pretty-print the results into a separate file.
|
||||
make test-integration WHAT="$*" KUBE_TEST_ARGS="-run='XXX' -bench=. -benchmem" \
|
||||
| tee \
|
||||
>(prettybench -no-passthrough > ${ARTIFACTS_DIR}/BenchmarkResults.txt) \
|
||||
>(go run test/integration/benchmark/jsonify/main.go ${ARTIFACTS_DIR}/BenchmarkResults_benchmark_$(date -u +%Y-%m-%dT%H:%M:%SZ).json || cat > /dev/null)
|
||||
>(prettybench -no-passthrough > ${ARTIFACTS}/BenchmarkResults.txt) \
|
||||
>(go run test/integration/benchmark/jsonify/main.go ${ARTIFACTS}/BenchmarkResults_benchmark_$(date -u +%Y-%m-%dT%H:%M:%SZ).json || cat > /dev/null)
|
||||
|
6 vendor/k8s.io/kubernetes/hack/jenkins/gotest.sh generated vendored
@ -36,8 +36,10 @@ go get -u github.com/jstemmer/go-junit-report
|
||||
|
||||
# Enable the Go race detector.
|
||||
export KUBE_RACE=-race
|
||||
# Produce a JUnit-style XML test report for Jenkins.
|
||||
export KUBE_JUNIT_REPORT_DIR=${WORKSPACE}/_artifacts
|
||||
# Set artifacts directory
|
||||
export ARTIFACTS=${ARTIFACTS:-"${WORKSPACE}/artifacts"}
|
||||
# Produce a JUnit-style XML test report
|
||||
export KUBE_JUNIT_REPORT_DIR="${ARTIFACTS}"
|
||||
# Save the verbose stdout as well.
|
||||
export KUBE_KEEP_VERBOSE_TEST_OUTPUT=y
|
||||
|
||||
|
9 vendor/k8s.io/kubernetes/hack/jenkins/test-dockerized.sh generated vendored
@ -39,15 +39,16 @@ retry go get github.com/jstemmer/go-junit-report
|
||||
export KUBE_RACE=-race
|
||||
# Disable coverage report
|
||||
export KUBE_COVER="n"
|
||||
# Produce a JUnit-style XML test report for Jenkins.
|
||||
export KUBE_JUNIT_REPORT_DIR=${WORKSPACE}/artifacts
|
||||
export ARTIFACTS_DIR=${WORKSPACE}/artifacts
|
||||
# Set artifacts directory
|
||||
export ARTIFACTS=${ARTIFACTS:-"${WORKSPACE}/artifacts"}
|
||||
# Produce a JUnit-style XML test report
|
||||
export KUBE_JUNIT_REPORT_DIR="${ARTIFACTS}"
|
||||
# Save the verbose stdout as well.
|
||||
export KUBE_KEEP_VERBOSE_TEST_OUTPUT=y
|
||||
export KUBE_INTEGRATION_TEST_MAX_CONCURRENCY=4
|
||||
export LOG_LEVEL=4
|
||||
|
||||
cd /go/src/k8s.io/kubernetes
|
||||
cd "${GOPATH}/src/k8s.io/kubernetes"
|
||||
|
||||
make generated_files
|
||||
go install ./cmd/...
|
||||
|
8 vendor/k8s.io/kubernetes/hack/jenkins/verify-dockerized.sh generated vendored
@ -31,13 +31,13 @@ retry() {
|
||||
|
||||
export PATH=${GOPATH}/bin:${PWD}/third_party/etcd:/usr/local/go/bin:${PATH}
|
||||
|
||||
# Produce a JUnit-style XML test report
|
||||
export KUBE_JUNIT_REPORT_DIR=${WORKSPACE}/artifacts
|
||||
# Set artifacts directory
|
||||
export ARTIFACTS_DIR=${WORKSPACE}/artifacts
|
||||
export ARTIFACTS=${ARTIFACTS:-"${WORKSPACE}/artifacts"}
|
||||
# Produce a JUnit-style XML test report
|
||||
export KUBE_JUNIT_REPORT_DIR="${ARTIFACTS}"
|
||||
|
||||
export LOG_LEVEL=4
|
||||
|
||||
cd /go/src/k8s.io/kubernetes
|
||||
cd "${GOPATH}/src/k8s.io/kubernetes"
|
||||
|
||||
make verify
|
||||
|
4 vendor/k8s.io/kubernetes/hack/lib/etcd.sh generated vendored
@ -70,8 +70,8 @@ kube::etcd::start() {
|
||||
|
||||
# Start etcd
|
||||
ETCD_DIR=${ETCD_DIR:-$(mktemp -d 2>/dev/null || mktemp -d -t test-etcd.XXXXXX)}
|
||||
if [[ -d "${ARTIFACTS_DIR:-}" ]]; then
|
||||
ETCD_LOGFILE="${ARTIFACTS_DIR}/etcd.$(uname -n).$(id -un).log.DEBUG.$(date +%Y%m%d-%H%M%S).$$"
|
||||
if [[ -d "${ARTIFACTS:-}" ]]; then
|
||||
ETCD_LOGFILE="${ARTIFACTS}/etcd.$(uname -n).$(id -un).log.DEBUG.$(date +%Y%m%d-%H%M%S).$$"
|
||||
else
|
||||
ETCD_LOGFILE=${ETCD_LOGFILE:-"/dev/null"}
|
||||
fi
|
||||
|
4 vendor/k8s.io/kubernetes/hack/make-rules/test.sh generated vendored
@ -120,6 +120,10 @@ KUBE_TEST_API_VERSIONS="${KUBE_TEST_API_VERSIONS:-${ALL_VERSIONS_CSV}}"
|
||||
# once we have multiple group supports
|
||||
# Create a junit-style XML test report in this directory if set.
|
||||
KUBE_JUNIT_REPORT_DIR=${KUBE_JUNIT_REPORT_DIR:-}
|
||||
# If KUBE_JUNIT_REPORT_DIR is unset, and ARTIFACTS is set, then have them match.
|
||||
if [[ -z "${KUBE_JUNIT_REPORT_DIR:-}" && -n "${ARTIFACTS:-}" ]]; then
|
||||
export KUBE_JUNIT_REPORT_DIR="${ARTIFACTS}"
|
||||
fi
|
||||
# Set to 'y' to keep the verbose stdout from tests when KUBE_JUNIT_REPORT_DIR is
|
||||
# set.
|
||||
KUBE_KEEP_VERBOSE_TEST_OUTPUT=${KUBE_KEEP_VERBOSE_TEST_OUTPUT:-n}
|
||||
|
5 vendor/k8s.io/kubernetes/hack/make-rules/verify.sh generated vendored
@ -21,6 +21,11 @@ set -o pipefail
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
||||
source "${KUBE_ROOT}/hack/lib/util.sh"
|
||||
|
||||
# If KUBE_JUNIT_REPORT_DIR is unset, and ARTIFACTS is set, then have them match.
|
||||
if [[ -z "${KUBE_JUNIT_REPORT_DIR:-}" && -n "${ARTIFACTS:-}" ]]; then
|
||||
export KUBE_JUNIT_REPORT_DIR="${ARTIFACTS}"
|
||||
fi
|
||||
|
||||
# include shell2junit library
|
||||
source "${KUBE_ROOT}/third_party/forked/shell2junit/sh2ju.sh"
|
||||
|
||||
|
4 vendor/k8s.io/kubernetes/hack/testdata/diff/configmap.yaml generated vendored Normal file
@@ -0,0 +1,4 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: test
17 vendor/k8s.io/kubernetes/hack/testdata/diff/deployment.yaml generated vendored Normal file
@@ -0,0 +1,17 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test
spec:
  replicas: 3
  selector:
    matchLabels:
      name: test
  template:
    metadata:
      labels:
        name: test
    spec:
      containers:
      - name: nginx
        image: nginx
8 vendor/k8s.io/kubernetes/hack/testdata/diff/pod.yaml generated vendored Normal file
@@ -0,0 +1,8 @@
apiVersion: v1
kind: Pod
metadata:
  name: test
spec:
  containers:
  - name: nginx
    image: nginx
4 vendor/k8s.io/kubernetes/hack/testdata/diff/secret.yaml generated vendored Normal file
@@ -0,0 +1,4 @@
apiVersion: v1
kind: Secret
metadata:
  name: test
8 vendor/k8s.io/kubernetes/hack/verify-godeps.sh generated vendored
@ -94,9 +94,9 @@ pushd "${KUBE_ROOT}" > /dev/null 2>&1
|
||||
echo "(The above output can be saved as godepdiff.patch if you're not running this locally)" >&2
|
||||
echo "(The patch file should also be exported as a build artifact if run through CI)" >&2
|
||||
KEEP_TMP=true
|
||||
if [[ -f godepdiff.patch && -d "${ARTIFACTS_DIR:-}" ]]; then
|
||||
if [[ -f godepdiff.patch && -d "${ARTIFACTS:-}" ]]; then
|
||||
echo "Copying patch to artifacts.."
|
||||
cp godepdiff.patch "${ARTIFACTS_DIR:-}/"
|
||||
cp godepdiff.patch "${ARTIFACTS:-}/"
|
||||
fi
|
||||
ret=1
|
||||
fi
|
||||
@ -111,9 +111,9 @@ pushd "${KUBE_ROOT}" > /dev/null 2>&1
|
||||
echo "(The above output can be saved as godepdiff.patch if you're not running this locally)" >&2
|
||||
echo "(The patch file should also be exported as a build artifact if run through CI)" >&2
|
||||
KEEP_TMP=true
|
||||
if [[ -f vendordiff.patch && -d "${ARTIFACTS_DIR:-}" ]]; then
|
||||
if [[ -f vendordiff.patch && -d "${ARTIFACTS:-}" ]]; then
|
||||
echo "Copying patch to artifacts.."
|
||||
cp vendordiff.patch "${ARTIFACTS_DIR:-}/"
|
||||
cp vendordiff.patch "${ARTIFACTS:-}/"
|
||||
fi
|
||||
ret=1
|
||||
fi
|
||||
|
14 vendor/k8s.io/kubernetes/hack/verify-golint.sh generated vendored
@ -23,11 +23,15 @@ source "${KUBE_ROOT}/hack/lib/init.sh"
|
||||
|
||||
kube::golang::verify_go_version
|
||||
|
||||
if ! which golint > /dev/null; then
|
||||
echo 'Can not find golint, install with:'
|
||||
echo 'go get -u golang.org/x/lint/golint'
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Ensure that we find the binaries we build before anything else.
|
||||
export GOBIN="${KUBE_OUTPUT_BINPATH}"
|
||||
PATH="${GOBIN}:${PATH}"
|
||||
|
||||
# Install golint from vendor
|
||||
echo 'installing golint from vendor'
|
||||
go install k8s.io/kubernetes/vendor/golang.org/x/lint/golint
|
||||
|
||||
|
||||
cd "${KUBE_ROOT}"
|
||||
|
||||
|
6 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/BUILD generated vendored
@ -26,7 +26,10 @@ go_library(
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["defaults_test.go"],
|
||||
srcs = [
|
||||
"conversion_test.go",
|
||||
"defaults_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/api/legacyscheme:go_default_library",
|
||||
@ -37,6 +40,7 @@ go_test(
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
"//vendor/k8s.io/utils/pointer:go_default_library",
|
||||
],
|
||||
)
|
||||
|
12 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/conversion.go generated vendored
@ -185,8 +185,10 @@ func Convert_v2beta1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *au
|
||||
}
|
||||
|
||||
func Convert_autoscaling_PodsMetricSource_To_v2beta1_PodsMetricSource(in *autoscaling.PodsMetricSource, out *autoscalingv2beta1.PodsMetricSource, s conversion.Scope) error {
|
||||
targetAverageValue := *in.Target.AverageValue
|
||||
out.TargetAverageValue = targetAverageValue
|
||||
if in.Target.AverageValue != nil {
|
||||
targetAverageValue := *in.Target.AverageValue
|
||||
out.TargetAverageValue = targetAverageValue
|
||||
}
|
||||
|
||||
out.MetricName = in.Metric.Name
|
||||
out.Selector = in.Metric.Selector
|
||||
@ -247,8 +249,10 @@ func Convert_autoscaling_ObjectMetricStatus_To_v2beta1_ObjectMetricStatus(in *au
|
||||
}
|
||||
out.MetricName = in.Metric.Name
|
||||
out.Selector = in.Metric.Selector
|
||||
currentAverageValue := *in.Current.AverageValue
|
||||
out.AverageValue = &currentAverageValue
|
||||
if in.Current.AverageValue != nil {
|
||||
currentAverageValue := *in.Current.AverageValue
|
||||
out.AverageValue = &currentAverageValue
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
84 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/conversion_test.go generated vendored Normal file
@ -0,0 +1,84 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v2beta1
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"k8s.io/api/autoscaling/v2beta1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/apis/autoscaling"
|
||||
)
|
||||
|
||||
// Testing nil pointer panic uncovered by #70806
|
||||
// TODO(yue9944882): Test nil/empty conversion across all resource types
|
||||
func TestNilOrEmptyConversion(t *testing.T) {
|
||||
scheme := runtime.NewScheme()
|
||||
assert.NoError(t, addConversionFuncs(scheme))
|
||||
|
||||
testCases := []struct {
|
||||
obj1 interface{}
|
||||
obj2 interface{}
|
||||
}{
|
||||
{
|
||||
obj1: &autoscaling.ExternalMetricSource{},
|
||||
obj2: &v2beta1.ExternalMetricSource{},
|
||||
},
|
||||
{
|
||||
obj1: &autoscaling.ExternalMetricStatus{},
|
||||
obj2: &v2beta1.ExternalMetricStatus{},
|
||||
},
|
||||
{
|
||||
obj1: &autoscaling.PodsMetricSource{},
|
||||
obj2: &v2beta1.PodsMetricSource{},
|
||||
},
|
||||
{
|
||||
obj1: &autoscaling.PodsMetricStatus{},
|
||||
obj2: &v2beta1.PodsMetricStatus{},
|
||||
},
|
||||
{
|
||||
obj1: &autoscaling.ObjectMetricSource{},
|
||||
obj2: &v2beta1.ObjectMetricSource{},
|
||||
},
|
||||
{
|
||||
obj1: &autoscaling.ObjectMetricStatus{},
|
||||
obj2: &v2beta1.ObjectMetricStatus{},
|
||||
},
|
||||
{
|
||||
obj1: &autoscaling.ResourceMetricSource{},
|
||||
obj2: &v2beta1.ResourceMetricSource{},
|
||||
},
|
||||
{
|
||||
obj1: &autoscaling.ResourceMetricStatus{},
|
||||
obj2: &v2beta1.ResourceMetricStatus{},
|
||||
},
|
||||
{
|
||||
obj1: &autoscaling.HorizontalPodAutoscaler{},
|
||||
obj2: &v2beta1.HorizontalPodAutoscaler{},
|
||||
},
|
||||
{
|
||||
obj1: &autoscaling.MetricTarget{},
|
||||
obj2: &v2beta1.CrossVersionObjectReference{},
|
||||
},
|
||||
}
|
||||
for _, testCase := range testCases {
|
||||
assert.NoError(t, scheme.Convert(testCase.obj1, testCase.obj2, nil))
|
||||
assert.NoError(t, scheme.Convert(testCase.obj2, testCase.obj1, nil))
|
||||
}
|
||||
}
|
@ -68,6 +68,7 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri
|
||||
newVM := compute.VirtualMachine{
|
||||
Location: vm.Location,
|
||||
VirtualMachineProperties: &compute.VirtualMachineProperties{
|
||||
HardwareProfile: vm.HardwareProfile,
|
||||
StorageProfile: &compute.StorageProfile{
|
||||
DataDisks: &disks,
|
||||
},
|
||||
@ -132,6 +133,7 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t
|
||||
newVM := compute.VirtualMachine{
|
||||
Location: vm.Location,
|
||||
VirtualMachineProperties: &compute.VirtualMachineProperties{
|
||||
HardwareProfile: vm.HardwareProfile,
|
||||
StorageProfile: &compute.StorageProfile{
|
||||
DataDisks: &disks,
|
||||
},
|
||||
|
29 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_vmss.go generated vendored
@ -67,7 +67,17 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
|
||||
CreateOption: "attach",
|
||||
})
|
||||
}
|
||||
vm.StorageProfile.DataDisks = &disks
|
||||
newVM := compute.VirtualMachineScaleSetVM{
|
||||
Sku: vm.Sku,
|
||||
Location: vm.Location,
|
||||
VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
|
||||
HardwareProfile: vm.HardwareProfile,
|
||||
StorageProfile: &compute.StorageProfile{
|
||||
OsDisk: vm.StorageProfile.OsDisk,
|
||||
DataDisks: &disks,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
@ -77,7 +87,7 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
|
||||
defer ss.vmssVMCache.Delete(key)
|
||||
|
||||
klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s)", nodeResourceGroup, nodeName, diskName)
|
||||
_, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, vm)
|
||||
_, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM)
|
||||
if err != nil {
|
||||
detail := err.Error()
|
||||
if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) {
|
||||
@ -126,7 +136,18 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No
|
||||
return fmt.Errorf("detach azure disk failure, disk %s not found, diskURI: %s", diskName, diskURI)
|
||||
}
|
||||
|
||||
vm.StorageProfile.DataDisks = &disks
|
||||
newVM := compute.VirtualMachineScaleSetVM{
|
||||
Sku: vm.Sku,
|
||||
Location: vm.Location,
|
||||
VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
|
||||
HardwareProfile: vm.HardwareProfile,
|
||||
StorageProfile: &compute.StorageProfile{
|
||||
OsDisk: vm.StorageProfile.OsDisk,
|
||||
DataDisks: &disks,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
@ -135,7 +156,7 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No
|
||||
defer ss.vmssVMCache.Delete(key)
|
||||
|
||||
klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s)", nodeResourceGroup, nodeName, diskName)
|
||||
_, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, vm)
|
||||
_, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM)
|
||||
if err != nil {
|
||||
klog.Errorf("azureDisk - detach disk(%s) from %s failed, err: %v", diskName, nodeName, err)
|
||||
} else {
|
||||
|
13 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances.go generated vendored
@ -96,7 +96,8 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
|
||||
addresses := []v1.NodeAddress{
|
||||
{Type: v1.NodeHostName, Address: string(name)},
|
||||
}
|
||||
for _, address := range ipAddress.IPV4.IPAddress {
|
||||
if len(ipAddress.IPV4.IPAddress) > 0 && len(ipAddress.IPV4.IPAddress[0].PrivateIP) > 0 {
|
||||
address := ipAddress.IPV4.IPAddress[0]
|
||||
addresses = append(addresses, v1.NodeAddress{
|
||||
Type: v1.NodeInternalIP,
|
||||
Address: address.PrivateIP,
|
||||
@ -108,7 +109,8 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
|
||||
})
|
||||
}
|
||||
}
|
||||
for _, address := range ipAddress.IPV6.IPAddress {
|
||||
if len(ipAddress.IPV6.IPAddress) > 0 && len(ipAddress.IPV6.IPAddress[0].PrivateIP) > 0 {
|
||||
address := ipAddress.IPV6.IPAddress[0]
|
||||
addresses = append(addresses, v1.NodeAddress{
|
||||
Type: v1.NodeInternalIP,
|
||||
Address: address.PrivateIP,
|
||||
@ -120,6 +122,13 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if len(addresses) == 1 {
|
||||
// No IP addresses is got from instance metadata service, clean up cache and report errors.
|
||||
az.metadata.imsCache.Delete(metadataCacheKey)
|
||||
return nil, fmt.Errorf("get empty IP addresses from instance metadata service")
|
||||
}
|
||||
|
||||
return addresses, nil
|
||||
}
|
||||
|
||||
|
121 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances_test.go generated vendored
@ -21,10 +21,12 @@ import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
@ -216,3 +218,122 @@ func TestInstanceShutdownByProviderID(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeAddresses(t *testing.T) {
|
||||
cloud := getTestCloud()
|
||||
cloud.Config.UseInstanceMetadata = true
|
||||
metadataTemplate := `{"compute":{"name":"%s"},"network":{"interface":[{"ipv4":{"ipAddress":[{"privateIpAddress":"%s","publicIpAddress":"%s"}]},"ipv6":{"ipAddress":[{"privateIpAddress":"%s","publicIpAddress":"%s"}]}}]}}`
|
||||
|
||||
testcases := []struct {
|
||||
name string
|
||||
nodeName string
|
||||
ipV4 string
|
||||
ipV6 string
|
||||
ipV4Public string
|
||||
ipV6Public string
|
||||
expected []v1.NodeAddress
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "NodeAddresses should get both ipV4 and ipV6 private addresses",
|
||||
nodeName: "vm1",
|
||||
ipV4: "10.240.0.1",
|
||||
ipV6: "1111:11111:00:00:1111:1111:000:111",
|
||||
expected: []v1.NodeAddress{
|
||||
{
|
||||
Type: v1.NodeHostName,
|
||||
Address: "vm1",
|
||||
},
|
||||
{
|
||||
Type: v1.NodeInternalIP,
|
||||
Address: "10.240.0.1",
|
||||
},
|
||||
{
|
||||
Type: v1.NodeInternalIP,
|
||||
Address: "1111:11111:00:00:1111:1111:000:111",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "NodeAddresses should report error when IPs are empty",
|
||||
nodeName: "vm1",
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "NodeAddresses should get ipV4 private and public addresses",
|
||||
nodeName: "vm1",
|
||||
ipV4: "10.240.0.1",
|
||||
ipV4Public: "9.9.9.9",
|
||||
expected: []v1.NodeAddress{
|
||||
{
|
||||
Type: v1.NodeHostName,
|
||||
Address: "vm1",
|
||||
},
|
||||
{
|
||||
Type: v1.NodeInternalIP,
|
||||
Address: "10.240.0.1",
|
||||
},
|
||||
{
|
||||
Type: v1.NodeExternalIP,
|
||||
Address: "9.9.9.9",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "NodeAddresses should get ipV6 private and public addresses",
|
||||
nodeName: "vm1",
|
||||
ipV6: "1111:11111:00:00:1111:1111:000:111",
|
||||
ipV6Public: "2222:22221:00:00:2222:2222:000:111",
|
||||
expected: []v1.NodeAddress{
|
||||
{
|
||||
Type: v1.NodeHostName,
|
||||
Address: "vm1",
|
||||
},
|
||||
{
|
||||
Type: v1.NodeInternalIP,
|
||||
Address: "1111:11111:00:00:1111:1111:000:111",
|
||||
},
|
||||
{
|
||||
Type: v1.NodeExternalIP,
|
||||
Address: "2222:22221:00:00:2222:2222:000:111",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testcases {
|
||||
listener, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
|
||||
}
|
||||
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
fmt.Fprintf(w, fmt.Sprintf(metadataTemplate, test.nodeName, test.ipV4, test.ipV4Public, test.ipV6, test.ipV6Public))
|
||||
}))
|
||||
go func() {
|
||||
http.Serve(listener, mux)
|
||||
}()
|
||||
defer listener.Close()
|
||||
|
||||
cloud.metadata, err = NewInstanceMetadataService("http://" + listener.Addr().String() + "/")
|
||||
if err != nil {
|
||||
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
|
||||
}
|
||||
|
||||
ipAddresses, err := cloud.NodeAddresses(context.Background(), types.NodeName(test.nodeName))
|
||||
if test.expectError {
|
||||
if err == nil {
|
||||
t.Errorf("Test [%s] unexpected nil err", test.name)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
|
||||
}
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(ipAddresses, test.expected) {
|
||||
t.Errorf("Test [%s] unexpected ipAddresses: %s, expected %q", test.name, ipAddresses, test.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
2 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen.go generated vendored
@@ -1,5 +1,5 @@
/*
Copyright 2018 The Kubernetes Authors.
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
2 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen_test.go generated vendored
@@ -1,5 +1,5 @@
/*
Copyright 2018 The Kubernetes Authors.
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
3 vendor/k8s.io/kubernetes/pkg/controller/cronjob/injection.go generated vendored
@@ -118,7 +118,8 @@ func (r realJobControl) CreateJob(namespace string, job *batchv1.Job) (*batchv1.
}

func (r realJobControl) DeleteJob(namespace string, name string) error {
return r.KubeClient.BatchV1().Jobs(namespace).Delete(name, nil)
background := metav1.DeletePropagationBackground
return r.KubeClient.BatchV1().Jobs(namespace).Delete(name, &metav1.DeleteOptions{PropagationPolicy: &background})
}

type fakeJobControl struct {
24 vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go generated vendored
@ -207,14 +207,18 @@ func (a *HorizontalController) processNextWorkItem() bool {
|
||||
}
|
||||
defer a.queue.Done(key)
|
||||
|
||||
err := a.reconcileKey(key.(string))
|
||||
if err == nil {
|
||||
// don't "forget" here because we want to only process a given HPA once per resync interval
|
||||
return true
|
||||
deleted, err := a.reconcileKey(key.(string))
|
||||
if err != nil {
|
||||
utilruntime.HandleError(err)
|
||||
}
|
||||
// Add request processing HPA after resync interval just in case last resync didn't insert
|
||||
// request into the queue. Request is not inserted into queue by resync if previous one wasn't processed yet.
|
||||
// This happens quite often because requests from previous resync are removed from the queue at the same moment
|
||||
// as next resync inserts new requests.
|
||||
if !deleted {
|
||||
a.queue.AddRateLimited(key)
|
||||
}
|
||||
|
||||
a.queue.AddRateLimited(key)
|
||||
utilruntime.HandleError(err)
|
||||
return true
|
||||
}
|
||||
|
||||
@ -298,20 +302,20 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori
|
||||
return replicas, metric, statuses, timestamp, nil
|
||||
}
|
||||
|
||||
func (a *HorizontalController) reconcileKey(key string) error {
|
||||
func (a *HorizontalController) reconcileKey(key string) (deleted bool, err error) {
|
||||
namespace, name, err := cache.SplitMetaNamespaceKey(key)
|
||||
if err != nil {
|
||||
return err
|
||||
return true, err
|
||||
}
|
||||
|
||||
hpa, err := a.hpaLister.HorizontalPodAutoscalers(namespace).Get(name)
|
||||
if errors.IsNotFound(err) {
|
||||
klog.Infof("Horizontal Pod Autoscaler %s has been deleted in %s", name, namespace)
|
||||
delete(a.recommendations, key)
|
||||
return nil
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return a.reconcileAutoscaler(hpa, key)
|
||||
return false, a.reconcileAutoscaler(hpa, key)
|
||||
}
|
||||
|
||||
// computeStatusForObjectMetric computes the desired number of replicas for the specified metric of type ObjectMetricSourceType.
|
||||
|
55 vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal_test.go generated vendored
@ -2176,7 +2176,7 @@ func TestComputedToleranceAlgImplementation(t *testing.T) {
|
||||
finalPods := int32(math.Ceil(resourcesUsedRatio * float64(startPods)))
|
||||
|
||||
// To breach tolerance we will create a utilization ratio difference of tolerance to usageRatioToleranceValue)
|
||||
tc := testCase{
|
||||
tc1 := testCase{
|
||||
minReplicas: 0,
|
||||
maxReplicas: 1000,
|
||||
initialReplicas: startPods,
|
||||
@ -2209,22 +2209,49 @@ func TestComputedToleranceAlgImplementation(t *testing.T) {
|
||||
useMetricsAPI: true,
|
||||
recommendations: []timestampedRecommendation{},
|
||||
}
|
||||
tc1.runTest(t)
|
||||
|
||||
tc.runTest(t)
|
||||
|
||||
// Reuse the data structure above, now testing "unscaling".
|
||||
// Now, we test that no scaling happens if we are in a very close margin to the tolerance
|
||||
target = math.Abs(1/(requestedToUsed*(1-defaultTestingTolerance))) + .004
|
||||
finalCPUPercentTarget = int32(target * 100)
|
||||
tc.CPUTarget = finalCPUPercentTarget
|
||||
tc.initialReplicas = startPods
|
||||
tc.expectedDesiredReplicas = startPods
|
||||
tc.expectedConditions = statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
|
||||
Type: autoscalingv2.AbleToScale,
|
||||
Status: v1.ConditionTrue,
|
||||
Reason: "ReadyForNewScale",
|
||||
})
|
||||
tc.runTest(t)
|
||||
tc2 := testCase{
|
||||
minReplicas: 0,
|
||||
maxReplicas: 1000,
|
||||
initialReplicas: startPods,
|
||||
expectedDesiredReplicas: startPods,
|
||||
CPUTarget: finalCPUPercentTarget,
|
||||
reportedLevels: []uint64{
|
||||
totalUsedCPUOfAllPods / 10,
|
||||
totalUsedCPUOfAllPods / 10,
|
||||
totalUsedCPUOfAllPods / 10,
|
||||
totalUsedCPUOfAllPods / 10,
|
||||
totalUsedCPUOfAllPods / 10,
|
||||
totalUsedCPUOfAllPods / 10,
|
||||
totalUsedCPUOfAllPods / 10,
|
||||
totalUsedCPUOfAllPods / 10,
|
||||
totalUsedCPUOfAllPods / 10,
|
||||
totalUsedCPUOfAllPods / 10,
|
||||
},
|
||||
reportedCPURequests: []resource.Quantity{
|
||||
resource.MustParse(fmt.Sprint(perPodRequested+100) + "m"),
|
||||
resource.MustParse(fmt.Sprint(perPodRequested-100) + "m"),
|
||||
resource.MustParse(fmt.Sprint(perPodRequested+10) + "m"),
|
||||
resource.MustParse(fmt.Sprint(perPodRequested-10) + "m"),
|
||||
resource.MustParse(fmt.Sprint(perPodRequested+2) + "m"),
|
||||
resource.MustParse(fmt.Sprint(perPodRequested-2) + "m"),
|
||||
resource.MustParse(fmt.Sprint(perPodRequested+1) + "m"),
|
||||
resource.MustParse(fmt.Sprint(perPodRequested-1) + "m"),
|
||||
resource.MustParse(fmt.Sprint(perPodRequested) + "m"),
|
||||
resource.MustParse(fmt.Sprint(perPodRequested) + "m"),
|
||||
},
|
||||
useMetricsAPI: true,
|
||||
recommendations: []timestampedRecommendation{},
|
||||
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
|
||||
Type: autoscalingv2.AbleToScale,
|
||||
Status: v1.ConditionTrue,
|
||||
Reason: "ReadyForNewScale",
|
||||
}),
|
||||
}
|
||||
tc2.runTest(t)
|
||||
}
|
||||
|
||||
func TestScaleUpRCImmediately(t *testing.T) {
|
||||
|
12 vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/legacy_horizontal_test.go generated vendored
@ -100,6 +100,8 @@ type legacyTestCase struct {
|
||||
// Last scale time
|
||||
lastScaleTime *metav1.Time
|
||||
recommendations []timestampedRecommendation
|
||||
|
||||
finished bool
|
||||
}
|
||||
|
||||
// Needs to be called under a lock.
|
||||
@ -462,12 +464,14 @@ func (tc *legacyTestCase) verifyResults(t *testing.T) {
|
||||
func (tc *legacyTestCase) runTest(t *testing.T) {
|
||||
testClient, testScaleClient := tc.prepareTestClient(t)
|
||||
metricsClient := metrics.NewHeapsterMetricsClient(testClient, metrics.DefaultHeapsterNamespace, metrics.DefaultHeapsterScheme, metrics.DefaultHeapsterService, metrics.DefaultHeapsterPort)
|
||||
|
||||
eventClient := &fake.Clientset{}
|
||||
eventClient.AddReactor("*", "events", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
tc.Lock()
|
||||
defer tc.Unlock()
|
||||
|
||||
if tc.finished {
|
||||
return true, &v1.Event{}, nil
|
||||
}
|
||||
obj := action.(core.CreateAction).GetObject().(*v1.Event)
|
||||
if tc.verifyEvents {
|
||||
switch obj.Reason {
|
||||
@ -514,7 +518,10 @@ func (tc *legacyTestCase) runTest(t *testing.T) {
|
||||
informerFactory.Start(stop)
|
||||
go hpaController.Run(stop)
|
||||
|
||||
// Wait for HPA to be processed.
|
||||
<-tc.processed
|
||||
tc.Lock()
|
||||
tc.finished = true
|
||||
if tc.verifyEvents {
|
||||
tc.Unlock()
|
||||
// We need to wait for events to be broadcasted (sleep for longer than record.sleepDuration).
|
||||
@ -522,9 +529,8 @@ func (tc *legacyTestCase) runTest(t *testing.T) {
|
||||
} else {
|
||||
tc.Unlock()
|
||||
}
|
||||
// Wait for HPA to be processed.
|
||||
<-tc.processed
|
||||
tc.verifyResults(t)
|
||||
|
||||
}
|
||||
|
||||
func TestLegacyScaleUp(t *testing.T) {
|
||||
|
13 vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply/apply.go generated vendored
@ -343,6 +343,13 @@ func (o *ApplyOptions) Run() error {
|
||||
return err
|
||||
}
|
||||
|
||||
// If server-dry-run is requested but the type doesn't support it, fail right away.
|
||||
if o.ServerDryRun {
|
||||
if err := dryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if info.Namespaced() {
|
||||
visitedNamespaces.Insert(info.Namespace)
|
||||
}
|
||||
@ -366,12 +373,6 @@ func (o *ApplyOptions) Run() error {
|
||||
if !errors.IsNotFound(err) {
|
||||
return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving current configuration of:\n%s\nfrom server for:", info.String()), info.Source, err)
|
||||
}
|
||||
// If server-dry-run is requested but the type doesn't support it, fail right away.
|
||||
if o.ServerDryRun {
|
||||
if err := dryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Create the resource if it doesn't exist
|
||||
// First, update the annotation used by kubectl apply
|
||||
|
12 vendor/k8s.io/kubernetes/pkg/kubectl/cmd/diff/diff.go generated vendored
@ -289,7 +289,17 @@ func (obj InfoObject) Merged() (runtime.Object, error) {
|
||||
}
|
||||
|
||||
func (obj InfoObject) Name() string {
|
||||
return obj.Info.Name
|
||||
group := ""
|
||||
if obj.Info.Mapping.GroupVersionKind.Group != "" {
|
||||
group = fmt.Sprintf("%v.", obj.Info.Mapping.GroupVersionKind.Group)
|
||||
}
|
||||
return group + fmt.Sprintf(
|
||||
"%v.%v.%v.%v",
|
||||
obj.Info.Mapping.GroupVersionKind.Version,
|
||||
obj.Info.Mapping.GroupVersionKind.Kind,
|
||||
obj.Info.Namespace,
|
||||
obj.Info.Name,
|
||||
)
|
||||
}
|
||||
|
||||
// Differ creates two DiffVersion and diffs them.
|
||||
|
6 vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/get.go generated vendored
@ -334,8 +334,10 @@ func (r *RuntimeSorter) Sort() error {
|
||||
case *metav1beta1.Table:
|
||||
includesTable = true
|
||||
|
||||
if err := NewTableSorter(t, r.field).Sort(); err != nil {
|
||||
continue
|
||||
if sorter, err := NewTableSorter(t, r.field); err != nil {
|
||||
return err
|
||||
} else if err := sorter.Sort(); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
includesRuntimeObjs = true
|
||||
|
25 vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/sorter.go generated vendored
@ -318,13 +318,18 @@ func (t *TableSorter) Len() int {
|
||||
|
||||
func (t *TableSorter) Swap(i, j int) {
|
||||
t.obj.Rows[i], t.obj.Rows[j] = t.obj.Rows[j], t.obj.Rows[i]
|
||||
t.parsedRows[i], t.parsedRows[j] = t.parsedRows[j], t.parsedRows[i]
|
||||
}
|
||||
|
||||
func (t *TableSorter) Less(i, j int) bool {
|
||||
iValues := t.parsedRows[i]
|
||||
jValues := t.parsedRows[j]
|
||||
if len(iValues) == 0 || len(iValues[0]) == 0 || len(jValues) == 0 || len(jValues[0]) == 0 {
|
||||
klog.Fatalf("couldn't find any field with path %q in the list of objects", t.field)
|
||||
|
||||
if len(iValues) == 0 || len(iValues[0]) == 0 {
|
||||
return true
|
||||
}
|
||||
if len(jValues) == 0 || len(jValues[0]) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
iField := iValues[0][0]
|
||||
@ -342,28 +347,36 @@ func (t *TableSorter) Sort() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewTableSorter(table *metav1beta1.Table, field string) *TableSorter {
|
||||
func NewTableSorter(table *metav1beta1.Table, field string) (*TableSorter, error) {
|
||||
var parsedRows [][][]reflect.Value
|
||||
|
||||
parser := jsonpath.New("sorting").AllowMissingKeys(true)
|
||||
err := parser.Parse(field)
|
||||
if err != nil {
|
||||
klog.Fatalf("sorting error: %v\n", err)
|
||||
return nil, fmt.Errorf("sorting error: %v", err)
|
||||
}
|
||||
|
||||
fieldFoundOnce := false
|
||||
for i := range table.Rows {
|
||||
parsedRow, err := findJSONPathResults(parser, table.Rows[i].Object.Object)
|
||||
if err != nil {
|
||||
klog.Fatalf("Failed to get values for %#v using %s (%#v)", parsedRow, field, err)
|
||||
return nil, fmt.Errorf("Failed to get values for %#v using %s (%#v)", parsedRow, field, err)
|
||||
}
|
||||
parsedRows = append(parsedRows, parsedRow)
|
||||
if len(parsedRow) > 0 && len(parsedRow[0]) > 0 {
|
||||
fieldFoundOnce = true
|
||||
}
|
||||
}
|
||||
|
||||
if len(table.Rows) > 0 && !fieldFoundOnce {
|
||||
return nil, fmt.Errorf("couldn't find any field with path %q in the list of objects", field)
|
||||
}
|
||||
|
||||
return &TableSorter{
|
||||
obj: table,
|
||||
field: field,
|
||||
parsedRows: parsedRows,
|
||||
}
|
||||
}, nil
|
||||
}
|
||||
func findJSONPathResults(parser *jsonpath.JSONPath, from runtime.Object) ([][]reflect.Value, error) {
|
||||
if unstructuredObj, ok := from.(*unstructured.Unstructured); ok {
|
||||
|
82 vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/sorter_test.go generated vendored
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package get
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
@ -25,10 +26,20 @@ import (
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/diff"
|
||||
"k8s.io/kubernetes/pkg/kubectl/scheme"
|
||||
)
|
||||
|
||||
func toUnstructuredOrDie(data []byte) *unstructured.Unstructured {
|
||||
unstrBody := map[string]interface{}{}
|
||||
err := json.Unmarshal(data, &unstrBody)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return &unstructured.Unstructured{Object: unstrBody}
|
||||
}
|
||||
func encodeOrDie(obj runtime.Object) []byte {
|
||||
data, err := runtime.Encode(scheme.Codecs.LegacyCodec(corev1.SchemeGroupVersion), obj)
|
||||
if err != nil {
|
||||
@ -65,6 +76,16 @@ func TestSortingPrinter(t *testing.T) {
|
||||
name string
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
obj: &corev1.PodList{
|
||||
Items: []corev1.Pod{},
|
||||
},
|
||||
sort: &corev1.PodList{
|
||||
Items: []corev1.Pod{},
|
||||
},
|
||||
field: "{.metadata.name}",
|
||||
},
|
||||
{
|
||||
name: "in-order-already",
|
||||
obj: &corev1.PodList{
|
||||
@ -237,16 +258,16 @@ func TestSortingPrinter(t *testing.T) {
|
||||
name: "v1.List in order",
|
||||
obj: &corev1.List{
|
||||
Items: []runtime.RawExtension{
|
||||
{Raw: encodeOrDie(a)},
|
||||
{Raw: encodeOrDie(b)},
|
||||
{Raw: encodeOrDie(c)},
|
||||
{Object: a, Raw: encodeOrDie(a)},
|
||||
{Object: b, Raw: encodeOrDie(b)},
|
||||
{Object: c, Raw: encodeOrDie(c)},
|
||||
},
|
||||
},
|
||||
sort: &corev1.List{
|
||||
Items: []runtime.RawExtension{
|
||||
{Raw: encodeOrDie(a)},
|
||||
{Raw: encodeOrDie(b)},
|
||||
{Raw: encodeOrDie(c)},
|
||||
{Object: a, Raw: encodeOrDie(a)},
|
||||
{Object: b, Raw: encodeOrDie(b)},
|
||||
{Object: c, Raw: encodeOrDie(c)},
|
||||
},
|
||||
},
|
||||
field: "{.metadata.name}",
|
||||
@ -255,16 +276,16 @@ func TestSortingPrinter(t *testing.T) {
|
||||
name: "v1.List in reverse",
|
||||
obj: &corev1.List{
|
||||
Items: []runtime.RawExtension{
|
||||
{Raw: encodeOrDie(c)},
|
||||
{Raw: encodeOrDie(b)},
|
||||
{Raw: encodeOrDie(a)},
|
||||
{Object: c, Raw: encodeOrDie(c)},
|
||||
{Object: b, Raw: encodeOrDie(b)},
|
||||
{Object: a, Raw: encodeOrDie(a)},
|
||||
},
|
||||
},
|
||||
sort: &corev1.List{
|
||||
Items: []runtime.RawExtension{
|
||||
{Raw: encodeOrDie(a)},
|
||||
{Raw: encodeOrDie(b)},
|
||||
{Raw: encodeOrDie(c)},
|
||||
{Object: a, Raw: encodeOrDie(a)},
|
||||
{Object: b, Raw: encodeOrDie(b)},
|
||||
{Object: c, Raw: encodeOrDie(c)},
|
||||
},
|
||||
},
|
||||
field: "{.metadata.name}",
|
||||
@ -390,6 +411,43 @@ func TestSortingPrinter(t *testing.T) {
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name+" table", func(t *testing.T) {
|
||||
table := &metav1beta1.Table{}
|
||||
meta.EachListItem(tt.obj, func(item runtime.Object) error {
|
||||
table.Rows = append(table.Rows, metav1beta1.TableRow{
|
||||
Object: runtime.RawExtension{Object: toUnstructuredOrDie(encodeOrDie(item))},
|
||||
})
|
||||
return nil
|
||||
})
|
||||
|
||||
expectedTable := &metav1beta1.Table{}
|
||||
meta.EachListItem(tt.sort, func(item runtime.Object) error {
|
||||
expectedTable.Rows = append(expectedTable.Rows, metav1beta1.TableRow{
|
||||
Object: runtime.RawExtension{Object: toUnstructuredOrDie(encodeOrDie(item))},
|
||||
})
|
||||
return nil
|
||||
})
|
||||
|
||||
sorter, err := NewTableSorter(table, tt.field)
|
||||
if err == nil {
|
||||
err = sorter.Sort()
|
||||
}
|
||||
if err != nil {
|
||||
if len(tt.expectedErr) > 0 {
|
||||
if strings.Contains(err.Error(), tt.expectedErr) {
|
||||
return
|
||||
}
|
||||
t.Fatalf("%s: expected error containing: %q, got: \"%v\"", tt.name, tt.expectedErr, err)
|
||||
}
|
||||
t.Fatalf("%s: unexpected error: %v", tt.name, err)
|
||||
}
|
||||
if len(tt.expectedErr) > 0 {
|
||||
t.Fatalf("%s: expected error containing: %q, got none", tt.name, tt.expectedErr)
|
||||
}
|
||||
if !reflect.DeepEqual(table, expectedTable) {
|
||||
t.Errorf("[%s]\nexpected/saw:\n%s", tt.name, diff.ObjectReflectDiff(expectedTable, table))
|
||||
}
|
||||
})
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
sort := &SortingPrinter{SortField: tt.field, Decoder: scheme.Codecs.UniversalDecoder()}
|
||||
err := sort.sortObj(tt.obj)
|
||||
|
14 vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/secret_for_docker_registry.go generated vendored
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package versioned
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
@ -152,6 +153,7 @@ func handleDockerCfgJSONContent(username, password, email, server string) ([]byt
|
||||
Username: username,
|
||||
Password: password,
|
||||
Email: email,
|
||||
Auth: encodeDockerConfigFieldAuth(username, password),
|
||||
}
|
||||
|
||||
dockerCfgJSON := DockerConfigJSON{
|
||||
@ -161,6 +163,11 @@ func handleDockerCfgJSONContent(username, password, email, server string) ([]byt
|
||||
return json.Marshal(dockerCfgJSON)
|
||||
}
|
||||
|
||||
func encodeDockerConfigFieldAuth(username, password string) string {
|
||||
fieldValue := username + ":" + password
|
||||
return base64.StdEncoding.EncodeToString([]byte(fieldValue))
|
||||
}
|
||||
|
||||
// DockerConfigJSON represents a local docker auth config file
|
||||
// for pulling images.
|
||||
type DockerConfigJSON struct {
|
||||
@ -175,7 +182,8 @@ type DockerConfigJSON struct {
|
||||
type DockerConfig map[string]DockerConfigEntry
|
||||
|
||||
type DockerConfigEntry struct {
|
||||
Username string
|
||||
Password string
|
||||
Email string
|
||||
Username string `json:"username,omitempty"`
|
||||
Password string `json:"password,omitempty"`
|
||||
Email string `json:"email,omitempty"`
|
||||
Auth string `json:"auth,omitempty"`
|
||||
}
|
||||
|
@ -73,7 +73,7 @@ func TestSecretForDockerRegistryGenerate(t *testing.T) {
|
||||
},
|
||||
expected: &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "foo-7566tc6mgc",
|
||||
Name: "foo-548cm7fgdh",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
v1.DockerConfigJsonKey: secretData,
|
||||
|
1 vendor/k8s.io/kubernetes/pkg/kubelet/BUILD generated vendored
@ -165,6 +165,7 @@ go_test(
|
||||
"kubelet_pods_windows_test.go",
|
||||
"kubelet_resources_test.go",
|
||||
"kubelet_test.go",
|
||||
"kubelet_volumes_linux_test.go",
|
||||
"kubelet_volumes_test.go",
|
||||
"main_test.go",
|
||||
"oom_watcher_test.go",
|
||||
|
1 vendor/k8s.io/kubernetes/pkg/kubelet/config/defaults.go generated vendored
@ -19,6 +19,7 @@ package config
|
||||
const (
|
||||
DefaultKubeletPodsDirName = "pods"
|
||||
DefaultKubeletVolumesDirName = "volumes"
|
||||
DefaultKubeletVolumeSubpathsDirName = "volume-subpaths"
|
||||
DefaultKubeletVolumeDevicesDirName = "volumeDevices"
|
||||
DefaultKubeletPluginsDirName = "plugins"
|
||||
DefaultKubeletPluginsRegistrationDirName = "plugins_registry"
|
||||
|
22 vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go generated vendored
@ -25,7 +25,7 @@ import (
|
||||
cadvisorapiv1 "github.com/google/cadvisor/info/v1"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm"
|
||||
"k8s.io/kubernetes/pkg/kubelet/config"
|
||||
@ -99,6 +99,13 @@ func (kl *Kubelet) getPodDir(podUID types.UID) string {
|
||||
return filepath.Join(kl.getPodsDir(), string(podUID))
|
||||
}
|
||||
|
||||
// getPodVolumesSubpathsDir returns the full path to the per-pod subpaths directory under
|
||||
// which subpath volumes are created for the specified pod. This directory may not
|
||||
// exist if the pod does not exist or subpaths are not specified.
|
||||
func (kl *Kubelet) getPodVolumeSubpathsDir(podUID types.UID) string {
|
||||
return filepath.Join(kl.getPodDir(podUID), config.DefaultKubeletVolumeSubpathsDirName)
|
||||
}
|
||||
|
||||
// getPodVolumesDir returns the full path to the per-pod data directory under
|
||||
// which volumes are created for the specified pod. This directory may not
|
||||
// exist if the pod does not exist.
|
||||
@ -315,6 +322,19 @@ func (kl *Kubelet) getMountedVolumePathListFromDisk(podUID types.UID) ([]string,
|
||||
return mountedVolumes, nil
|
||||
}
|
||||
|
||||
// podVolumesSubpathsDirExists returns true if the pod volume-subpaths directory for
|
||||
// a given pod exists
|
||||
func (kl *Kubelet) podVolumeSubpathsDirExists(podUID types.UID) (bool, error) {
|
||||
podVolDir := kl.getPodVolumeSubpathsDir(podUID)
|
||||
|
||||
if pathExists, pathErr := volumeutil.PathExists(podVolDir); pathErr != nil {
|
||||
return true, fmt.Errorf("Error checking if path %q exists: %v", podVolDir, pathErr)
|
||||
} else if !pathExists {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// GetVersionInfo returns information about the version of cAdvisor in use.
|
||||
func (kl *Kubelet) GetVersionInfo() (*cadvisorapiv1.VersionInfo, error) {
|
||||
return kl.cadvisor.VersionInfo()
|
||||
|
16 vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go generated vendored
@ -19,7 +19,7 @@ package kubelet
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
@ -114,6 +114,8 @@ func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecon
|
||||
}
|
||||
// If volumes have not been unmounted/detached, do not delete directory.
|
||||
// Doing so may result in corruption of data.
|
||||
// TODO: getMountedVolumePathListFromDisk() call may be redundant with
|
||||
// kl.getPodVolumePathListFromDisk(). Can this be cleaned up?
|
||||
if podVolumesExist := kl.podVolumesExist(uid); podVolumesExist {
|
||||
klog.V(3).Infof("Orphaned pod %q found, but volumes are not cleaned up", uid)
|
||||
continue
|
||||
@ -128,6 +130,18 @@ func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecon
|
||||
orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but volume paths are still present on disk", uid))
|
||||
continue
|
||||
}
|
||||
|
||||
// If there are any volume-subpaths, do not cleanup directories
|
||||
volumeSubpathExists, err := kl.podVolumeSubpathsDirExists(uid)
|
||||
if err != nil {
|
||||
orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but error %v occurred during reading of volume-subpaths dir from disk", uid, err))
|
||||
continue
|
||||
}
|
||||
if volumeSubpathExists {
|
||||
orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but volume subpaths are still present on disk", uid))
|
||||
continue
|
||||
}
|
||||
|
||||
klog.V(3).Infof("Orphaned pod %q found, removing", uid)
|
||||
if err := removeall.RemoveAllOneFilesystem(kl.mounter, kl.getPodDir(uid)); err != nil {
|
||||
klog.Errorf("Failed to remove orphaned pod %q dir; err: %v", uid, err)
|
||||
|
156 vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes_linux_test.go generated vendored Normal file
@ -0,0 +1,156 @@
// +build linux

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	_ "k8s.io/kubernetes/pkg/apis/core/install"
)

func validateDirExists(dir string) error {
	_, err := ioutil.ReadDir(dir)
	if err != nil {
		return err
	}
	return nil
}

func validateDirNotExists(dir string) error {
	_, err := ioutil.ReadDir(dir)
	if os.IsNotExist(err) {
		return nil
	}
	if err != nil {
		return err
	}
	return fmt.Errorf("dir %q still exists", dir)
}

func TestCleanupOrphanedPodDirs(t *testing.T) {
	testCases := map[string]struct {
		pods         []*v1.Pod
		prepareFunc  func(kubelet *Kubelet) error
		validateFunc func(kubelet *Kubelet) error
		expectErr    bool
	}{
		"nothing-to-do": {},
		"pods-dir-not-found": {
			prepareFunc: func(kubelet *Kubelet) error {
				return os.Remove(kubelet.getPodsDir())
			},
			expectErr: true,
		},
		"pod-doesnot-exist-novolume": {
			prepareFunc: func(kubelet *Kubelet) error {
				podDir := kubelet.getPodDir("pod1uid")
				return os.MkdirAll(filepath.Join(podDir, "not/a/volume"), 0750)
			},
			validateFunc: func(kubelet *Kubelet) error {
				podDir := kubelet.getPodDir("pod1uid")
				return validateDirNotExists(filepath.Join(podDir, "not"))
			},
		},
		"pod-exists-with-volume": {
			pods: []*v1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "pod1",
						UID:  "pod1uid",
					},
				},
			},
			prepareFunc: func(kubelet *Kubelet) error {
				podDir := kubelet.getPodDir("pod1uid")
				return os.MkdirAll(filepath.Join(podDir, "volumes/plugin/name"), 0750)
			},
			validateFunc: func(kubelet *Kubelet) error {
				podDir := kubelet.getPodDir("pod1uid")
				return validateDirExists(filepath.Join(podDir, "volumes/plugin/name"))
			},
		},
		"pod-doesnot-exist-with-volume": {
			prepareFunc: func(kubelet *Kubelet) error {
				podDir := kubelet.getPodDir("pod1uid")
				return os.MkdirAll(filepath.Join(podDir, "volumes/plugin/name"), 0750)
			},
			validateFunc: func(kubelet *Kubelet) error {
				podDir := kubelet.getPodDir("pod1uid")
				return validateDirExists(filepath.Join(podDir, "volumes/plugin/name"))
			},
		},
		"pod-doesnot-exist-with-subpath": {
			prepareFunc: func(kubelet *Kubelet) error {
				podDir := kubelet.getPodDir("pod1uid")
				return os.MkdirAll(filepath.Join(podDir, "volume-subpaths/volume/container/index"), 0750)
			},
			validateFunc: func(kubelet *Kubelet) error {
				podDir := kubelet.getPodDir("pod1uid")
				return validateDirExists(filepath.Join(podDir, "volume-subpaths/volume/container/index"))
			},
		},
		"pod-doesnot-exist-with-subpath-top": {
			prepareFunc: func(kubelet *Kubelet) error {
				podDir := kubelet.getPodDir("pod1uid")
				return os.MkdirAll(filepath.Join(podDir, "volume-subpaths"), 0750)
			},
			validateFunc: func(kubelet *Kubelet) error {
				podDir := kubelet.getPodDir("pod1uid")
				return validateDirExists(filepath.Join(podDir, "volume-subpaths"))
			},
		},
		// TODO: test volume in volume-manager
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
			defer testKubelet.Cleanup()
			kubelet := testKubelet.kubelet

			if tc.prepareFunc != nil {
				if err := tc.prepareFunc(kubelet); err != nil {
					t.Fatalf("%s failed preparation: %v", name, err)
				}
			}

			err := kubelet.cleanupOrphanedPodDirs(tc.pods, nil)
			if tc.expectErr && err == nil {
				t.Errorf("%s failed: expected error, got success", name)
			}
			if !tc.expectErr && err != nil {
				t.Errorf("%s failed: got error %v", name, err)
			}

			if tc.validateFunc != nil {
				if err := tc.validateFunc(kubelet); err != nil {
					t.Errorf("%s failed validation: %v", name, err)
				}
			}
		})
	}
}
3 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go generated vendored
@ -57,6 +57,9 @@ func (m *kubeGenericRuntimeManager) createPodSandbox(pod *v1.Pod, attempt uint32
		message := fmt.Sprintf("CreatePodSandbox for pod %q failed: %v", format.Pod(pod), err)
		return "", message, err
	}
	if runtimeHandler != "" {
		klog.V(2).Infof("Running pod %s with RuntimeHandler %q", format.Pod(pod), runtimeHandler)
	}
}

podSandBoxID, err := m.runtimeService.RunPodSandbox(podSandboxConfig, runtimeHandler)
1 vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/BUILD generated vendored
@ -18,6 +18,7 @@ go_library(
        "//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
        "//pkg/kubelet/cm:go_default_library",
        "//pkg/kubelet/container:go_default_library",
        "//pkg/kubelet/util:go_default_library",
        "//pkg/kubelet/util/format:go_default_library",
        "//pkg/volume:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
24 vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/summary.go generated vendored
@ -19,7 +19,11 @@ package stats
import (
	"fmt"

	"k8s.io/klog"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
	"k8s.io/kubernetes/pkg/kubelet/util"
)

type SummaryProvider interface {
@ -32,6 +36,11 @@ type SummaryProvider interface {

// summaryProviderImpl implements the SummaryProvider interface.
type summaryProviderImpl struct {
	// kubeletCreationTime is the time at which the summaryProvider was created.
	kubeletCreationTime metav1.Time
	// systemBootTime is the time at which the system was started
	systemBootTime metav1.Time

	provider StatsProvider
}

@ -40,7 +49,18 @@ var _ SummaryProvider = &summaryProviderImpl{}
// NewSummaryProvider returns a SummaryProvider using the stats provided by the
// specified statsProvider.
func NewSummaryProvider(statsProvider StatsProvider) SummaryProvider {
	return &summaryProviderImpl{statsProvider}
	kubeletCreationTime := metav1.Now()
	bootTime, err := util.GetBootTime()
	if err != nil {
		// bootTime will be zero if we encounter an error getting the boot time.
		klog.Warningf("Error getting system boot time. Node metrics will have an incorrect start time: %v", err)
	}

	return &summaryProviderImpl{
		kubeletCreationTime: kubeletCreationTime,
		systemBootTime:      metav1.NewTime(bootTime),
		provider:            statsProvider,
	}
}

func (sp *summaryProviderImpl) Get(updateStats bool) (*statsapi.Summary, error) {
@ -77,7 +97,7 @@ func (sp *summaryProviderImpl) Get(updateStats bool) (*statsapi.Summary, error)
		CPU:       rootStats.CPU,
		Memory:    rootStats.Memory,
		Network:   networkStats,
		StartTime: rootStats.StartTime,
		StartTime: sp.systemBootTime,
		Fs:        rootFsStats,
		Runtime:   &statsapi.RuntimeStats{ImageFs: imageFsStats},
		Rlimit:    rlimit,
29 vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/summary_sys_containers.go generated vendored
@ -21,6 +21,7 @@ package stats
import (
	"k8s.io/klog"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
	"k8s.io/kubernetes/pkg/kubelet/cm"
)
@ -29,11 +30,12 @@ func (sp *summaryProviderImpl) GetSystemContainersStats(nodeConfig cm.NodeConfig
	systemContainers := map[string]struct {
		name             string
		forceStatsUpdate bool
		startTime        metav1.Time
	}{
		statsapi.SystemContainerKubelet: {nodeConfig.KubeletCgroupsName, false},
		statsapi.SystemContainerRuntime: {nodeConfig.RuntimeCgroupsName, false},
		statsapi.SystemContainerMisc:    {nodeConfig.SystemCgroupsName, false},
		statsapi.SystemContainerPods:    {sp.provider.GetPodCgroupRoot(), updateStats},
		statsapi.SystemContainerKubelet: {name: nodeConfig.KubeletCgroupsName, forceStatsUpdate: false, startTime: sp.kubeletCreationTime},
		statsapi.SystemContainerRuntime: {name: nodeConfig.RuntimeCgroupsName, forceStatsUpdate: false},
		statsapi.SystemContainerMisc:    {name: nodeConfig.SystemCgroupsName, forceStatsUpdate: false},
		statsapi.SystemContainerPods:    {name: sp.provider.GetPodCgroupRoot(), forceStatsUpdate: updateStats},
	}
	for sys, cont := range systemContainers {
		// skip if cgroup name is undefined (not all system containers are required)
@ -48,6 +50,11 @@ func (sp *summaryProviderImpl) GetSystemContainersStats(nodeConfig cm.NodeConfig
		// System containers don't have a filesystem associated with them.
		s.Logs, s.Rootfs = nil, nil
		s.Name = sys

		// if we know the start time of a system container, use that instead of the start time provided by cAdvisor
		if !cont.startTime.IsZero() {
			s.StartTime = cont.startTime
		}
		stats = append(stats, *s)
	}

@ -58,11 +65,12 @@ func (sp *summaryProviderImpl) GetSystemContainersCPUAndMemoryStats(nodeConfig c
	systemContainers := map[string]struct {
		name             string
		forceStatsUpdate bool
		startTime        metav1.Time
	}{
		statsapi.SystemContainerKubelet: {nodeConfig.KubeletCgroupsName, false},
		statsapi.SystemContainerRuntime: {nodeConfig.RuntimeCgroupsName, false},
		statsapi.SystemContainerMisc:    {nodeConfig.SystemCgroupsName, false},
		statsapi.SystemContainerPods:    {sp.provider.GetPodCgroupRoot(), updateStats},
		statsapi.SystemContainerKubelet: {name: nodeConfig.KubeletCgroupsName, forceStatsUpdate: false, startTime: sp.kubeletCreationTime},
		statsapi.SystemContainerRuntime: {name: nodeConfig.RuntimeCgroupsName, forceStatsUpdate: false},
		statsapi.SystemContainerMisc:    {name: nodeConfig.SystemCgroupsName, forceStatsUpdate: false},
		statsapi.SystemContainerPods:    {name: sp.provider.GetPodCgroupRoot(), forceStatsUpdate: updateStats},
	}
	for sys, cont := range systemContainers {
		// skip if cgroup name is undefined (not all system containers are required)
@ -75,6 +83,11 @@ func (sp *summaryProviderImpl) GetSystemContainersCPUAndMemoryStats(nodeConfig c
			continue
		}
		s.Name = sys

		// if we know the start time of a system container, use that instead of the start time provided by cAdvisor
		if !cont.startTime.IsZero() {
			s.StartTime = cont.startTime
		}
		stats = append(stats, *s)
	}
8 vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/summary_test.go generated vendored
@ -83,12 +83,14 @@ func TestSummaryProviderGetStats(t *testing.T) {
		On("GetCgroupStats", "/kubelet", false).Return(cgroupStatsMap["/kubelet"].cs, cgroupStatsMap["/kubelet"].ns, nil).
		On("GetCgroupStats", "/kubepods", true).Return(cgroupStatsMap["/pods"].cs, cgroupStatsMap["/pods"].ns, nil)

	provider := NewSummaryProvider(mockStatsProvider)
	kubeletCreationTime := metav1.Now()
	systemBootTime := metav1.Now()
	provider := summaryProviderImpl{kubeletCreationTime: kubeletCreationTime, systemBootTime: systemBootTime, provider: mockStatsProvider}
	summary, err := provider.Get(true)
	assert.NoError(err)

	assert.Equal(summary.Node.NodeName, "test-node")
	assert.Equal(summary.Node.StartTime, cgroupStatsMap["/"].cs.StartTime)
	assert.Equal(summary.Node.StartTime, systemBootTime)
	assert.Equal(summary.Node.CPU, cgroupStatsMap["/"].cs.CPU)
	assert.Equal(summary.Node.Memory, cgroupStatsMap["/"].cs.Memory)
	assert.Equal(summary.Node.Network, cgroupStatsMap["/"].ns)
@ -98,7 +100,7 @@ func TestSummaryProviderGetStats(t *testing.T) {
	assert.Equal(len(summary.Node.SystemContainers), 4)
	assert.Contains(summary.Node.SystemContainers, statsapi.ContainerStats{
		Name:         "kubelet",
		StartTime:    cgroupStatsMap["/kubelet"].cs.StartTime,
		StartTime:    kubeletCreationTime,
		CPU:          cgroupStatsMap["/kubelet"].cs.CPU,
		Memory:       cgroupStatsMap["/kubelet"].cs.Memory,
		Accelerators: cgroupStatsMap["/kubelet"].cs.Accelerators,
2 vendor/k8s.io/kubernetes/pkg/kubelet/util/BUILD generated vendored
@ -34,6 +34,8 @@ go_test(
go_library(
    name = "go_default_library",
    srcs = [
        "boottime_util_darwin.go",
        "boottime_util_linux.go",
        "doc.go",
        "util.go",
        "util_unix.go",
44 vendor/k8s.io/kubernetes/pkg/kubelet/util/boottime_util_darwin.go generated vendored Normal file
@ -0,0 +1,44 @@
// +build darwin

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"fmt"
	"syscall"
	"time"
	"unsafe"

	"golang.org/x/sys/unix"
)

// GetBootTime returns the time at which the machine was started, truncated to the nearest second
func GetBootTime() (time.Time, error) {
	output, err := unix.SysctlRaw("kern.boottime")
	if err != nil {
		return time.Time{}, err
	}
	var timeval syscall.Timeval
	if len(output) != int(unsafe.Sizeof(timeval)) {
		return time.Time{}, fmt.Errorf("unexpected output when calling syscall kern.bootime. Expected len(output) to be %v, but got %v",
			int(unsafe.Sizeof(timeval)), len(output))
	}
	timeval = *(*syscall.Timeval)(unsafe.Pointer(&output[0]))
	sec, nsec := timeval.Unix()
	return time.Unix(sec, nsec).Truncate(time.Second), nil
}
36 vendor/k8s.io/kubernetes/pkg/kubelet/util/boottime_util_linux.go generated vendored Normal file
@ -0,0 +1,36 @@
// +build freebsd linux

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"fmt"
	"time"

	"golang.org/x/sys/unix"
)

// GetBootTime returns the time at which the machine was started, truncated to the nearest second
func GetBootTime() (time.Time, error) {
	currentTime := time.Now()
	var info unix.Sysinfo_t
	if err := unix.Sysinfo(&info); err != nil {
		return time.Time{}, fmt.Errorf("error getting system uptime: %s", err)
	}
	return currentTime.Add(-time.Duration(info.Uptime) * time.Second).Truncate(time.Second), nil
}
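The Linux implementation above derives boot time by subtracting the sysinfo uptime from the current wall clock. Purely as a rough illustration (not part of the vendored diff; it assumes only the Go standard library and reads `/proc/uptime` instead of calling `unix.Sysinfo`), the same calculation can be sketched like this:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strconv"
	"strings"
	"time"
)

// bootTimeFromProcUptime reads the first field of /proc/uptime (seconds since
// boot) and subtracts it from the current time, mirroring the Sysinfo-based
// approach in boottime_util_linux.go above.
func bootTimeFromProcUptime() (time.Time, error) {
	raw, err := ioutil.ReadFile("/proc/uptime")
	if err != nil {
		return time.Time{}, err
	}
	fields := strings.Fields(string(raw))
	if len(fields) == 0 {
		return time.Time{}, fmt.Errorf("unexpected /proc/uptime contents: %q", raw)
	}
	uptime, err := strconv.ParseFloat(fields[0], 64)
	if err != nil {
		return time.Time{}, err
	}
	return time.Now().Add(-time.Duration(uptime * float64(time.Second))).Truncate(time.Second), nil
}

func main() {
	if t, err := bootTimeFromProcUptime(); err == nil {
		fmt.Println("approximate boot time:", t)
	}
}
```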
5 vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unsupported.go generated vendored
@ -45,3 +45,8 @@ func UnlockPath(fileHandles []uintptr) {
func LocalEndpoint(path, file string) string {
	return ""
}

// GetBootTime empty implementation
func GetBootTime() (time.Time, error) {
	return time.Time{}, fmt.Errorf("GetBootTime is unsupported in this build")
}
13 vendor/k8s.io/kubernetes/pkg/kubelet/util/util_windows.go generated vendored
@ -23,6 +23,7 @@ import (
	"net"
	"net/url"
	"strings"
	"syscall"
	"time"

	"github.com/Microsoft/go-winio"
@ -112,3 +113,15 @@ func LocalEndpoint(path, file string) string {
	}
	return u.String() + "//./pipe/" + file
}

var tickCount = syscall.NewLazyDLL("kernel32.dll").NewProc("GetTickCount64")

// GetBootTime returns the time at which the machine was started, truncated to the nearest second
func GetBootTime() (time.Time, error) {
	currentTime := time.Now()
	output, _, err := tickCount.Call()
	if errno, ok := err.(syscall.Errno); !ok || errno != 0 {
		return time.Time{}, err
	}
	return currentTime.Add(-time.Duration(output) * time.Millisecond).Truncate(time.Second), nil
}
13 vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/desired_state_of_world.go generated vendored
@ -203,11 +203,12 @@ func (dsw *desiredStateOfWorld) AddPodToVolume(

	var volumeName v1.UniqueVolumeName

	// The unique volume name used depends on whether the volume is attachable
	// The unique volume name used depends on whether the volume is attachable/device-mountable
	// or not.
	attachable := dsw.isAttachableVolume(volumeSpec)
	if attachable {
		// For attachable volumes, use the unique volume name as reported by
	deviceMountable := dsw.isDeviceMountableVolume(volumeSpec)
	if attachable || deviceMountable {
		// For attachable/device-mountable volumes, use the unique volume name as reported by
		// the plugin.
		volumeName, err =
			util.GetUniqueVolumeNameFromSpec(volumePlugin, volumeSpec)
@ -219,13 +220,11 @@ func (dsw *desiredStateOfWorld) AddPodToVolume(
				err)
		}
	} else {
		// For non-attachable volumes, generate a unique name based on the pod
		// For non-attachable and non-device-mountable volumes, generate a unique name based on the pod
		// namespace and name and the name of the volume within the pod.
		volumeName = util.GetUniqueVolumeNameForNonAttachableVolume(podName, volumePlugin, volumeSpec)
		volumeName = util.GetUniqueVolumeNameFromSpecWithPod(podName, volumePlugin, volumeSpec)
	}

	deviceMountable := dsw.isDeviceMountableVolume(volumeSpec)

	if _, volumeExists := dsw.volumesToMount[volumeName]; !volumeExists {
		dsw.volumesToMount[volumeName] = volumeToMount{
			volumeName: volumeName,
162 vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/desired_state_of_world_test.go generated vendored
@ -117,6 +117,168 @@ func Test_AddPodToVolume_Positive_ExistingPodExistingVolume(t *testing.T) {
	verifyVolumeExistsWithSpecNameInVolumeDsw(t, podName, volumeSpec.Name(), dsw)
}

// Call AddPodToVolume() on different pods for different kinds of volumes
// Verifies generated names are the same for different pods if the volume is device-mountable or attachable
// Verifies generated names are different for different pods if the volume is neither device-mountable nor attachable
func Test_AddPodToVolume_Positive_NamesForDifferentPodsAndDifferentVolumes(t *testing.T) {
	// Arrange
	fakeVolumeHost := volumetesting.NewFakeVolumeHost(
		"",  /* rootDir */
		nil, /* kubeClient */
		nil, /* plugins */
	)
	plugins := []volume.VolumePlugin{
		&volumetesting.FakeBasicVolumePlugin{
			Plugin: volumetesting.FakeVolumePlugin{
				PluginName: "basic",
			},
		},
		&volumetesting.FakeDeviceMountableVolumePlugin{
			FakeBasicVolumePlugin: volumetesting.FakeBasicVolumePlugin{
				Plugin: volumetesting.FakeVolumePlugin{
					PluginName: "device-mountable",
				},
			},
		},
		&volumetesting.FakeAttachableVolumePlugin{
			FakeDeviceMountableVolumePlugin: volumetesting.FakeDeviceMountableVolumePlugin{
				FakeBasicVolumePlugin: volumetesting.FakeBasicVolumePlugin{
					Plugin: volumetesting.FakeVolumePlugin{
						PluginName: "attachable",
					},
				},
			},
		},
	}
	volumePluginMgr := volume.VolumePluginMgr{}
	volumePluginMgr.InitPlugins(plugins, nil /* prober */, fakeVolumeHost)
	dsw := NewDesiredStateOfWorld(&volumePluginMgr)

	testcases := map[string]struct {
		pod1 *v1.Pod
		pod2 *v1.Pod
		same bool
	}{
		"basic": {
			&v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "pod1",
					UID:  "pod1uid",
				},
				Spec: v1.PodSpec{
					Volumes: []v1.Volume{
						{
							Name:         "basic",
							VolumeSource: v1.VolumeSource{},
						},
					},
				},
			},
			&v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "pod2",
					UID:  "pod2uid",
				},
				Spec: v1.PodSpec{
					Volumes: []v1.Volume{
						{
							Name:         "basic",
							VolumeSource: v1.VolumeSource{},
						},
					},
				},
			},
			false,
		},
		"device-mountable": {
			&v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "pod1",
					UID:  "pod1uid",
				},
				Spec: v1.PodSpec{
					Volumes: []v1.Volume{
						{
							Name:         "device-mountable",
							VolumeSource: v1.VolumeSource{},
						},
					},
				},
			},
			&v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "pod2",
					UID:  "pod2uid",
				},
				Spec: v1.PodSpec{
					Volumes: []v1.Volume{
						{
							Name:         "device-mountable",
							VolumeSource: v1.VolumeSource{},
						},
					},
				},
			},
			true,
		},
		"attachable": {
			&v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "pod1",
					UID:  "pod1uid",
				},
				Spec: v1.PodSpec{
					Volumes: []v1.Volume{
						{
							Name:         "attachable",
							VolumeSource: v1.VolumeSource{},
						},
					},
				},
			},
			&v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "pod2",
					UID:  "pod2uid",
				},
				Spec: v1.PodSpec{
					Volumes: []v1.Volume{
						{
							Name:         "attachable",
							VolumeSource: v1.VolumeSource{},
						},
					},
				},
			},
			true,
		},
	}

	// Act & Assert
	for name, v := range testcases {
		volumeSpec1 := &volume.Spec{Volume: &v.pod1.Spec.Volumes[0]}
		volumeSpec2 := &volume.Spec{Volume: &v.pod2.Spec.Volumes[0]}
		generatedVolumeName1, err1 := dsw.AddPodToVolume(util.GetUniquePodName(v.pod1), v.pod1, volumeSpec1, volumeSpec1.Name(), "")
		generatedVolumeName2, err2 := dsw.AddPodToVolume(util.GetUniquePodName(v.pod2), v.pod2, volumeSpec2, volumeSpec2.Name(), "")
		if err1 != nil {
			t.Fatalf("test %q: AddPodToVolume failed. Expected: <no error> Actual: <%v>", name, err1)
		}
		if err2 != nil {
			t.Fatalf("test %q: AddPodToVolume failed. Expected: <no error> Actual: <%v>", name, err2)
		}
		if v.same {
			if generatedVolumeName1 != generatedVolumeName2 {
				t.Fatalf("test %q: AddPodToVolume should generate same names, but got %q != %q", name, generatedVolumeName1, generatedVolumeName2)
			}
		} else {
			if generatedVolumeName1 == generatedVolumeName2 {
				t.Fatalf("test %q: AddPodToVolume should generate different names, but got %q == %q", name, generatedVolumeName1, generatedVolumeName2)
			}
		}
	}

}

// Populates data struct with a new volume/pod
// Calls DeletePodFromVolume() to remove the pod
// Verifies newly added pod/volume are deleted
8 vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go generated vendored
@ -455,6 +455,10 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume,
	if err != nil {
		return nil, err
	}
	deviceMountablePlugin, err := rc.volumePluginMgr.FindDeviceMountablePluginByName(volume.pluginName)
	if err != nil {
		return nil, err
	}

	// Create pod object
	pod := &v1.Pod{
@ -480,13 +484,13 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume,
	}

	var uniqueVolumeName v1.UniqueVolumeName
	if attachablePlugin != nil {
	if attachablePlugin != nil || deviceMountablePlugin != nil {
		uniqueVolumeName, err = util.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
		if err != nil {
			return nil, err
		}
	} else {
		uniqueVolumeName = util.GetUniqueVolumeNameForNonAttachableVolume(volume.podName, plugin, volumeSpec)
		uniqueVolumeName = util.GetUniqueVolumeNameFromSpecWithPod(volume.podName, plugin, volumeSpec)
	}
	// Check existence of mount point for filesystem volume or symbolic link for block volume
	isExist, checkErr := rc.operationExecutor.CheckVolumeExistenceOperation(volumeSpec, volume.mountPath, volumeSpec.Name(), rc.mounter, uniqueVolumeName, volume.podName, pod.UID, attachablePlugin)
12 vendor/k8s.io/kubernetes/pkg/proxy/ipvs/graceful_termination.go generated vendored
@ -75,10 +75,10 @@ func (q *graceTerminateRSList) remove(rs *listItem) bool {

	uniqueRS := rs.String()
	if _, ok := q.list[uniqueRS]; ok {
		return false
		delete(q.list, uniqueRS)
		return true
	}
	delete(q.list, uniqueRS)
	return true
	return false
}

func (q *graceTerminateRSList) flushList(handler func(rsToDelete *listItem) (bool, error)) bool {
@ -164,7 +164,11 @@ func (m *GracefulTerminationManager) deleteRsFunc(rsToDelete *listItem) (bool, e
	}
	for _, rs := range rss {
		if rsToDelete.RealServer.Equal(rs) {
			if rs.ActiveConn != 0 {
			// Delete RS with no connections
			// For UDP, ActiveConn is always 0
			// For TCP, InactiveConn are connections not in ESTABLISHED state
			if rs.ActiveConn+rs.InactiveConn != 0 {
				klog.Infof("Not deleting, RS %v: %v ActiveConn, %v InactiveConn", rsToDelete.String(), rs.ActiveConn, rs.InactiveConn)
				return false, nil
			}
			klog.Infof("Deleting rs: %s", rsToDelete.String())
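The hunk above tightens the deletion guard so that an IPVS real server is only removed once both its active and inactive connection counts reach zero (UDP endpoints always report zero active connections, so inactive connections must be considered too). A minimal sketch of that decision, using a hypothetical stats struct rather than the real utilipvs types:

```go
package main

import "fmt"

// realServerStats is a hypothetical stand-in for the connection counters the
// IPVS proxier reads for each real server.
type realServerStats struct {
	ActiveConn   int // TCP connections in ESTABLISHED state (always 0 for UDP)
	InactiveConn int // TCP connections not in ESTABLISHED state
}

// okToDelete mirrors the guard in deleteRsFunc: keep the real server while any
// connection, active or inactive, is still tracked by IPVS.
func okToDelete(s realServerStats) bool {
	return s.ActiveConn+s.InactiveConn == 0
}

func main() {
	fmt.Println(okToDelete(realServerStats{ActiveConn: 0, InactiveConn: 3})) // false: still draining
	fmt.Println(okToDelete(realServerStats{}))                               // true: safe to delete
}
```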
84 vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go generated vendored
@ -162,6 +162,8 @@ const sysctlRouteLocalnet = "net/ipv4/conf/all/route_localnet"
const sysctlBridgeCallIPTables = "net/bridge/bridge-nf-call-iptables"
const sysctlVSConnTrack = "net/ipv4/vs/conntrack"
const sysctlConnReuse = "net/ipv4/vs/conn_reuse_mode"
const sysctlExpireNoDestConn = "net/ipv4/vs/expire_nodest_conn"
const sysctlExpireQuiescentTemplate = "net/ipv4/vs/expire_quiescent_template"
const sysctlForward = "net/ipv4/ip_forward"
const sysctlArpIgnore = "net/ipv4/conf/all/arp_ignore"
const sysctlArpAnnounce = "net/ipv4/conf/all/arp_announce"
@ -321,6 +323,20 @@ func NewProxier(ipt utiliptables.Interface,
		}
	}

	// Set the expire_nodest_conn sysctl we need for
	if val, _ := sysctl.GetSysctl(sysctlExpireNoDestConn); val != 1 {
		if err := sysctl.SetSysctl(sysctlExpireNoDestConn, 1); err != nil {
			return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlExpireNoDestConn, err)
		}
	}

	// Set the expire_quiescent_template sysctl we need for
	if val, _ := sysctl.GetSysctl(sysctlExpireQuiescentTemplate); val != 1 {
		if err := sysctl.SetSysctl(sysctlExpireQuiescentTemplate, 1); err != nil {
			return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlExpireQuiescentTemplate, err)
		}
	}

	// Set the ip_forward sysctl we need for
	if val, _ := sysctl.GetSysctl(sysctlForward); val != 1 {
		if err := sysctl.SetSysctl(sysctlForward, 1); err != nil {
@ -1190,7 +1206,15 @@ func (proxier *Proxier) syncProxyRules() {
	}
	proxier.portsMap = replacementPortsMap

	// Clean up legacy IPVS services
	// Get legacy bind address
	// currentBindAddrs represents ip addresses bind to DefaultDummyDevice from the system
	currentBindAddrs, err := proxier.netlinkHandle.ListBindAddress(DefaultDummyDevice)
	if err != nil {
		klog.Errorf("Failed to get bind address, err: %v", err)
	}
	legacyBindAddrs := proxier.getLegacyBindAddr(activeBindAddrs, currentBindAddrs)

	// Clean up legacy IPVS services and unbind addresses
	appliedSvcs, err := proxier.ipvs.GetVirtualServers()
	if err == nil {
		for _, appliedSvc := range appliedSvcs {
@ -1199,15 +1223,7 @@ func (proxier *Proxier) syncProxyRules() {
	} else {
		klog.Errorf("Failed to get ipvs service, err: %v", err)
	}
	proxier.cleanLegacyService(activeIPVSServices, currentIPVSServices)

	// Clean up legacy bind address
	// currentBindAddrs represents ip addresses bind to DefaultDummyDevice from the system
	currentBindAddrs, err := proxier.netlinkHandle.ListBindAddress(DefaultDummyDevice)
	if err != nil {
		klog.Errorf("Failed to get bind address, err: %v", err)
	}
	proxier.cleanLegacyBindAddr(activeBindAddrs, currentBindAddrs)
	proxier.cleanLegacyService(activeIPVSServices, currentIPVSServices, legacyBindAddrs)

	// Update healthz timestamp
	if proxier.healthzServer != nil {
@ -1602,32 +1618,41 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode
			Port:    uint16(portNum),
		}

		klog.V(5).Infof("Using graceful delete to delete: %v", delDest)
		klog.V(5).Infof("Using graceful delete to delete: %v", uniqueRS)
		err = proxier.gracefuldeleteManager.GracefulDeleteRS(appliedVirtualServer, delDest)
		if err != nil {
			klog.Errorf("Failed to delete destination: %v, error: %v", delDest, err)
			klog.Errorf("Failed to delete destination: %v, error: %v", uniqueRS, err)
			continue
		}
	}
	return nil
}

func (proxier *Proxier) cleanLegacyService(activeServices map[string]bool, currentServices map[string]*utilipvs.VirtualServer) {
func (proxier *Proxier) cleanLegacyService(activeServices map[string]bool, currentServices map[string]*utilipvs.VirtualServer, legacyBindAddrs map[string]bool) {
	for cs := range currentServices {
		svc := currentServices[cs]
		if _, ok := activeServices[cs]; !ok {
			// This service was not processed in the latest sync loop so before deleting it,
			// make sure it does not fall within an excluded CIDR range.
			okayToDelete := true
			rsList, _ := proxier.ipvs.GetRealServers(svc)

			// If we still have real servers graceful termination is not done
			if len(rsList) > 0 {
				okayToDelete = false
			}
			// Applying graceful termination to all real servers
			for _, rs := range rsList {
				uniqueRS := GetUniqueRSName(svc, rs)
				// if there are in terminating real server in this service, then handle it later
				// If RS is already in the graceful termination list, no need to add it again
				if proxier.gracefuldeleteManager.InTerminationList(uniqueRS) {
					okayToDelete = false
					break
					continue
				}
				klog.V(5).Infof("Using graceful delete to delete: %v", uniqueRS)
				if err := proxier.gracefuldeleteManager.GracefulDeleteRS(svc, rs); err != nil {
					klog.Errorf("Failed to delete destination: %v, error: %v", uniqueRS, err)
				}
			}
			// make sure it does not fall within an excluded CIDR range.
			for _, excludedCIDR := range proxier.excludeCIDRs {
				// Any validation of this CIDR already should have occurred.
				_, n, _ := net.ParseCIDR(excludedCIDR)
@ -1637,26 +1662,33 @@ func (proxier *Proxier) cleanLegacyService(activeServices map[string]bool, curre
				}
			}
			if okayToDelete {
				klog.V(4).Infof("Delete service %s", svc.String())
				if err := proxier.ipvs.DeleteVirtualServer(svc); err != nil {
					klog.Errorf("Failed to delete service, error: %v", err)
					klog.Errorf("Failed to delete service %s, error: %v", svc.String(), err)
				}
				addr := svc.Address.String()
				if _, ok := legacyBindAddrs[addr]; ok {
					klog.V(4).Infof("Unbinding address %s", addr)
					if err := proxier.netlinkHandle.UnbindAddress(addr, DefaultDummyDevice); err != nil {
						klog.Errorf("Failed to unbind service addr %s from dummy interface %s: %v", addr, DefaultDummyDevice, err)
					} else {
						// In case we delete a multi-port service, avoid trying to unbind multiple times
						delete(legacyBindAddrs, addr)
					}
				}
			}
		}
	}
}

func (proxier *Proxier) cleanLegacyBindAddr(activeBindAddrs map[string]bool, currentBindAddrs []string) {
func (proxier *Proxier) getLegacyBindAddr(activeBindAddrs map[string]bool, currentBindAddrs []string) map[string]bool {
	legacyAddrs := make(map[string]bool)
	for _, addr := range currentBindAddrs {
		if _, ok := activeBindAddrs[addr]; !ok {
			// This address was not processed in the latest sync loop
			klog.V(4).Infof("Unbind addr %s", addr)
			err := proxier.netlinkHandle.UnbindAddress(addr, DefaultDummyDevice)
			// Ignore no such address error when try to unbind address
			if err != nil {
				klog.Errorf("Failed to unbind service addr %s from dummy interface %s: %v", addr, DefaultDummyDevice, err)
			}
			legacyAddrs[addr] = true
		}
	}
	return legacyAddrs
}

// Join all words with spaces, terminate with newline and write to buff.
108 vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier_test.go generated vendored
@ -23,7 +23,6 @@ import (
	"reflect"
	"strings"
	"testing"
	"time"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -126,7 +125,7 @@ func (fakeSysctl *FakeSysctl) SetSysctl(sysctl string, newVal int) error {
	return nil
}

func NewFakeProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, ipset utilipset.Interface, nodeIPs []net.IP) *Proxier {
func NewFakeProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, ipset utilipset.Interface, nodeIPs []net.IP, excludeCIDRs []string) *Proxier {
	fcmd := fakeexec.FakeCmd{
		CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
			func() ([]byte, error) { return []byte("dummy device have been created"), nil },
@ -151,7 +150,7 @@ func NewFakeProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, ipset u
		serviceChanges:   proxy.NewServiceChangeTracker(newServiceInfo, nil, nil),
		endpointsMap:     make(proxy.EndpointsMap),
		endpointsChanges: proxy.NewEndpointChangeTracker(testHostname, nil, nil, nil),
		excludeCIDRs:     make([]string, 0),
		excludeCIDRs:     excludeCIDRs,
		iptables:         ipt,
		ipvs:             ipvs,
		ipset:            ipset,
@ -228,7 +227,7 @@ func TestCleanupLeftovers(t *testing.T) {
	ipt := iptablestest.NewFake()
	ipvs := ipvstest.NewFake()
	ipset := ipsettest.NewFake(testIPSetVersion)
	fp := NewFakeProxier(ipt, ipvs, ipset, nil)
	fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcNodePort := 3001
@ -418,7 +417,7 @@ func TestNodePortUDP(t *testing.T) {
	ipt := iptablestest.NewFake()
	ipvs := ipvstest.NewFake()
	ipset := ipsettest.NewFake(testIPSetVersion)
	fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIP})
	fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIP}, nil)

	svcIP := "10.20.30.41"
	svcPort := 80
@ -495,7 +494,7 @@ func TestNodePort(t *testing.T) {
	nodeIPv4 := net.ParseIP("100.101.102.103")
	nodeIPv6 := net.ParseIP("2001:db8::1:1")
	nodeIPs := sets.NewString(nodeIPv4.String(), nodeIPv6.String())
	fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIPv4, nodeIPv6})
	fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIPv4, nodeIPv6}, nil)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcNodePort := 3001
@ -573,7 +572,7 @@ func TestNodePortNoEndpoint(t *testing.T) {
	ipvs := ipvstest.NewFake()
	ipset := ipsettest.NewFake(testIPSetVersion)
	nodeIP := net.ParseIP("100.101.102.103")
	fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIP})
	fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIP}, nil)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcNodePort := 3001
@ -628,7 +627,7 @@ func TestClusterIPNoEndpoint(t *testing.T) {
	ipt := iptablestest.NewFake()
	ipvs := ipvstest.NewFake()
	ipset := ipsettest.NewFake(testIPSetVersion)
	fp := NewFakeProxier(ipt, ipvs, ipset, nil)
	fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcPortName := proxy.ServicePortName{
@ -672,7 +671,7 @@ func TestClusterIP(t *testing.T) {
	ipt := iptablestest.NewFake()
	ipvs := ipvstest.NewFake()
	ipset := ipsettest.NewFake(testIPSetVersion)
	fp := NewFakeProxier(ipt, ipvs, ipset, nil)
	fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil)

	svcIPv4 := "10.20.30.41"
	svcPortV4 := 80
@ -779,7 +778,7 @@ func TestExternalIPsNoEndpoint(t *testing.T) {
	ipt := iptablestest.NewFake()
	ipvs := ipvstest.NewFake()
	ipset := ipsettest.NewFake(testIPSetVersion)
	fp := NewFakeProxier(ipt, ipvs, ipset, nil)
	fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcExternalIPs := "50.60.70.81"
@ -834,7 +833,7 @@ func TestExternalIPs(t *testing.T) {
	ipt := iptablestest.NewFake()
	ipvs := ipvstest.NewFake()
	ipset := ipsettest.NewFake(testIPSetVersion)
	fp := NewFakeProxier(ipt, ipvs, ipset, nil)
	fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcExternalIPs := sets.NewString("50.60.70.81", "2012::51", "127.0.0.1")
@ -1338,7 +1337,7 @@ func TestBuildServiceMapAddRemove(t *testing.T) {
	ipt := iptablestest.NewFake()
	ipvs := ipvstest.NewFake()
	ipset := ipsettest.NewFake(testIPSetVersion)
	fp := NewFakeProxier(ipt, ipvs, ipset, nil)
	fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil)

	services := []*v1.Service{
		makeTestService("somewhere-else", "cluster-ip", func(svc *v1.Service) {
@ -1448,7 +1447,7 @@ func TestBuildServiceMapServiceHeadless(t *testing.T) {
	ipt := iptablestest.NewFake()
	ipvs := ipvstest.NewFake()
	ipset := ipsettest.NewFake(testIPSetVersion)
	fp := NewFakeProxier(ipt, ipvs, ipset, nil)
	fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil)

	makeServiceMap(fp,
		makeTestService("somewhere-else", "headless", func(svc *v1.Service) {
@ -1487,7 +1486,7 @@ func TestBuildServiceMapServiceTypeExternalName(t *testing.T) {
	ipt := iptablestest.NewFake()
	ipvs := ipvstest.NewFake()
	ipset := ipsettest.NewFake(testIPSetVersion)
	fp := NewFakeProxier(ipt, ipvs, ipset, nil)
	fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil)

	makeServiceMap(fp,
		makeTestService("somewhere-else", "external-name", func(svc *v1.Service) {
@ -1515,7 +1514,7 @@ func TestBuildServiceMapServiceUpdate(t *testing.T) {
	ipt := iptablestest.NewFake()
	ipvs := ipvstest.NewFake()
	ipset := ipsettest.NewFake(testIPSetVersion)
	fp := NewFakeProxier(ipt, ipvs, ipset, nil)
	fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil)

	servicev1 := makeTestService("somewhere", "some-service", func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeClusterIP
@ -1599,7 +1598,7 @@ func TestSessionAffinity(t *testing.T) {
	ipvs := ipvstest.NewFake()
	ipset := ipsettest.NewFake(testIPSetVersion)
	nodeIP := net.ParseIP("100.101.102.103")
	fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIP})
	fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIP}, nil)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcNodePort := 3001
@ -2462,7 +2461,7 @@ func Test_updateEndpointsMap(t *testing.T) {
	ipt := iptablestest.NewFake()
	ipvs := ipvstest.NewFake()
	ipset := ipsettest.NewFake(testIPSetVersion)
	fp := NewFakeProxier(ipt, ipvs, ipset, nil)
	fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil)
	fp.hostname = nodeName

	// First check that after adding all previous versions of endpoints,
@ -2706,7 +2705,7 @@ func Test_syncService(t *testing.T) {
	ipt := iptablestest.NewFake()
	ipvs := ipvstest.NewFake()
	ipset := ipsettest.NewFake(testIPSetVersion)
	proxier := NewFakeProxier(ipt, ipvs, ipset, nil)
	proxier := NewFakeProxier(ipt, ipvs, ipset, nil, nil)

	proxier.netlinkHandle.EnsureDummyDevice(DefaultDummyDevice)
	if testCases[i].oldVirtualServer != nil {
@ -2736,7 +2735,7 @@ func buildFakeProxier() (*iptablestest.FakeIPTables, *Proxier) {
	ipt := iptablestest.NewFake()
	ipvs := ipvstest.NewFake()
	ipset := ipsettest.NewFake(testIPSetVersion)
	return ipt, NewFakeProxier(ipt, ipvs, ipset, nil)
	return ipt, NewFakeProxier(ipt, ipvs, ipset, nil, nil)
}

func hasJump(rules []iptablestest.Rule, destChain, ipSet string) bool {
@ -2806,33 +2805,10 @@ func checkIPVS(t *testing.T, fp *Proxier, vs *netlinktest.ExpectedVirtualServer)
}

func TestCleanLegacyService(t *testing.T) {
	execer := exec.New()
	ipt := iptablestest.NewFake()
	ipvs := ipvstest.NewFake()
	ipset := ipsettest.NewFake(testIPSetVersion)
	excludeCIDRs := []string{"3.3.3.0/24", "4.4.4.0/24"}
	proxier, err := NewProxier(
		ipt,
		ipvs,
		ipset,
		NewFakeSysctl(),
		execer,
		250*time.Millisecond,
		100*time.Millisecond,
		excludeCIDRs,
		false,
		0,
		"10.0.0.0/24",
		testHostname,
		net.ParseIP("127.0.0.1"),
		nil,
		nil,
		DefaultScheduler,
		make([]string, 0),
	)
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
	fp := NewFakeProxier(ipt, ipvs, ipset, nil, []string{"3.3.3.0/24", "4.4.4.0/24"})

	// All ipvs services that were processed in the latest sync loop.
	activeServices := map[string]bool{"ipvs0": true, "ipvs1": true}
@ -2888,15 +2864,22 @@ func TestCleanLegacyService(t *testing.T) {
		},
	}
	for v := range currentServices {
		proxier.ipvs.AddVirtualServer(currentServices[v])
		fp.ipvs.AddVirtualServer(currentServices[v])
	}
	proxier.cleanLegacyService(activeServices, currentServices)

	fp.netlinkHandle.EnsureDummyDevice(DefaultDummyDevice)
	activeBindAddrs := map[string]bool{"1.1.1.1": true, "2.2.2.2": true, "3.3.3.3": true, "4.4.4.4": true}
	currentBindAddrs := []string{"1.1.1.1", "2.2.2.2", "3.3.3.3", "4.4.4.4", "5.5.5.5", "6.6.6.6"}
	for i := range currentBindAddrs {
		fp.netlinkHandle.EnsureAddressBind(currentBindAddrs[i], DefaultDummyDevice)
	}

	fp.cleanLegacyService(activeServices, currentServices, map[string]bool{"5.5.5.5": true, "6.6.6.6": true})
	// ipvs4 and ipvs5 should have been cleaned.
	remainingVirtualServers, _ := proxier.ipvs.GetVirtualServers()
	remainingVirtualServers, _ := fp.ipvs.GetVirtualServers()
	if len(remainingVirtualServers) != 4 {
		t.Errorf("Expected number of remaining IPVS services after cleanup to be %v. Got %v", 4, len(remainingVirtualServers))
	}

	for _, vs := range remainingVirtualServers {
		// Checking that ipvs4 and ipvs5 were removed.
		if vs.Port == 57 {
@ -2906,33 +2889,13 @@ func TestCleanLegacyService(t *testing.T) {
			t.Errorf("Expected ipvs5 to be removed after cleanup. It still remains")
		}
	}
}

func TestCleanLegacyBindAddr(t *testing.T) {
	ipt := iptablestest.NewFake()
	ipvs := ipvstest.NewFake()
	ipset := ipsettest.NewFake(testIPSetVersion)
	fp := NewFakeProxier(ipt, ipvs, ipset, nil)

	// All ipvs service addresses that were bound to ipvs0 in the latest sync loop.
	activeBindAddrs := map[string]bool{"1.2.3.4": true, "1002:ab8::2:1": true}
	// All service addresses that were bound to ipvs0 in system
	currentBindAddrs := []string{"1.2.3.4", "1.2.3.5", "1.2.3.6", "1002:ab8::2:1", "1002:ab8::2:2"}

	fp.netlinkHandle.EnsureDummyDevice(DefaultDummyDevice)

	for i := range currentBindAddrs {
		fp.netlinkHandle.EnsureAddressBind(currentBindAddrs[i], DefaultDummyDevice)
	}
	fp.cleanLegacyBindAddr(activeBindAddrs, currentBindAddrs)

	// Addresses 5.5.5.5 and 6.6.6.6 should not be bound any more
	remainingAddrs, _ := fp.netlinkHandle.ListBindAddress(DefaultDummyDevice)
	// should only remain "1.2.3.4" and "1002:ab8::2:1"
	if len(remainingAddrs) != 2 {
		t.Errorf("Expected number of remaining bound addrs after cleanup to be %v. Got %v", 2, len(remainingAddrs))
	if len(remainingAddrs) != 4 {
		t.Errorf("Expected number of remaining bound addrs after cleanup to be %v. Got %v", 4, len(remainingAddrs))
	}

	// check that address "1.2.3.4" and "1002:ab8::2:1" remain
	// check that address "1.1.1.1", "2.2.2.2", "3.3.3.3", "4.4.4.4" are still bound
	remainingAddrsMap := make(map[string]bool)
	for i := range remainingAddrs {
		remainingAddrsMap[remainingAddrs[i]] = true
@ -2940,13 +2903,14 @@ func TestCleanLegacyBindAddr(t *testing.T) {
	if !reflect.DeepEqual(activeBindAddrs, remainingAddrsMap) {
		t.Errorf("Expected remainingAddrsMap %v, got %v", activeBindAddrs, remainingAddrsMap)
	}

}

func TestMultiPortServiceBindAddr(t *testing.T) {
	ipt := iptablestest.NewFake()
	ipvs := ipvstest.NewFake()
	ipset := ipsettest.NewFake(testIPSetVersion)
	fp := NewFakeProxier(ipt, ipvs, ipset, nil)
	fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil)

	service1 := makeTestService("ns1", "svc1", func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeClusterIP
47 vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go generated vendored
@ -17,6 +17,8 @@ limitations under the License.
package util

import (
	"context"
	"errors"
	"fmt"
	"net"

@ -35,6 +37,11 @@ const (
	IPv6ZeroCIDR = "::/0"
)

var (
	ErrAddressNotAllowed = errors.New("address not allowed")
	ErrNoAddresses       = errors.New("No addresses for hostname")
)

func IsZeroCIDR(cidr string) bool {
	if cidr == IPv4ZeroCIDR || cidr == IPv6ZeroCIDR {
		return true
@ -42,6 +49,46 @@ func IsZeroCIDR(cidr string) bool {
	return false
}

// IsProxyableIP checks if a given IP address is permitted to be proxied
func IsProxyableIP(ip string) error {
	netIP := net.ParseIP(ip)
	if netIP == nil {
		return ErrAddressNotAllowed
	}
	return isProxyableIP(netIP)
}

func isProxyableIP(ip net.IP) error {
	if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() || ip.IsInterfaceLocalMulticast() {
		return ErrAddressNotAllowed
	}
	return nil
}

// Resolver is an interface for net.Resolver
type Resolver interface {
	LookupIPAddr(ctx context.Context, host string) ([]net.IPAddr, error)
}

// IsProxyableHostname checks if the IP addresses for a given hostname are permitted to be proxied
func IsProxyableHostname(ctx context.Context, resolv Resolver, hostname string) error {
	resp, err := resolv.LookupIPAddr(ctx, hostname)
	if err != nil {
		return err
	}

	if len(resp) == 0 {
		return ErrNoAddresses
	}

	for _, host := range resp {
		if err := isProxyableIP(host.IP); err != nil {
			return err
		}
	}
	return nil
}

func IsLocalIP(ip string) (bool, error) {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
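The new helpers above are consumed by the node and pod registry strategies later in this diff. A small standalone sketch of how a caller might use them, assuming the import path shown in the diff and using the standard library's `net.DefaultResolver` (which satisfies the `Resolver` interface defined above):

```go
package main

import (
	"context"
	"fmt"
	"net"

	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
)

func main() {
	// Reject loopback, link-local and multicast targets before proxying.
	if err := proxyutil.IsProxyableIP("169.254.169.254"); err != nil {
		fmt.Println("refusing to proxy:", err) // address not allowed
	}

	// For hostnames, every resolved address must be proxyable.
	err := proxyutil.IsProxyableHostname(context.Background(), net.DefaultResolver, "k8s.io")
	fmt.Println("k8s.io proxyable:", err == nil)
}
```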
69 vendor/k8s.io/kubernetes/pkg/proxy/util/utils_test.go generated vendored
@ -17,6 +17,7 @@ limitations under the License.
package util

import (
	"context"
	"net"
	"testing"

@ -27,6 +28,74 @@ import (
	fake "k8s.io/kubernetes/pkg/proxy/util/testing"
)

func TestIsProxyableIP(t *testing.T) {
	testCases := []struct {
		ip   string
		want error
	}{
		{"127.0.0.1", ErrAddressNotAllowed},
		{"127.0.0.2", ErrAddressNotAllowed},
		{"169.254.169.254", ErrAddressNotAllowed},
		{"169.254.1.1", ErrAddressNotAllowed},
		{"224.0.0.0", ErrAddressNotAllowed},
		{"10.0.0.1", nil},
		{"192.168.0.1", nil},
		{"172.16.0.1", nil},
		{"8.8.8.8", nil},
		{"::1", ErrAddressNotAllowed},
		{"fe80::", ErrAddressNotAllowed},
		{"ff02::", ErrAddressNotAllowed},
		{"ff01::", ErrAddressNotAllowed},
		{"2600::", nil},
		{"1", ErrAddressNotAllowed},
		{"", ErrAddressNotAllowed},
	}

	for i := range testCases {
		got := IsProxyableIP(testCases[i].ip)
		if testCases[i].want != got {
			t.Errorf("case %d: expected %v, got %v", i, testCases[i].want, got)
		}
	}
}

type dummyResolver struct {
	ips []string
	err error
}

func (r *dummyResolver) LookupIPAddr(ctx context.Context, host string) ([]net.IPAddr, error) {
	if r.err != nil {
		return nil, r.err
	}
	resp := []net.IPAddr{}
	for _, ipString := range r.ips {
		resp = append(resp, net.IPAddr{IP: net.ParseIP(ipString)})
	}
	return resp, nil
}

func TestIsProxyableHostname(t *testing.T) {
	testCases := []struct {
		hostname string
		ips      []string
		want     error
	}{
		{"k8s.io", []string{}, ErrNoAddresses},
		{"k8s.io", []string{"8.8.8.8"}, nil},
		{"k8s.io", []string{"169.254.169.254"}, ErrAddressNotAllowed},
		{"k8s.io", []string{"127.0.0.1", "8.8.8.8"}, ErrAddressNotAllowed},
	}

	for i := range testCases {
		resolv := dummyResolver{ips: testCases[i].ips}
		got := IsProxyableHostname(context.Background(), &resolv, testCases[i].hostname)
		if testCases[i].want != got {
			t.Errorf("case %d: expected %v, got %v", i, testCases[i].want, got)
		}
	}
}

func TestShouldSkipService(t *testing.T) {
	testCases := []struct {
		service *v1.Service
1 vendor/k8s.io/kubernetes/pkg/registry/core/node/BUILD generated vendored
@ -19,6 +19,7 @@ go_library(
        "//pkg/apis/core/validation:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/kubelet/client:go_default_library",
        "//pkg/proxy/util:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
5 vendor/k8s.io/kubernetes/pkg/registry/core/node/strategy.go generated vendored
@ -40,6 +40,7 @@ import (
	"k8s.io/kubernetes/pkg/apis/core/validation"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/kubelet/client"
	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
)

// nodeStrategy implements behavior for nodes
@ -217,6 +218,10 @@ func ResourceLocation(getter ResourceGetter, connection client.ConnectionInfoGet
			nil
	}

	if err := proxyutil.IsProxyableHostname(ctx, &net.Resolver{}, info.Hostname); err != nil {
		return nil, nil, errors.NewBadRequest(err.Error())
	}

	// Otherwise, return the requested scheme and port, and the proxy transport
	return &url.URL{Scheme: schemeReq, Host: net.JoinHostPort(info.Hostname, portReq)}, proxyTransport, nil
}
1 vendor/k8s.io/kubernetes/pkg/registry/core/pod/BUILD generated vendored
@ -20,6 +20,7 @@ go_library(
        "//pkg/apis/core/helper/qos:go_default_library",
        "//pkg/apis/core/validation:go_default_library",
        "//pkg/kubelet/client:go_default_library",
        "//pkg/proxy/util:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
5 vendor/k8s.io/kubernetes/pkg/registry/core/pod/strategy.go generated vendored
@ -47,6 +47,7 @@ import (
	"k8s.io/kubernetes/pkg/apis/core/helper/qos"
	"k8s.io/kubernetes/pkg/apis/core/validation"
	"k8s.io/kubernetes/pkg/kubelet/client"
	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
)

// podStrategy implements behavior for Pods
@ -290,6 +291,10 @@ func ResourceLocation(getter ResourceGetter, rt http.RoundTripper, ctx context.C
		}
	}

	if err := proxyutil.IsProxyableIP(pod.Status.PodIP); err != nil {
		return nil, nil, errors.NewBadRequest(err.Error())
	}

	loc := &url.URL{
		Scheme: scheme,
	}
2 vendor/k8s.io/kubernetes/pkg/registry/scheduling/rest/storage_scheduling.go generated vendored
@ -93,7 +93,7 @@ func AddSystemPriorityClasses() genericapiserver.PostStartHookFunc {
				} else {
					// Unable to get the priority class for reasons other than "not found".
					klog.Warningf("unable to get PriorityClass %v: %v. Retrying...", pc.Name, err)
					return false, err
					return false, nil
				}
			}
		}
1 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/BUILD generated vendored
@ -17,7 +17,6 @@ go_library(
    deps = [
        "//pkg/scheduler/api:go_default_library",
        "//pkg/scheduler/cache:go_default_library",
        "//pkg/scheduler/internal/cache:go_default_library",
        "//staging/src/k8s.io/api/apps/v1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
6
vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/types.go
generated
vendored
6
vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/types.go
generated
vendored
@ -23,7 +23,6 @@ import (
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
|
||||
)
|
||||
|
||||
// NodeFieldSelectorKeys is a map that: the key are node field selector keys; the values are
|
||||
@ -92,6 +91,9 @@ type NodeLister interface {
|
||||
List() ([]*v1.Node, error)
|
||||
}
|
||||
|
||||
// PodFilter is a function to filter a pod. If pod passed return true else return false.
|
||||
type PodFilter func(*v1.Pod) bool
|
||||
|
||||
// PodLister interface represents anything that can list pods for a scheduler.
|
||||
type PodLister interface {
|
||||
// We explicitly return []*v1.Pod, instead of v1.PodList, to avoid
|
||||
@@ -99,7 +101,7 @@ type PodLister interface {
	List(labels.Selector) ([]*v1.Pod, error)
	// This is similar to "List()", but the returned slice does not
	// contain pods that don't pass `podFilter`.
-	FilteredList(podFilter schedulerinternalcache.PodFilter, selector labels.Selector) ([]*v1.Pod, error)
+	FilteredList(podFilter PodFilter, selector labels.Selector) ([]*v1.Pod, error)
}
|
||||
|
||||
// ServiceLister interface represents anything that can produce a list of services; the list is consumed by a scheduler.
|
||||
|
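The hunks around this point move `PodFilter` into the `algorithm` package so the scheduler cache, the fake cache, and the fake listers all share one `FilteredList` signature. A short sketch of how a caller might use a filter of that shape; the `PodLister` interface mirrors the one shown above, while the lister value, node name, and phase check are purely illustrative:

```go
// Sketch: filtering pods with an algorithm.PodFilter-shaped function.
package sketch

import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
)

// PodFilter matches the signature introduced in pkg/scheduler/algorithm.
type PodFilter func(*v1.Pod) bool

// PodLister mirrors the interface in types.go; a concrete lister (e.g. the
// scheduler cache) is assumed to be provided elsewhere.
type PodLister interface {
	List(labels.Selector) ([]*v1.Pod, error)
	FilteredList(podFilter PodFilter, selector labels.Selector) ([]*v1.Pod, error)
}

// runningPodsOnNode lists running pods bound to nodeName, ignoring everything else.
func runningPodsOnNode(lister PodLister, nodeName string) ([]*v1.Pod, error) {
	onNodeAndRunning := func(p *v1.Pod) bool {
		return p.Spec.NodeName == nodeName && p.Status.Phase == v1.PodRunning
	}
	return lister.FilteredList(onNodeAndRunning, labels.Everything())
}
```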
56
vendor/k8s.io/kubernetes/pkg/scheduler/core/generic_scheduler.go
generated
vendored
@@ -351,7 +351,7 @@ func (g *genericScheduler) processPreemptionWithExtenders(
// worth the complexity, especially because we generally expect to have a very
// small number of nominated pods per node.
func (g *genericScheduler) getLowerPriorityNominatedPods(pod *v1.Pod, nodeName string) []*v1.Pod {
-	pods := g.schedulingQueue.WaitingPodsForNode(nodeName)
+	pods := g.schedulingQueue.NominatedPodsForNode(nodeName)

	if len(pods) == 0 {
		return nil
|
||||
@ -501,7 +501,7 @@ func addNominatedPods(pod *v1.Pod, meta algorithm.PredicateMetadata,
|
||||
// This may happen only in tests.
|
||||
return false, meta, nodeInfo
|
||||
}
|
||||
nominatedPods := queue.WaitingPodsForNode(nodeInfo.Node().Name)
|
||||
nominatedPods := queue.NominatedPodsForNode(nodeInfo.Node().Name)
|
||||
if nominatedPods == nil || len(nominatedPods) == 0 {
|
||||
return false, meta, nodeInfo
|
||||
}
|
||||
@ -655,24 +655,26 @@ func PrioritizeNodes(
|
||||
|
||||
// DEPRECATED: we can remove this when all priorityConfigs implement the
|
||||
// Map-Reduce pattern.
|
||||
workqueue.ParallelizeUntil(context.TODO(), 16, len(priorityConfigs), func(i int) {
|
||||
priorityConfig := priorityConfigs[i]
|
||||
if priorityConfig.Function == nil {
|
||||
for i := range priorityConfigs {
|
||||
if priorityConfigs[i].Function != nil {
|
||||
wg.Add(1)
|
||||
go func(index int) {
|
||||
defer wg.Done()
|
||||
var err error
|
||||
results[index], err = priorityConfigs[index].Function(pod, nodeNameToInfo, nodes)
|
||||
if err != nil {
|
||||
appendError(err)
|
||||
}
|
||||
}(i)
|
||||
} else {
|
||||
results[i] = make(schedulerapi.HostPriorityList, len(nodes))
|
||||
return
|
||||
}
|
||||
|
||||
var err error
|
||||
results[i], err = priorityConfig.Function(pod, nodeNameToInfo, nodes)
|
||||
if err != nil {
|
||||
appendError(err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
workqueue.ParallelizeUntil(context.TODO(), 16, len(nodes), func(index int) {
|
||||
nodeInfo := nodeNameToInfo[nodes[index].Name]
|
||||
for i, priorityConfig := range priorityConfigs {
|
||||
if priorityConfig.Function != nil {
|
||||
for i := range priorityConfigs {
|
||||
if priorityConfigs[i].Function != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
@ -685,22 +687,22 @@ func PrioritizeNodes(
|
||||
}
|
||||
})
|
||||
|
||||
for i, priorityConfig := range priorityConfigs {
|
||||
if priorityConfig.Reduce == nil {
|
||||
for i := range priorityConfigs {
|
||||
if priorityConfigs[i].Reduce == nil {
|
||||
continue
|
||||
}
|
||||
wg.Add(1)
|
||||
go func(index int, config algorithm.PriorityConfig) {
|
||||
go func(index int) {
|
||||
defer wg.Done()
|
||||
if err := config.Reduce(pod, meta, nodeNameToInfo, results[index]); err != nil {
|
||||
if err := priorityConfigs[index].Reduce(pod, meta, nodeNameToInfo, results[index]); err != nil {
|
||||
appendError(err)
|
||||
}
|
||||
if klog.V(10) {
|
||||
for _, hostPriority := range results[index] {
|
||||
klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), hostPriority.Host, config.Name, hostPriority.Score)
|
||||
klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), hostPriority.Host, priorityConfigs[index].Name, hostPriority.Score)
|
||||
}
|
||||
}
|
||||
}(i, priorityConfig)
|
||||
}(i)
|
||||
}
|
||||
// Wait for all computations to be finished.
|
||||
wg.Wait()
|
||||
@ -720,14 +722,14 @@ func PrioritizeNodes(
|
||||
|
||||
if len(extenders) != 0 && nodes != nil {
|
||||
combinedScores := make(map[string]int, len(nodeNameToInfo))
|
||||
for _, extender := range extenders {
|
||||
if !extender.IsInterested(pod) {
|
||||
for i := range extenders {
|
||||
if !extenders[i].IsInterested(pod) {
|
||||
continue
|
||||
}
|
||||
wg.Add(1)
|
||||
go func(ext algorithm.SchedulerExtender) {
|
||||
go func(extIndex int) {
|
||||
defer wg.Done()
|
||||
prioritizedList, weight, err := ext.Prioritize(pod, nodes)
|
||||
prioritizedList, weight, err := extenders[extIndex].Prioritize(pod, nodes)
|
||||
if err != nil {
|
||||
// Prioritization errors from extender can be ignored, let k8s/other extenders determine the priorities
|
||||
return
|
||||
@ -736,12 +738,12 @@ func PrioritizeNodes(
|
||||
for i := range *prioritizedList {
|
||||
host, score := (*prioritizedList)[i].Host, (*prioritizedList)[i].Score
|
||||
if klog.V(10) {
|
||||
klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), host, ext.Name(), score)
|
||||
klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), host, extenders[extIndex].Name(), score)
|
||||
}
|
||||
combinedScores[host] += score * weight
|
||||
}
|
||||
mu.Unlock()
|
||||
}(extender)
|
||||
}(i)
|
||||
}
|
||||
// wait for all go routines to finish
|
||||
wg.Wait()
|
||||
|
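The `PrioritizeNodes` refactor above consistently switches from ranging over values (and passing them into goroutines) to capturing the slice index. Behaviour is unchanged; indexing just avoids copying each `PriorityConfig` per goroutine and rules out the usual loop-variable capture mistake. A small, self-contained sketch of the pattern with made-up config and scoring:

```go
// Sketch of the index-capture pattern used throughout the refactor: each
// goroutine closes over the slice and an index instead of a copied element.
package main

import (
	"fmt"
	"sync"
)

type priorityConfig struct {
	Name   string
	Weight int
}

func main() {
	configs := []priorityConfig{{"spread", 1}, {"affinity", 2}, {"taints", 1}}
	results := make([]int, len(configs))

	var wg sync.WaitGroup
	for i := range configs {
		wg.Add(1)
		go func(index int) {
			defer wg.Done()
			// Reading configs[index] inside the goroutine is safe because the
			// slice itself is not mutated while the workers run.
			results[index] = configs[index].Weight * 10
		}(i)
	}
	wg.Wait()

	for i := range configs {
		fmt.Printf("%s -> %d\n", configs[i].Name, results[i])
	}
}
```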
1
vendor/k8s.io/kubernetes/pkg/scheduler/factory/BUILD
generated
vendored
@ -73,6 +73,7 @@ go_test(
|
||||
"//pkg/scheduler/testing:go_default_library",
|
||||
"//pkg/scheduler/util:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
|
56
vendor/k8s.io/kubernetes/pkg/scheduler/factory/factory.go
generated
vendored
@ -992,7 +992,14 @@ func (c *configFactory) updateNodeInCache(oldObj, newObj interface{}) {
|
||||
}
|
||||
|
||||
c.invalidateCachedPredicatesOnNodeUpdate(newNode, oldNode)
|
||||
c.podQueue.MoveAllToActiveQueue()
|
||||
// Only activate unschedulable pods if the node became more schedulable.
|
||||
// We skip the node property comparison when there is no unschedulable pods in the queue
|
||||
// to save processing cycles. We still trigger a move to active queue to cover the case
|
||||
// that a pod being processed by the scheduler is determined unschedulable. We want this
|
||||
// pod to be reevaluated when a change in the cluster happens.
|
||||
if c.podQueue.NumUnschedulablePods() == 0 || nodeSchedulingPropertiesChanged(newNode, oldNode) {
|
||||
c.podQueue.MoveAllToActiveQueue()
|
||||
}
|
||||
}
|
||||
|
||||
func (c *configFactory) invalidateCachedPredicatesOnNodeUpdate(newNode *v1.Node, oldNode *v1.Node) {
|
||||
@ -1064,6 +1071,53 @@ func (c *configFactory) invalidateCachedPredicatesOnNodeUpdate(newNode *v1.Node,
|
||||
}
|
||||
}
|
||||
|
||||
func nodeSchedulingPropertiesChanged(newNode *v1.Node, oldNode *v1.Node) bool {
|
||||
if nodeSpecUnschedulableChanged(newNode, oldNode) {
|
||||
return true
|
||||
}
|
||||
if nodeAllocatableChanged(newNode, oldNode) {
|
||||
return true
|
||||
}
|
||||
if nodeLabelsChanged(newNode, oldNode) {
|
||||
return true
|
||||
}
|
||||
if nodeTaintsChanged(newNode, oldNode) {
|
||||
return true
|
||||
}
|
||||
if nodeConditionsChanged(newNode, oldNode) {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func nodeAllocatableChanged(newNode *v1.Node, oldNode *v1.Node) bool {
|
||||
return !reflect.DeepEqual(oldNode.Status.Allocatable, newNode.Status.Allocatable)
|
||||
}
|
||||
|
||||
func nodeLabelsChanged(newNode *v1.Node, oldNode *v1.Node) bool {
|
||||
return !reflect.DeepEqual(oldNode.GetLabels(), newNode.GetLabels())
|
||||
}
|
||||
|
||||
func nodeTaintsChanged(newNode *v1.Node, oldNode *v1.Node) bool {
|
||||
return !reflect.DeepEqual(newNode.Spec.Taints, oldNode.Spec.Taints)
|
||||
}
|
||||
|
||||
func nodeConditionsChanged(newNode *v1.Node, oldNode *v1.Node) bool {
|
||||
strip := func(conditions []v1.NodeCondition) map[v1.NodeConditionType]v1.ConditionStatus {
|
||||
conditionStatuses := make(map[v1.NodeConditionType]v1.ConditionStatus, len(conditions))
|
||||
for i := range conditions {
|
||||
conditionStatuses[conditions[i].Type] = conditions[i].Status
|
||||
}
|
||||
return conditionStatuses
|
||||
}
|
||||
return !reflect.DeepEqual(strip(oldNode.Status.Conditions), strip(newNode.Status.Conditions))
|
||||
}
|
||||
|
||||
func nodeSpecUnschedulableChanged(newNode *v1.Node, oldNode *v1.Node) bool {
|
||||
return newNode.Spec.Unschedulable != oldNode.Spec.Unschedulable && newNode.Spec.Unschedulable == false
|
||||
}
|
||||
|
||||
func (c *configFactory) deleteNodeFromCache(obj interface{}) {
|
||||
var node *v1.Node
|
||||
switch t := obj.(type) {
|
||||
|
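The new `nodeConditionsChanged` helper compares node conditions by type and status only, so the periodic `LastHeartbeatTime` bumps from kubelets do not count as a scheduling-relevant change (and therefore do not flush the unschedulable queue). A standalone sketch of the same strip-and-compare idea, runnable against the same API types:

```go
// Sketch: compare node conditions by (Type, Status) only, mirroring the
// strip() closure in nodeSchedulingPropertiesChanged's helper. Heartbeat-only
// updates compare equal.
package main

import (
	"fmt"
	"reflect"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func conditionsChanged(oldConds, newConds []v1.NodeCondition) bool {
	strip := func(conds []v1.NodeCondition) map[v1.NodeConditionType]v1.ConditionStatus {
		out := make(map[v1.NodeConditionType]v1.ConditionStatus, len(conds))
		for i := range conds {
			out[conds[i].Type] = conds[i].Status
		}
		return out
	}
	return !reflect.DeepEqual(strip(oldConds), strip(newConds))
}

func main() {
	before := []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue, LastHeartbeatTime: metav1.Unix(1, 0)}}
	after := []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue, LastHeartbeatTime: metav1.Unix(2, 0)}}
	fmt.Println(conditionsChanged(before, after)) // false: only the heartbeat timestamp moved
}
```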
144
vendor/k8s.io/kubernetes/pkg/scheduler/factory/factory_test.go
generated
vendored
@ -24,6 +24,7 @@ import (
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
@ -657,3 +658,146 @@ func testGetBinderFunc(expectedBinderType, podName string, extenders []algorithm
|
||||
t.Errorf("Expected binder %q but got %q", expectedBinderType, binderType)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeAllocatableChanged(t *testing.T) {
|
||||
newQuantity := func(value int64) resource.Quantity {
|
||||
return *resource.NewQuantity(value, resource.BinarySI)
|
||||
}
|
||||
for _, c := range []struct {
|
||||
Name string
|
||||
Changed bool
|
||||
OldAllocatable v1.ResourceList
|
||||
NewAllocatable v1.ResourceList
|
||||
}{
|
||||
{
|
||||
Name: "no allocatable resources changed",
|
||||
Changed: false,
|
||||
OldAllocatable: v1.ResourceList{v1.ResourceMemory: newQuantity(1024)},
|
||||
NewAllocatable: v1.ResourceList{v1.ResourceMemory: newQuantity(1024)},
|
||||
},
|
||||
{
|
||||
Name: "new node has more allocatable resources",
|
||||
Changed: true,
|
||||
OldAllocatable: v1.ResourceList{v1.ResourceMemory: newQuantity(1024)},
|
||||
NewAllocatable: v1.ResourceList{v1.ResourceMemory: newQuantity(1024), v1.ResourceStorage: newQuantity(1024)},
|
||||
},
|
||||
} {
|
||||
oldNode := &v1.Node{Status: v1.NodeStatus{Allocatable: c.OldAllocatable}}
|
||||
newNode := &v1.Node{Status: v1.NodeStatus{Allocatable: c.NewAllocatable}}
|
||||
changed := nodeAllocatableChanged(newNode, oldNode)
|
||||
if changed != c.Changed {
|
||||
t.Errorf("nodeAllocatableChanged should be %t, got %t", c.Changed, changed)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeLabelsChanged(t *testing.T) {
|
||||
for _, c := range []struct {
|
||||
Name string
|
||||
Changed bool
|
||||
OldLabels map[string]string
|
||||
NewLabels map[string]string
|
||||
}{
|
||||
{
|
||||
Name: "no labels changed",
|
||||
Changed: false,
|
||||
OldLabels: map[string]string{"foo": "bar"},
|
||||
NewLabels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
// Labels changed.
|
||||
{
|
||||
Name: "new node has more labels",
|
||||
Changed: true,
|
||||
OldLabels: map[string]string{"foo": "bar"},
|
||||
NewLabels: map[string]string{"foo": "bar", "test": "value"},
|
||||
},
|
||||
} {
|
||||
oldNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: c.OldLabels}}
|
||||
newNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: c.NewLabels}}
|
||||
changed := nodeLabelsChanged(newNode, oldNode)
|
||||
if changed != c.Changed {
|
||||
t.Errorf("Test case %q failed: should be %t, got %t", c.Name, c.Changed, changed)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeTaintsChanged(t *testing.T) {
|
||||
for _, c := range []struct {
|
||||
Name string
|
||||
Changed bool
|
||||
OldTaints []v1.Taint
|
||||
NewTaints []v1.Taint
|
||||
}{
|
||||
{
|
||||
Name: "no taint changed",
|
||||
Changed: false,
|
||||
OldTaints: []v1.Taint{{Key: "key", Value: "value"}},
|
||||
NewTaints: []v1.Taint{{Key: "key", Value: "value"}},
|
||||
},
|
||||
{
|
||||
Name: "taint value changed",
|
||||
Changed: true,
|
||||
OldTaints: []v1.Taint{{Key: "key", Value: "value1"}},
|
||||
NewTaints: []v1.Taint{{Key: "key", Value: "value2"}},
|
||||
},
|
||||
} {
|
||||
oldNode := &v1.Node{Spec: v1.NodeSpec{Taints: c.OldTaints}}
|
||||
newNode := &v1.Node{Spec: v1.NodeSpec{Taints: c.NewTaints}}
|
||||
changed := nodeTaintsChanged(newNode, oldNode)
|
||||
if changed != c.Changed {
|
||||
t.Errorf("Test case %q failed: should be %t, not %t", c.Name, c.Changed, changed)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeConditionsChanged(t *testing.T) {
|
||||
nodeConditionType := reflect.TypeOf(v1.NodeCondition{})
|
||||
if nodeConditionType.NumField() != 6 {
|
||||
t.Errorf("NodeCondition type has changed. The nodeConditionsChanged() function must be reevaluated.")
|
||||
}
|
||||
|
||||
for _, c := range []struct {
|
||||
Name string
|
||||
Changed bool
|
||||
OldConditions []v1.NodeCondition
|
||||
NewConditions []v1.NodeCondition
|
||||
}{
|
||||
{
|
||||
Name: "no condition changed",
|
||||
Changed: false,
|
||||
OldConditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}},
|
||||
NewConditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}},
|
||||
},
|
||||
{
|
||||
Name: "only LastHeartbeatTime changed",
|
||||
Changed: false,
|
||||
OldConditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue, LastHeartbeatTime: metav1.Unix(1, 0)}},
|
||||
NewConditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue, LastHeartbeatTime: metav1.Unix(2, 0)}},
|
||||
},
|
||||
{
|
||||
Name: "new node has more healthy conditions",
|
||||
Changed: true,
|
||||
OldConditions: []v1.NodeCondition{},
|
||||
NewConditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}},
|
||||
},
|
||||
{
|
||||
Name: "new node has less unhealthy conditions",
|
||||
Changed: true,
|
||||
OldConditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}},
|
||||
NewConditions: []v1.NodeCondition{},
|
||||
},
|
||||
{
|
||||
Name: "condition status changed",
|
||||
Changed: true,
|
||||
OldConditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}},
|
||||
NewConditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}},
|
||||
},
|
||||
} {
|
||||
oldNode := &v1.Node{Status: v1.NodeStatus{Conditions: c.OldConditions}}
|
||||
newNode := &v1.Node{Status: v1.NodeStatus{Conditions: c.NewConditions}}
|
||||
changed := nodeConditionsChanged(newNode, oldNode)
|
||||
if changed != c.Changed {
|
||||
t.Errorf("Test case %q failed: should be %t, got %t", c.Name, c.Changed, changed)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
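TestNodeConditionsChanged starts with a reflection check on the field count of `v1.NodeCondition`: if the struct ever grows, the test fails and forces a re-audit of what `nodeConditionsChanged` deliberately ignores. The same tripwire works for any comparison helper that skips fields; a tiny sketch with an invented struct:

```go
// Sketch: a field-count tripwire for comparison helpers that intentionally
// ignore some struct fields.
package main

import (
	"fmt"
	"reflect"
)

type endpoint struct {
	Host string
	Port int
	// If a field is added here, the guard in main starts complaining,
	// prompting a review of sameEndpoint.
}

func sameEndpoint(a, b endpoint) bool { return a.Host == b.Host && a.Port == b.Port }

func main() {
	if n := reflect.TypeOf(endpoint{}).NumField(); n != 2 {
		fmt.Printf("endpoint has %d fields now; revisit sameEndpoint\n", n)
	}
	fmt.Println(sameEndpoint(endpoint{"a", 1}, endpoint{"a", 1}))
}
```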
1
vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/BUILD
generated
vendored
@ -11,6 +11,7 @@ go_library(
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/scheduler/algorithm:go_default_library",
|
||||
"//pkg/scheduler/cache:go_default_library",
|
||||
"//pkg/util/node:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
|
3
vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/cache.go
generated
vendored
@ -27,6 +27,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
|
||||
"k8s.io/klog"
|
||||
@ -149,7 +150,7 @@ func (cache *schedulerCache) List(selector labels.Selector) ([]*v1.Pod, error) {
|
||||
return cache.FilteredList(alwaysTrue, selector)
|
||||
}
|
||||
|
||||
func (cache *schedulerCache) FilteredList(podFilter PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
|
||||
func (cache *schedulerCache) FilteredList(podFilter algorithm.PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
|
||||
cache.mu.RLock()
|
||||
defer cache.mu.RUnlock()
|
||||
// podFilter is expected to return true for most or all of the pods. We
|
||||
|
1
vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/fake/BUILD
generated
vendored
@ -6,6 +6,7 @@ go_library(
|
||||
importpath = "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake",
|
||||
visibility = ["//pkg/scheduler:__subpackages__"],
|
||||
deps = [
|
||||
"//pkg/scheduler/algorithm:go_default_library",
|
||||
"//pkg/scheduler/cache:go_default_library",
|
||||
"//pkg/scheduler/internal/cache:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
|
3
vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/fake/fake_cache.go
generated
vendored
@ -19,6 +19,7 @@ package fake
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
|
||||
)
|
||||
@ -83,7 +84,7 @@ func (c *Cache) UpdateNodeNameToInfoMap(infoMap map[string]*schedulercache.NodeI
|
||||
func (c *Cache) List(s labels.Selector) ([]*v1.Pod, error) { return nil, nil }
|
||||
|
||||
// FilteredList is a fake method for testing.
|
||||
func (c *Cache) FilteredList(filter schedulerinternalcache.PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
|
||||
func (c *Cache) FilteredList(filter algorithm.PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
|
6
vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/interface.go
generated
vendored
@ -19,12 +19,10 @@ package cache
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
)
|
||||
|
||||
// PodFilter is a function to filter a pod. If pod passed return true else return false.
|
||||
type PodFilter func(*v1.Pod) bool
|
||||
|
||||
// Cache collects pods' information and provides node-level aggregated information.
|
||||
// It's intended for generic scheduler to do efficient lookup.
|
||||
// Cache's operations are pod centric. It does incremental updates based on pod events.
|
||||
@ -106,7 +104,7 @@ type Cache interface {
|
||||
List(labels.Selector) ([]*v1.Pod, error)
|
||||
|
||||
// FilteredList returns all cached pods that pass the filter.
|
||||
FilteredList(filter PodFilter, selector labels.Selector) ([]*v1.Pod, error)
|
||||
FilteredList(filter algorithm.PodFilter, selector labels.Selector) ([]*v1.Pod, error)
|
||||
|
||||
// Snapshot takes a snapshot on current cache
|
||||
Snapshot() *Snapshot
|
||||
|
3
vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/BUILD
generated
vendored
@ -12,6 +12,7 @@ go_library(
|
||||
"//pkg/scheduler/util:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//vendor/k8s.io/klog:go_default_library",
|
||||
],
|
||||
@ -22,9 +23,11 @@ go_test(
|
||||
srcs = ["scheduling_queue_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/scheduler/util:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
213
vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/scheduling_queue.go
generated
vendored
@ -36,6 +36,7 @@ import (
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
ktypes "k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
|
||||
@ -62,13 +63,18 @@ type SchedulingQueue interface {
|
||||
MoveAllToActiveQueue()
|
||||
AssignedPodAdded(pod *v1.Pod)
|
||||
AssignedPodUpdated(pod *v1.Pod)
|
||||
WaitingPodsForNode(nodeName string) []*v1.Pod
|
||||
NominatedPodsForNode(nodeName string) []*v1.Pod
|
||||
WaitingPods() []*v1.Pod
|
||||
// Close closes the SchedulingQueue so that the goroutine which is
|
||||
// waiting to pop items can exit gracefully.
|
||||
Close()
|
||||
// UpdateNominatedPodForNode adds the given pod to the nominated pod map or
|
||||
// updates it if it already exists.
|
||||
UpdateNominatedPodForNode(pod *v1.Pod, nodeName string)
|
||||
// DeleteNominatedPodIfExists deletes nominatedPod from internal cache
|
||||
DeleteNominatedPodIfExists(pod *v1.Pod)
|
||||
// NumUnschedulablePods returns the number of unschedulable pods exist in the SchedulingQueue.
|
||||
NumUnschedulablePods() int
|
||||
}
|
||||
|
||||
// NewSchedulingQueue initializes a new scheduling queue. If pod priority is
|
||||
@ -148,9 +154,9 @@ func (f *FIFO) AssignedPodUpdated(pod *v1.Pod) {}
|
||||
// MoveAllToActiveQueue does nothing in FIFO as all pods are always in the active queue.
|
||||
func (f *FIFO) MoveAllToActiveQueue() {}
|
||||
|
||||
// WaitingPodsForNode returns pods that are nominated to run on the given node,
|
||||
// NominatedPodsForNode returns pods that are nominated to run on the given node,
|
||||
// but FIFO does not support it.
|
||||
func (f *FIFO) WaitingPodsForNode(nodeName string) []*v1.Pod {
|
||||
func (f *FIFO) NominatedPodsForNode(nodeName string) []*v1.Pod {
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -162,6 +168,14 @@ func (f *FIFO) Close() {
|
||||
// DeleteNominatedPodIfExists does nothing in FIFO.
|
||||
func (f *FIFO) DeleteNominatedPodIfExists(pod *v1.Pod) {}
|
||||
|
||||
// UpdateNominatedPodForNode does nothing in FIFO.
|
||||
func (f *FIFO) UpdateNominatedPodForNode(pod *v1.Pod, nodeName string) {}
|
||||
|
||||
// NumUnschedulablePods returns the number of unschedulable pods exist in the SchedulingQueue.
|
||||
func (f *FIFO) NumUnschedulablePods() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
// NewFIFO creates a FIFO object.
|
||||
func NewFIFO() *FIFO {
|
||||
return &FIFO{FIFO: cache.NewFIFO(cache.MetaNamespaceKeyFunc)}
|
||||
@ -187,10 +201,9 @@ type PriorityQueue struct {
|
||||
activeQ *Heap
|
||||
// unschedulableQ holds pods that have been tried and determined unschedulable.
|
||||
unschedulableQ *UnschedulablePodsMap
|
||||
// nominatedPods is a map keyed by a node name and the value is a list of
|
||||
// pods which are nominated to run on the node. These are pods which can be in
|
||||
// the activeQ or unschedulableQ.
|
||||
nominatedPods map[string][]*v1.Pod
|
||||
// nominatedPods is a structures that stores pods which are nominated to run
|
||||
// on nodes.
|
||||
nominatedPods *nominatedPodMap
|
||||
// receivedMoveRequest is set to true whenever we receive a request to move a
|
||||
// pod from the unschedulableQ to the activeQ, and is set to false, when we pop
|
||||
// a pod from the activeQ. It indicates if we received a move request when a
|
||||
@ -206,57 +219,38 @@ type PriorityQueue struct {
|
||||
// Making sure that PriorityQueue implements SchedulingQueue.
|
||||
var _ = SchedulingQueue(&PriorityQueue{})
|
||||
|
||||
// podTimeStamp returns pod's last schedule time or its creation time if the
|
||||
// scheduler has never tried scheduling it.
|
||||
func podTimestamp(pod *v1.Pod) *metav1.Time {
|
||||
_, condition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
|
||||
if condition == nil {
|
||||
return &pod.CreationTimestamp
|
||||
}
|
||||
return &condition.LastTransitionTime
|
||||
}
|
||||
|
||||
// activeQComp is the function used by the activeQ heap algorithm to sort pods.
|
||||
// It sorts pods based on their priority. When priorities are equal, it uses
|
||||
// podTimestamp.
|
||||
func activeQComp(pod1, pod2 interface{}) bool {
|
||||
p1 := pod1.(*v1.Pod)
|
||||
p2 := pod2.(*v1.Pod)
|
||||
prio1 := util.GetPodPriority(p1)
|
||||
prio2 := util.GetPodPriority(p2)
|
||||
return (prio1 > prio2) || (prio1 == prio2 && podTimestamp(p1).Before(podTimestamp(p2)))
|
||||
}
|
||||
|
||||
// NewPriorityQueue creates a PriorityQueue object.
|
||||
func NewPriorityQueue() *PriorityQueue {
|
||||
pq := &PriorityQueue{
|
||||
activeQ: newHeap(cache.MetaNamespaceKeyFunc, util.HigherPriorityPod),
|
||||
activeQ: newHeap(cache.MetaNamespaceKeyFunc, activeQComp),
|
||||
unschedulableQ: newUnschedulablePodsMap(),
|
||||
nominatedPods: map[string][]*v1.Pod{},
|
||||
nominatedPods: newNominatedPodMap(),
|
||||
}
|
||||
pq.cond.L = &pq.lock
|
||||
return pq
|
||||
}
|
||||
|
||||
// addNominatedPodIfNeeded adds a pod to nominatedPods if it has a NominatedNodeName and it does not
|
||||
// already exist in the map. Adding an existing pod is not going to update the pod.
|
||||
func (p *PriorityQueue) addNominatedPodIfNeeded(pod *v1.Pod) {
|
||||
nnn := NominatedNodeName(pod)
|
||||
if len(nnn) > 0 {
|
||||
for _, np := range p.nominatedPods[nnn] {
|
||||
if np.UID == pod.UID {
|
||||
klog.V(4).Infof("Pod %v/%v already exists in the nominated map!", pod.Namespace, pod.Name)
|
||||
return
|
||||
}
|
||||
}
|
||||
p.nominatedPods[nnn] = append(p.nominatedPods[nnn], pod)
|
||||
}
|
||||
}
|
||||
|
||||
// deleteNominatedPodIfExists deletes a pod from the nominatedPods.
|
||||
// NOTE: this function assumes lock has been acquired in caller.
|
||||
func (p *PriorityQueue) deleteNominatedPodIfExists(pod *v1.Pod) {
|
||||
nnn := NominatedNodeName(pod)
|
||||
if len(nnn) > 0 {
|
||||
for i, np := range p.nominatedPods[nnn] {
|
||||
if np.UID == pod.UID {
|
||||
p.nominatedPods[nnn] = append(p.nominatedPods[nnn][:i], p.nominatedPods[nnn][i+1:]...)
|
||||
if len(p.nominatedPods[nnn]) == 0 {
|
||||
delete(p.nominatedPods, nnn)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// updateNominatedPod updates a pod in the nominatedPods.
|
||||
func (p *PriorityQueue) updateNominatedPod(oldPod, newPod *v1.Pod) {
|
||||
// Even if the nominated node name of the Pod is not changed, we must delete and add it again
|
||||
// to ensure that its pointer is updated.
|
||||
p.deleteNominatedPodIfExists(oldPod)
|
||||
p.addNominatedPodIfNeeded(newPod)
|
||||
}
|
||||
|
||||
// Add adds a pod to the active queue. It should be called only when a new pod
|
||||
// is added so there is no chance the pod is already in either queue.
|
||||
func (p *PriorityQueue) Add(pod *v1.Pod) error {
|
||||
@ -268,10 +262,9 @@ func (p *PriorityQueue) Add(pod *v1.Pod) error {
|
||||
} else {
|
||||
if p.unschedulableQ.get(pod) != nil {
|
||||
klog.Errorf("Error: pod %v/%v is already in the unschedulable queue.", pod.Namespace, pod.Name)
|
||||
p.deleteNominatedPodIfExists(pod)
|
||||
p.unschedulableQ.delete(pod)
|
||||
}
|
||||
p.addNominatedPodIfNeeded(pod)
|
||||
p.nominatedPods.add(pod, "")
|
||||
p.cond.Broadcast()
|
||||
}
|
||||
return err
|
||||
@ -292,7 +285,7 @@ func (p *PriorityQueue) AddIfNotPresent(pod *v1.Pod) error {
|
||||
if err != nil {
|
||||
klog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err)
|
||||
} else {
|
||||
p.addNominatedPodIfNeeded(pod)
|
||||
p.nominatedPods.add(pod, "")
|
||||
p.cond.Broadcast()
|
||||
}
|
||||
return err
|
||||
@ -317,12 +310,12 @@ func (p *PriorityQueue) AddUnschedulableIfNotPresent(pod *v1.Pod) error {
|
||||
}
|
||||
if !p.receivedMoveRequest && isPodUnschedulable(pod) {
|
||||
p.unschedulableQ.addOrUpdate(pod)
|
||||
p.addNominatedPodIfNeeded(pod)
|
||||
p.nominatedPods.add(pod, "")
|
||||
return nil
|
||||
}
|
||||
err := p.activeQ.Add(pod)
|
||||
if err == nil {
|
||||
p.addNominatedPodIfNeeded(pod)
|
||||
p.nominatedPods.add(pod, "")
|
||||
p.cond.Broadcast()
|
||||
}
|
||||
return err
|
||||
@ -373,13 +366,13 @@ func (p *PriorityQueue) Update(oldPod, newPod *v1.Pod) error {
|
||||
defer p.lock.Unlock()
|
||||
// If the pod is already in the active queue, just update it there.
|
||||
if _, exists, _ := p.activeQ.Get(newPod); exists {
|
||||
p.updateNominatedPod(oldPod, newPod)
|
||||
p.nominatedPods.update(oldPod, newPod)
|
||||
err := p.activeQ.Update(newPod)
|
||||
return err
|
||||
}
|
||||
// If the pod is in the unschedulable queue, updating it may make it schedulable.
|
||||
if usPod := p.unschedulableQ.get(newPod); usPod != nil {
|
||||
p.updateNominatedPod(oldPod, newPod)
|
||||
p.nominatedPods.update(oldPod, newPod)
|
||||
if isPodUpdated(oldPod, newPod) {
|
||||
p.unschedulableQ.delete(usPod)
|
||||
err := p.activeQ.Add(newPod)
|
||||
@ -394,7 +387,7 @@ func (p *PriorityQueue) Update(oldPod, newPod *v1.Pod) error {
|
||||
// If pod is not in any of the two queue, we put it in the active queue.
|
||||
err := p.activeQ.Add(newPod)
|
||||
if err == nil {
|
||||
p.addNominatedPodIfNeeded(newPod)
|
||||
p.nominatedPods.add(newPod, "")
|
||||
p.cond.Broadcast()
|
||||
}
|
||||
return err
|
||||
@ -405,7 +398,7 @@ func (p *PriorityQueue) Update(oldPod, newPod *v1.Pod) error {
|
||||
func (p *PriorityQueue) Delete(pod *v1.Pod) error {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
p.deleteNominatedPodIfExists(pod)
|
||||
p.nominatedPods.delete(pod)
|
||||
err := p.activeQ.Delete(pod)
|
||||
if err != nil { // The item was probably not found in the activeQ.
|
||||
p.unschedulableQ.delete(pod)
|
||||
@ -488,16 +481,13 @@ func (p *PriorityQueue) getUnschedulablePodsWithMatchingAffinityTerm(pod *v1.Pod
|
||||
return podsToMove
|
||||
}
|
||||
|
||||
// WaitingPodsForNode returns pods that are nominated to run on the given node,
|
||||
// NominatedPodsForNode returns pods that are nominated to run on the given node,
|
||||
// but they are waiting for other pods to be removed from the node before they
|
||||
// can be actually scheduled.
|
||||
func (p *PriorityQueue) WaitingPodsForNode(nodeName string) []*v1.Pod {
|
||||
func (p *PriorityQueue) NominatedPodsForNode(nodeName string) []*v1.Pod {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
if list, ok := p.nominatedPods[nodeName]; ok {
|
||||
return list
|
||||
}
|
||||
return nil
|
||||
return p.nominatedPods.podsForNode(nodeName)
|
||||
}
|
||||
|
||||
// WaitingPods returns all the waiting pods in the queue.
|
||||
@ -523,13 +513,30 @@ func (p *PriorityQueue) Close() {
|
||||
p.cond.Broadcast()
|
||||
}
|
||||
|
||||
// DeleteNominatedPodIfExists deletes pod from internal cache if it's a nominatedPod
|
||||
// DeleteNominatedPodIfExists deletes pod nominatedPods.
|
||||
func (p *PriorityQueue) DeleteNominatedPodIfExists(pod *v1.Pod) {
|
||||
p.lock.Lock()
|
||||
p.deleteNominatedPodIfExists(pod)
|
||||
p.nominatedPods.delete(pod)
|
||||
p.lock.Unlock()
|
||||
}
|
||||
|
||||
// UpdateNominatedPodForNode adds a pod to the nominated pods of the given node.
|
||||
// This is called during the preemption process after a node is nominated to run
|
||||
// the pod. We update the structure before sending a request to update the pod
|
||||
// object to avoid races with the following scheduling cycles.
|
||||
func (p *PriorityQueue) UpdateNominatedPodForNode(pod *v1.Pod, nodeName string) {
|
||||
p.lock.Lock()
|
||||
p.nominatedPods.add(pod, nodeName)
|
||||
p.lock.Unlock()
|
||||
}
|
||||
|
||||
// NumUnschedulablePods returns the number of unschedulable pods exist in the SchedulingQueue.
|
||||
func (p *PriorityQueue) NumUnschedulablePods() int {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
return len(p.unschedulableQ.pods)
|
||||
}
|
||||
|
||||
// UnschedulablePodsMap holds pods that cannot be scheduled. This data structure
|
||||
// is used to implement unschedulableQ.
|
||||
type UnschedulablePodsMap struct {
|
||||
@ -767,3 +774,77 @@ func newHeap(keyFn KeyFunc, lessFn LessFunc) *Heap {
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// nominatedPodMap is a structure that stores pods nominated to run on nodes.
|
||||
// It exists because nominatedNodeName of pod objects stored in the structure
|
||||
// may be different than what scheduler has here. We should be able to find pods
|
||||
// by their UID and update/delete them.
|
||||
type nominatedPodMap struct {
|
||||
// nominatedPods is a map keyed by a node name and the value is a list of
|
||||
// pods which are nominated to run on the node. These are pods which can be in
|
||||
// the activeQ or unschedulableQ.
|
||||
nominatedPods map[string][]*v1.Pod
|
||||
// nominatedPodToNode is map keyed by a Pod UID to the node name where it is
|
||||
// nominated.
|
||||
nominatedPodToNode map[ktypes.UID]string
|
||||
}
|
||||
|
||||
func (npm *nominatedPodMap) add(p *v1.Pod, nodeName string) {
|
||||
// always delete the pod if it already exist, to ensure we never store more than
|
||||
// one instance of the pod.
|
||||
npm.delete(p)
|
||||
|
||||
nnn := nodeName
|
||||
if len(nnn) == 0 {
|
||||
nnn = NominatedNodeName(p)
|
||||
if len(nnn) == 0 {
|
||||
return
|
||||
}
|
||||
}
|
||||
npm.nominatedPodToNode[p.UID] = nnn
|
||||
for _, np := range npm.nominatedPods[nnn] {
|
||||
if np.UID == p.UID {
|
||||
klog.V(4).Infof("Pod %v/%v already exists in the nominated map!", p.Namespace, p.Name)
|
||||
return
|
||||
}
|
||||
}
|
||||
npm.nominatedPods[nnn] = append(npm.nominatedPods[nnn], p)
|
||||
}
|
||||
|
||||
func (npm *nominatedPodMap) delete(p *v1.Pod) {
|
||||
nnn, ok := npm.nominatedPodToNode[p.UID]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
for i, np := range npm.nominatedPods[nnn] {
|
||||
if np.UID == p.UID {
|
||||
npm.nominatedPods[nnn] = append(npm.nominatedPods[nnn][:i], npm.nominatedPods[nnn][i+1:]...)
|
||||
if len(npm.nominatedPods[nnn]) == 0 {
|
||||
delete(npm.nominatedPods, nnn)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
delete(npm.nominatedPodToNode, p.UID)
|
||||
}
|
||||
|
||||
func (npm *nominatedPodMap) update(oldPod, newPod *v1.Pod) {
|
||||
// We update irrespective of the nominatedNodeName changed or not, to ensure
|
||||
// that pod pointer is updated.
|
||||
npm.delete(oldPod)
|
||||
npm.add(newPod, "")
|
||||
}
|
||||
|
||||
func (npm *nominatedPodMap) podsForNode(nodeName string) []*v1.Pod {
|
||||
if list, ok := npm.nominatedPods[nodeName]; ok {
|
||||
return list
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func newNominatedPodMap() *nominatedPodMap {
|
||||
return &nominatedPodMap{
|
||||
nominatedPods: make(map[string][]*v1.Pod),
|
||||
nominatedPodToNode: make(map[ktypes.UID]string),
|
||||
}
|
||||
}
|
||||
|
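`nominatedPodMap` keeps a second map from pod UID to node name because the pod object handed to `delete` or `update` may still carry a stale (or empty) `NominatedNodeName`; the UID index is what lets the queue find and remove the old entry. A reduced sketch of that bookkeeping, using plain strings instead of `*v1.Pod` and invented helper names:

```go
// Sketch: a two-map structure like nominatedPodMap. podToNode lets delete()
// find the entry even when the caller's pod object carries a stale node name.
package main

import "fmt"

type nominations struct {
	podsByNode map[string][]string // node name -> pod UIDs
	podToNode  map[string]string   // pod UID -> node name
}

func newNominations() *nominations {
	return &nominations{podsByNode: map[string][]string{}, podToNode: map[string]string{}}
}

func (n *nominations) add(uid, node string) {
	n.delete(uid) // never keep two entries for one pod
	n.podToNode[uid] = node
	n.podsByNode[node] = append(n.podsByNode[node], uid)
}

func (n *nominations) delete(uid string) {
	node, ok := n.podToNode[uid]
	if !ok {
		return
	}
	pods := n.podsByNode[node]
	for i, p := range pods {
		if p == uid {
			n.podsByNode[node] = append(pods[:i], pods[i+1:]...)
			break
		}
	}
	if len(n.podsByNode[node]) == 0 {
		delete(n.podsByNode, node)
	}
	delete(n.podToNode, uid)
}

func main() {
	n := newNominations()
	n.add("pod-a", "node1")
	n.add("pod-a", "node2") // re-nomination moves the pod, no duplicate left behind
	fmt.Println(n.podsByNode) // map[node2:[pod-a]]
}
```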
189
vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/scheduling_queue_test.go
generated
vendored
@ -24,6 +24,8 @@ import (
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/pkg/scheduler/util"
|
||||
)
|
||||
|
||||
@ -97,8 +99,14 @@ func TestPriorityQueue_Add(t *testing.T) {
|
||||
q.Add(&medPriorityPod)
|
||||
q.Add(&unschedulablePod)
|
||||
q.Add(&highPriorityPod)
|
||||
expectedNominatedPods := map[string][]*v1.Pod{
|
||||
"node1": {&medPriorityPod, &unschedulablePod},
|
||||
expectedNominatedPods := &nominatedPodMap{
|
||||
nominatedPodToNode: map[types.UID]string{
|
||||
medPriorityPod.UID: "node1",
|
||||
unschedulablePod.UID: "node1",
|
||||
},
|
||||
nominatedPods: map[string][]*v1.Pod{
|
||||
"node1": {&medPriorityPod, &unschedulablePod},
|
||||
},
|
||||
}
|
||||
if !reflect.DeepEqual(q.nominatedPods, expectedNominatedPods) {
|
||||
t.Errorf("Unexpected nominated map after adding pods. Expected: %v, got: %v", expectedNominatedPods, q.nominatedPods)
|
||||
@ -112,8 +120,8 @@ func TestPriorityQueue_Add(t *testing.T) {
|
||||
if p, err := q.Pop(); err != nil || p != &unschedulablePod {
|
||||
t.Errorf("Expected: %v after Pop, but got: %v", unschedulablePod.Name, p.Name)
|
||||
}
|
||||
if len(q.nominatedPods["node1"]) != 2 {
|
||||
t.Errorf("Expected medPriorityPod and unschedulablePod to be still present in nomindatePods: %v", q.nominatedPods["node1"])
|
||||
if len(q.nominatedPods.nominatedPods["node1"]) != 2 {
|
||||
t.Errorf("Expected medPriorityPod and unschedulablePod to be still present in nomindatePods: %v", q.nominatedPods.nominatedPods["node1"])
|
||||
}
|
||||
}
|
||||
|
||||
@ -123,8 +131,14 @@ func TestPriorityQueue_AddIfNotPresent(t *testing.T) {
|
||||
q.AddIfNotPresent(&highPriNominatedPod) // Must not add anything.
|
||||
q.AddIfNotPresent(&medPriorityPod)
|
||||
q.AddIfNotPresent(&unschedulablePod)
|
||||
expectedNominatedPods := map[string][]*v1.Pod{
|
||||
"node1": {&medPriorityPod, &unschedulablePod},
|
||||
expectedNominatedPods := &nominatedPodMap{
|
||||
nominatedPodToNode: map[types.UID]string{
|
||||
medPriorityPod.UID: "node1",
|
||||
unschedulablePod.UID: "node1",
|
||||
},
|
||||
nominatedPods: map[string][]*v1.Pod{
|
||||
"node1": {&medPriorityPod, &unschedulablePod},
|
||||
},
|
||||
}
|
||||
if !reflect.DeepEqual(q.nominatedPods, expectedNominatedPods) {
|
||||
t.Errorf("Unexpected nominated map after adding pods. Expected: %v, got: %v", expectedNominatedPods, q.nominatedPods)
|
||||
@ -135,8 +149,8 @@ func TestPriorityQueue_AddIfNotPresent(t *testing.T) {
|
||||
if p, err := q.Pop(); err != nil || p != &unschedulablePod {
|
||||
t.Errorf("Expected: %v after Pop, but got: %v", unschedulablePod.Name, p.Name)
|
||||
}
|
||||
if len(q.nominatedPods["node1"]) != 2 {
|
||||
t.Errorf("Expected medPriorityPod and unschedulablePod to be still present in nomindatePods: %v", q.nominatedPods["node1"])
|
||||
if len(q.nominatedPods.nominatedPods["node1"]) != 2 {
|
||||
t.Errorf("Expected medPriorityPod and unschedulablePod to be still present in nomindatePods: %v", q.nominatedPods.nominatedPods["node1"])
|
||||
}
|
||||
if q.unschedulableQ.get(&highPriNominatedPod) != &highPriNominatedPod {
|
||||
t.Errorf("Pod %v was not found in the unschedulableQ.", highPriNominatedPod.Name)
|
||||
@ -149,8 +163,15 @@ func TestPriorityQueue_AddUnschedulableIfNotPresent(t *testing.T) {
|
||||
q.AddUnschedulableIfNotPresent(&highPriNominatedPod) // Must not add anything.
|
||||
q.AddUnschedulableIfNotPresent(&medPriorityPod) // This should go to activeQ.
|
||||
q.AddUnschedulableIfNotPresent(&unschedulablePod)
|
||||
expectedNominatedPods := map[string][]*v1.Pod{
|
||||
"node1": {&highPriNominatedPod, &medPriorityPod, &unschedulablePod},
|
||||
expectedNominatedPods := &nominatedPodMap{
|
||||
nominatedPodToNode: map[types.UID]string{
|
||||
medPriorityPod.UID: "node1",
|
||||
unschedulablePod.UID: "node1",
|
||||
highPriNominatedPod.UID: "node1",
|
||||
},
|
||||
nominatedPods: map[string][]*v1.Pod{
|
||||
"node1": {&highPriNominatedPod, &medPriorityPod, &unschedulablePod},
|
||||
},
|
||||
}
|
||||
if !reflect.DeepEqual(q.nominatedPods, expectedNominatedPods) {
|
||||
t.Errorf("Unexpected nominated map after adding pods. Expected: %v, got: %v", expectedNominatedPods, q.nominatedPods)
|
||||
@ -161,7 +182,7 @@ func TestPriorityQueue_AddUnschedulableIfNotPresent(t *testing.T) {
|
||||
if p, err := q.Pop(); err != nil || p != &medPriorityPod {
|
||||
t.Errorf("Expected: %v after Pop, but got: %v", medPriorityPod.Name, p.Name)
|
||||
}
|
||||
if len(q.nominatedPods) != 1 {
|
||||
if len(q.nominatedPods.nominatedPods) != 1 {
|
||||
t.Errorf("Expected nomindatePods to have one element: %v", q.nominatedPods)
|
||||
}
|
||||
if q.unschedulableQ.get(&unschedulablePod) != &unschedulablePod {
|
||||
@ -178,8 +199,8 @@ func TestPriorityQueue_Pop(t *testing.T) {
|
||||
if p, err := q.Pop(); err != nil || p != &medPriorityPod {
|
||||
t.Errorf("Expected: %v after Pop, but got: %v", medPriorityPod.Name, p.Name)
|
||||
}
|
||||
if len(q.nominatedPods["node1"]) != 1 {
|
||||
t.Errorf("Expected medPriorityPod to be present in nomindatePods: %v", q.nominatedPods["node1"])
|
||||
if len(q.nominatedPods.nominatedPods["node1"]) != 1 {
|
||||
t.Errorf("Expected medPriorityPod to be present in nomindatePods: %v", q.nominatedPods.nominatedPods["node1"])
|
||||
}
|
||||
}()
|
||||
q.Add(&medPriorityPod)
|
||||
@ -192,7 +213,7 @@ func TestPriorityQueue_Update(t *testing.T) {
|
||||
if _, exists, _ := q.activeQ.Get(&highPriorityPod); !exists {
|
||||
t.Errorf("Expected %v to be added to activeQ.", highPriorityPod.Name)
|
||||
}
|
||||
if len(q.nominatedPods) != 0 {
|
||||
if len(q.nominatedPods.nominatedPods) != 0 {
|
||||
t.Errorf("Expected nomindatePods to be empty: %v", q.nominatedPods)
|
||||
}
|
||||
// Update highPriorityPod and add a nominatedNodeName to it.
|
||||
@ -200,7 +221,7 @@ func TestPriorityQueue_Update(t *testing.T) {
|
||||
if q.activeQ.data.Len() != 1 {
|
||||
t.Error("Expected only one item in activeQ.")
|
||||
}
|
||||
if len(q.nominatedPods) != 1 {
|
||||
if len(q.nominatedPods.nominatedPods) != 1 {
|
||||
t.Errorf("Expected one item in nomindatePods map: %v", q.nominatedPods)
|
||||
}
|
||||
// Updating an unschedulable pod which is not in any of the two queues, should
|
||||
@ -233,11 +254,13 @@ func TestPriorityQueue_Delete(t *testing.T) {
|
||||
if _, exists, _ := q.activeQ.Get(&highPriNominatedPod); exists {
|
||||
t.Errorf("Didn't expect %v to be in activeQ.", highPriorityPod.Name)
|
||||
}
|
||||
if len(q.nominatedPods) != 1 {
|
||||
t.Errorf("Expected nomindatePods to have only 'unschedulablePod': %v", q.nominatedPods)
|
||||
if len(q.nominatedPods.nominatedPods) != 1 {
|
||||
t.Errorf("Expected nomindatePods to have only 'unschedulablePod': %v", q.nominatedPods.nominatedPods)
|
||||
}
|
||||
q.Delete(&unschedulablePod)
|
||||
if len(q.nominatedPods) != 0 {
|
||||
if err := q.Delete(&unschedulablePod); err != nil {
|
||||
t.Errorf("delete failed: %v", err)
|
||||
}
|
||||
if len(q.nominatedPods.nominatedPods) != 0 {
|
||||
t.Errorf("Expected nomindatePods to be empty: %v", q.nominatedPods)
|
||||
}
|
||||
}
|
||||
@ -309,7 +332,7 @@ func TestPriorityQueue_AssignedPodAdded(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestPriorityQueue_WaitingPodsForNode(t *testing.T) {
|
||||
func TestPriorityQueue_NominatedPodsForNode(t *testing.T) {
|
||||
q := NewPriorityQueue()
|
||||
q.Add(&medPriorityPod)
|
||||
q.Add(&unschedulablePod)
|
||||
@ -318,14 +341,83 @@ func TestPriorityQueue_WaitingPodsForNode(t *testing.T) {
|
||||
t.Errorf("Expected: %v after Pop, but got: %v", highPriorityPod.Name, p.Name)
|
||||
}
|
||||
expectedList := []*v1.Pod{&medPriorityPod, &unschedulablePod}
|
||||
if !reflect.DeepEqual(expectedList, q.WaitingPodsForNode("node1")) {
|
||||
if !reflect.DeepEqual(expectedList, q.NominatedPodsForNode("node1")) {
|
||||
t.Error("Unexpected list of nominated Pods for node.")
|
||||
}
|
||||
if q.WaitingPodsForNode("node2") != nil {
|
||||
if q.NominatedPodsForNode("node2") != nil {
|
||||
t.Error("Expected list of nominated Pods for node2 to be empty.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPriorityQueue_UpdateNominatedPodForNode(t *testing.T) {
|
||||
q := NewPriorityQueue()
|
||||
if err := q.Add(&medPriorityPod); err != nil {
|
||||
t.Errorf("add failed: %v", err)
|
||||
}
|
||||
// Update unschedulablePod on a different node than specified in the pod.
|
||||
q.UpdateNominatedPodForNode(&unschedulablePod, "node5")
|
||||
|
||||
// Update nominated node name of a pod on a node that is not specified in the pod object.
|
||||
q.UpdateNominatedPodForNode(&highPriorityPod, "node2")
|
||||
expectedNominatedPods := &nominatedPodMap{
|
||||
nominatedPodToNode: map[types.UID]string{
|
||||
medPriorityPod.UID: "node1",
|
||||
highPriorityPod.UID: "node2",
|
||||
unschedulablePod.UID: "node5",
|
||||
},
|
||||
nominatedPods: map[string][]*v1.Pod{
|
||||
"node1": {&medPriorityPod},
|
||||
"node2": {&highPriorityPod},
|
||||
"node5": {&unschedulablePod},
|
||||
},
|
||||
}
|
||||
if !reflect.DeepEqual(q.nominatedPods, expectedNominatedPods) {
|
||||
t.Errorf("Unexpected nominated map after adding pods. Expected: %v, got: %v", expectedNominatedPods, q.nominatedPods)
|
||||
}
|
||||
if p, err := q.Pop(); err != nil || p != &medPriorityPod {
|
||||
t.Errorf("Expected: %v after Pop, but got: %v", medPriorityPod.Name, p.Name)
|
||||
}
|
||||
// List of nominated pods shouldn't change after popping them from the queue.
|
||||
if !reflect.DeepEqual(q.nominatedPods, expectedNominatedPods) {
|
||||
t.Errorf("Unexpected nominated map after popping pods. Expected: %v, got: %v", expectedNominatedPods, q.nominatedPods)
|
||||
}
|
||||
// Update one of the nominated pods that doesn't have nominatedNodeName in the
|
||||
// pod object. It should be updated correctly.
|
||||
q.UpdateNominatedPodForNode(&highPriorityPod, "node4")
|
||||
expectedNominatedPods = &nominatedPodMap{
|
||||
nominatedPodToNode: map[types.UID]string{
|
||||
medPriorityPod.UID: "node1",
|
||||
highPriorityPod.UID: "node4",
|
||||
unschedulablePod.UID: "node5",
|
||||
},
|
||||
nominatedPods: map[string][]*v1.Pod{
|
||||
"node1": {&medPriorityPod},
|
||||
"node4": {&highPriorityPod},
|
||||
"node5": {&unschedulablePod},
|
||||
},
|
||||
}
|
||||
if !reflect.DeepEqual(q.nominatedPods, expectedNominatedPods) {
|
||||
t.Errorf("Unexpected nominated map after updating pods. Expected: %v, got: %v", expectedNominatedPods, q.nominatedPods)
|
||||
}
|
||||
|
||||
// Delete a nominated pod that doesn't have nominatedNodeName in the pod
|
||||
// object. It should be deleted.
|
||||
q.DeleteNominatedPodIfExists(&highPriorityPod)
|
||||
expectedNominatedPods = &nominatedPodMap{
|
||||
nominatedPodToNode: map[types.UID]string{
|
||||
medPriorityPod.UID: "node1",
|
||||
unschedulablePod.UID: "node5",
|
||||
},
|
||||
nominatedPods: map[string][]*v1.Pod{
|
||||
"node1": {&medPriorityPod},
|
||||
"node5": {&unschedulablePod},
|
||||
},
|
||||
}
|
||||
if !reflect.DeepEqual(q.nominatedPods, expectedNominatedPods) {
|
||||
t.Errorf("Unexpected nominated map after deleting pods. Expected: %v, got: %v", expectedNominatedPods, q.nominatedPods)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnschedulablePodsMap(t *testing.T) {
|
||||
var pods = []*v1.Pod{
|
||||
{
|
||||
@ -512,3 +604,56 @@ func TestSchedulingQueue_Close(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestRecentlyTriedPodsGoBack tests that pods which are recently tried and are
|
||||
// unschedulable go behind other pods with the same priority. This behavior
|
||||
// ensures that an unschedulable pod does not block head of the queue when there
|
||||
// are frequent events that move pods to the active queue.
|
||||
func TestRecentlyTriedPodsGoBack(t *testing.T) {
|
||||
q := NewPriorityQueue()
|
||||
// Add a few pods to priority queue.
|
||||
for i := 0; i < 5; i++ {
|
||||
p := v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("test-pod-%v", i),
|
||||
Namespace: "ns1",
|
||||
UID: types.UID(fmt.Sprintf("tp00%v", i)),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Priority: &highPriority,
|
||||
},
|
||||
Status: v1.PodStatus{
|
||||
NominatedNodeName: "node1",
|
||||
},
|
||||
}
|
||||
q.Add(&p)
|
||||
}
|
||||
// Simulate a pod being popped by the scheduler, determined unschedulable, and
|
||||
// then moved back to the active queue.
|
||||
p1, err := q.Pop()
|
||||
if err != nil {
|
||||
t.Errorf("Error while popping the head of the queue: %v", err)
|
||||
}
|
||||
// Update pod condition to unschedulable.
|
||||
podutil.UpdatePodCondition(&p1.Status, &v1.PodCondition{
|
||||
Type: v1.PodScheduled,
|
||||
Status: v1.ConditionFalse,
|
||||
Reason: v1.PodReasonUnschedulable,
|
||||
Message: "fake scheduling failure",
|
||||
})
|
||||
// Put in the unschedulable queue.
|
||||
q.AddUnschedulableIfNotPresent(p1)
|
||||
// Move all unschedulable pods to the active queue.
|
||||
q.MoveAllToActiveQueue()
|
||||
// Simulation is over. Now let's pop all pods. The pod popped first should be
|
||||
// the last one we pop here.
|
||||
for i := 0; i < 5; i++ {
|
||||
p, err := q.Pop()
|
||||
if err != nil {
|
||||
t.Errorf("Error while popping pods from the queue: %v", err)
|
||||
}
|
||||
if (i == 4) != (p1 == p) {
|
||||
t.Errorf("A pod tried before is not the last pod popped: i: %v, pod name: %v", i, p.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
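`TestRecentlyTriedPodsGoBack` exercises the new `activeQComp` ordering: higher priority first, and among equal priorities the pod whose last scheduling attempt (or creation) is older pops first, so a freshly retried pod lands behind its peers. A minimal sketch of that comparator over a plain struct, with fields invented to keep it self-contained:

```go
// Sketch of activeQComp's ordering: by priority, then by the timestamp of the
// last scheduling attempt (falling back to creation time).
package main

import (
	"fmt"
	"sort"
	"time"
)

type queuedPod struct {
	name      string
	priority  int32
	lastTried time.Time // PodScheduled LastTransitionTime, or CreationTimestamp if never tried
}

func less(a, b queuedPod) bool {
	if a.priority != b.priority {
		return a.priority > b.priority // higher priority pops first
	}
	return a.lastTried.Before(b.lastTried) // older attempt pops first
}

func main() {
	now := time.Now()
	pods := []queuedPod{
		{"retried-just-now", 100, now},
		{"waiting-a-while", 100, now.Add(-time.Minute)},
		{"low-priority", 10, now.Add(-time.Hour)},
	}
	sort.Slice(pods, func(i, j int) bool { return less(pods[i], pods[j]) })
	for _, p := range pods {
		fmt.Println(p.name)
	}
	// Pops in order: waiting-a-while, retried-just-now, low-priority.
}
```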
8
vendor/k8s.io/kubernetes/pkg/scheduler/scheduler.go
generated
vendored
@ -328,11 +328,19 @@ func (sched *Scheduler) preempt(preemptor *v1.Pod, scheduleErr error) (string, e
|
||||
var nodeName = ""
|
||||
if node != nil {
|
||||
nodeName = node.Name
|
||||
// Update the scheduling queue with the nominated pod information. Without
|
||||
// this, there would be a race condition between the next scheduling cycle
|
||||
// and the time the scheduler receives a Pod Update for the nominated pod.
|
||||
sched.config.SchedulingQueue.UpdateNominatedPodForNode(preemptor, nodeName)
|
||||
|
||||
// Make a call to update nominated node name of the pod on the API server.
|
||||
err = sched.config.PodPreemptor.SetNominatedNodeName(preemptor, nodeName)
|
||||
if err != nil {
|
||||
klog.Errorf("Error in preemption process. Cannot update pod %v/%v annotations: %v", preemptor.Namespace, preemptor.Name, err)
|
||||
sched.config.SchedulingQueue.DeleteNominatedPodIfExists(preemptor)
|
||||
return "", err
|
||||
}
|
||||
|
||||
for _, victim := range victims {
|
||||
if err := sched.config.PodPreemptor.DeletePod(victim); err != nil {
|
||||
klog.Errorf("Error preempting pod %v/%v: %v", victim.Namespace, victim.Name, err)
|
||||
|
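The preemption change above records the nomination in the scheduling queue before the API call and deletes it again if that call fails, closing the race with the next scheduling cycle. A compressed sketch of that update-then-rollback ordering; the interfaces are trimmed stand-ins, not the real scheduler types:

```go
// Sketch of the ordering in Scheduler.preempt: record the nomination locally
// first so the next scheduling cycle sees it, then persist it; undo on failure.
package sketch

import "fmt"

type queue interface {
	UpdateNominatedPodForNode(podName, nodeName string)
	DeleteNominatedPodIfExists(podName string)
}

type podPreemptor interface {
	SetNominatedNodeName(podName, nodeName string) error
}

func nominate(q queue, p podPreemptor, podName, nodeName string) (string, error) {
	// 1. Update the local queue before the API round-trip to avoid a race with
	//    the next scheduling cycle.
	q.UpdateNominatedPodForNode(podName, nodeName)

	// 2. Persist the nominated node name on the API server.
	if err := p.SetNominatedNodeName(podName, nodeName); err != nil {
		// 3. Roll back the local state so the queue does not keep a nomination
		//    the rest of the system never saw.
		q.DeleteNominatedPodIfExists(podName)
		return "", fmt.Errorf("updating nominated node name: %v", err)
	}
	return nodeName, nil
}
```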
1
vendor/k8s.io/kubernetes/pkg/scheduler/testing/BUILD
generated
vendored
@ -18,7 +18,6 @@ go_library(
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/apis/core/install:go_default_library",
|
||||
"//pkg/scheduler/algorithm:go_default_library",
|
||||
"//pkg/scheduler/internal/cache:go_default_library",
|
||||
"//staging/src/k8s.io/api/apps/v1:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
|
||||
|
3
vendor/k8s.io/kubernetes/pkg/scheduler/testing/fake_lister.go
generated
vendored
@ -26,7 +26,6 @@ import (
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
corelisters "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
|
||||
)
|
||||
|
||||
var _ algorithm.NodeLister = &FakeNodeLister{}
|
||||
@ -55,7 +54,7 @@ func (f FakePodLister) List(s labels.Selector) (selected []*v1.Pod, err error) {
|
||||
}
|
||||
|
||||
// FilteredList returns pods matching a pod filter and a label selector.
|
||||
func (f FakePodLister) FilteredList(podFilter schedulerinternalcache.PodFilter, s labels.Selector) (selected []*v1.Pod, err error) {
|
||||
func (f FakePodLister) FilteredList(podFilter algorithm.PodFilter, s labels.Selector) (selected []*v1.Pod, err error) {
|
||||
for _, pod := range f {
|
||||
if podFilter(pod) && s.Matches(labels.Set(pod.Labels)) {
|
||||
selected = append(selected, pod)
|
||||
|
23
vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs_linux.go
generated
vendored
@ -23,6 +23,7 @@ import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
libipvs "github.com/docker/libnetwork/ipvs"
|
||||
@ -34,6 +35,7 @@ import (
|
||||
type runner struct {
|
||||
exec utilexec.Interface
|
||||
ipvsHandle *libipvs.Handle
|
||||
mu sync.Mutex // Protect Netlink calls
|
||||
}
|
||||
|
||||
// Protocol is the IPVS service protocol type
|
||||
@ -58,6 +60,8 @@ func (runner *runner) AddVirtualServer(vs *VirtualServer) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
runner.mu.Lock()
|
||||
defer runner.mu.Unlock()
|
||||
return runner.ipvsHandle.NewService(svc)
|
||||
}
|
||||
|
||||
@ -67,6 +71,8 @@ func (runner *runner) UpdateVirtualServer(vs *VirtualServer) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
runner.mu.Lock()
|
||||
defer runner.mu.Unlock()
|
||||
return runner.ipvsHandle.UpdateService(svc)
|
||||
}
|
||||
|
||||
@ -76,6 +82,8 @@ func (runner *runner) DeleteVirtualServer(vs *VirtualServer) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
runner.mu.Lock()
|
||||
defer runner.mu.Unlock()
|
||||
return runner.ipvsHandle.DelService(svc)
|
||||
}
|
||||
|
||||
@ -85,7 +93,10 @@ func (runner *runner) GetVirtualServer(vs *VirtualServer) (*VirtualServer, error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
runner.mu.Lock()
|
||||
ipvsSvc, err := runner.ipvsHandle.GetService(svc)
|
||||
runner.mu.Unlock()
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -98,7 +109,9 @@ func (runner *runner) GetVirtualServer(vs *VirtualServer) (*VirtualServer, error
|
||||
|
||||
// GetVirtualServers is part of ipvs.Interface.
|
||||
func (runner *runner) GetVirtualServers() ([]*VirtualServer, error) {
|
||||
runner.mu.Lock()
|
||||
ipvsSvcs, err := runner.ipvsHandle.GetServices()
|
||||
runner.mu.Unlock()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -115,6 +128,8 @@ func (runner *runner) GetVirtualServers() ([]*VirtualServer, error) {
|
||||
|
||||
// Flush is part of ipvs.Interface. Currently we delete IPVS services one by one
|
||||
func (runner *runner) Flush() error {
|
||||
runner.mu.Lock()
|
||||
defer runner.mu.Unlock()
|
||||
return runner.ipvsHandle.Flush()
|
||||
}
|
||||
|
||||
@ -128,6 +143,8 @@ func (runner *runner) AddRealServer(vs *VirtualServer, rs *RealServer) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
runner.mu.Lock()
|
||||
defer runner.mu.Unlock()
|
||||
return runner.ipvsHandle.NewDestination(svc, dst)
|
||||
}
|
||||
|
||||
@ -141,6 +158,8 @@ func (runner *runner) DeleteRealServer(vs *VirtualServer, rs *RealServer) error
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
runner.mu.Lock()
|
||||
defer runner.mu.Unlock()
|
||||
return runner.ipvsHandle.DelDestination(svc, dst)
|
||||
}
|
||||
|
||||
@ -153,6 +172,8 @@ func (runner *runner) UpdateRealServer(vs *VirtualServer, rs *RealServer) error
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
runner.mu.Lock()
|
||||
defer runner.mu.Unlock()
|
||||
return runner.ipvsHandle.UpdateDestination(svc, dst)
|
||||
}
|
||||
|
||||
@ -162,7 +183,9 @@ func (runner *runner) GetRealServers(vs *VirtualServer) ([]*RealServer, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
runner.mu.Lock()
|
||||
dsts, err := runner.ipvsHandle.GetDestinations(svc)
|
||||
runner.mu.Unlock()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
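Every method touched in the ipvs_linux.go hunks now takes `runner.mu` around its `ipvsHandle` call, since the underlying netlink handle is not safe for concurrent use. The pattern is plain mutex-per-handle serialization; a generic sketch with an invented `netlinkHandle` interface:

```go
// Sketch: serialize calls into a non-thread-safe handle with one mutex,
// mirroring how runner.mu guards ipvsHandle in ipvs_linux.go.
package sketch

import "sync"

// netlinkHandle stands in for libipvs.Handle; assume its methods must not run concurrently.
type netlinkHandle interface {
	NewService(name string) error
	Flush() error
}

type runner struct {
	mu     sync.Mutex // protects every call into handle
	handle netlinkHandle
}

func (r *runner) AddVirtualServer(name string) error {
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.handle.NewService(name)
}

func (r *runner) Flush() error {
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.handle.Flush()
}
```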
2
vendor/k8s.io/kubernetes/pkg/util/mount/BUILD
generated
vendored
@@ -9,6 +9,7 @@ go_library(
         "exec_mount_unsupported.go",
         "fake.go",
         "mount.go",
+        "mount_helper.go",
         "mount_linux.go",
         "mount_unsupported.go",
         "mount_windows.go",
@@ -67,6 +68,7 @@ go_test(
     name = "go_default_test",
     srcs = [
         "exec_mount_test.go",
+        "mount_helper_test.go",
         "mount_linux_test.go",
         "mount_test.go",
         "mount_windows_test.go",
vendor/k8s.io/kubernetes/pkg/util/mount/fake.go (generated, vendored): 10 changed lines
@@ -30,6 +30,8 @@ type FakeMounter struct {
 	MountPoints []MountPoint
 	Log []FakeAction
 	Filesystem map[string]FileType
+	// Error to return for a path when calling IsLikelyNotMountPoint
+	MountCheckErrors map[string]error
 	// Some tests run things in parallel, make sure the mounter does not produce
 	// any golang's DATA RACE warnings.
 	mutex sync.Mutex
@@ -119,6 +121,7 @@ func (f *FakeMounter) Unmount(target string) error {
 	}
 	f.MountPoints = newMountpoints
 	f.Log = append(f.Log, FakeAction{Action: FakeActionUnmount, Target: absTarget})
+	delete(f.MountCheckErrors, target)
 	return nil
 }
 
@@ -141,7 +144,12 @@ func (f *FakeMounter) IsLikelyNotMountPoint(file string) (bool, error) {
 	f.mutex.Lock()
 	defer f.mutex.Unlock()
 
-	_, err := os.Stat(file)
+	err := f.MountCheckErrors[file]
+	if err != nil {
+		return false, err
+	}
+
+	_, err = os.Stat(file)
 	if err != nil {
 		return true, err
 	}
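The new MountCheckErrors field lets a test inject a per-path error that FakeMounter.IsLikelyNotMountPoint returns before touching the filesystem, which is how corrupted-mount behaviour is simulated without a real mount. A hedged usage sketch follows; the import path assumes the vendored tree shown in this diff.

```go
package example

import (
	"os"
	"syscall"

	"k8s.io/kubernetes/pkg/util/mount"
)

// staleFakeMounter returns a FakeMounter that reports the given path as a
// mount point but fails the mount-point check with ESTALE, mimicking a
// stale/corrupted mount in tests.
func staleFakeMounter(path string) *mount.FakeMounter {
	return &mount.FakeMounter{
		MountPoints: []mount.MountPoint{{Device: "/dev/sdb", Path: path}},
		MountCheckErrors: map[string]error{
			path: os.NewSyscallError("stat", syscall.ESTALE),
		},
	}
}
```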
vendor/k8s.io/kubernetes/pkg/util/mount/mount_helper.go (generated, vendored, new file): 124 lines
@@ -0,0 +1,124 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package mount

import (
	"fmt"
	"os"
	"syscall"

	"k8s.io/klog"
)

// CleanupMountPoint unmounts the given path and
// deletes the remaining directory if successful.
// if extensiveMountPointCheck is true
// IsNotMountPoint will be called instead of IsLikelyNotMountPoint.
// IsNotMountPoint is more expensive but properly handles bind mounts within the same fs.
func CleanupMountPoint(mountPath string, mounter Interface, extensiveMountPointCheck bool) error {
	// mounter.ExistsPath cannot be used because for containerized kubelet, we need to check
	// the path in the kubelet container, not on the host.
	pathExists, pathErr := PathExists(mountPath)
	if !pathExists {
		klog.Warningf("Warning: Unmount skipped because path does not exist: %v", mountPath)
		return nil
	}
	corruptedMnt := IsCorruptedMnt(pathErr)
	if pathErr != nil && !corruptedMnt {
		return fmt.Errorf("Error checking path: %v", pathErr)
	}
	return doCleanupMountPoint(mountPath, mounter, extensiveMountPointCheck, corruptedMnt)
}

// doCleanupMountPoint unmounts the given path and
// deletes the remaining directory if successful.
// if extensiveMountPointCheck is true
// IsNotMountPoint will be called instead of IsLikelyNotMountPoint.
// IsNotMountPoint is more expensive but properly handles bind mounts within the same fs.
// if corruptedMnt is true, it means that the mountPath is a corrupted mountpoint, and the mount point check
// will be skipped
func doCleanupMountPoint(mountPath string, mounter Interface, extensiveMountPointCheck bool, corruptedMnt bool) error {
	if !corruptedMnt {
		var notMnt bool
		var err error
		if extensiveMountPointCheck {
			notMnt, err = IsNotMountPoint(mounter, mountPath)
		} else {
			notMnt, err = mounter.IsLikelyNotMountPoint(mountPath)
		}

		if err != nil {
			return err
		}

		if notMnt {
			klog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath)
			return os.Remove(mountPath)
		}
	}

	// Unmount the mount path
	klog.V(4).Infof("%q is a mountpoint, unmounting", mountPath)
	if err := mounter.Unmount(mountPath); err != nil {
		return err
	}

	notMnt, mntErr := mounter.IsLikelyNotMountPoint(mountPath)
	if mntErr != nil {
		return mntErr
	}
	if notMnt {
		klog.V(4).Infof("%q is unmounted, deleting the directory", mountPath)
		return os.Remove(mountPath)
	}
	return fmt.Errorf("Failed to unmount path %v", mountPath)
}

// TODO: clean this up to use pkg/util/file/FileExists
// PathExists returns true if the specified path exists.
func PathExists(path string) (bool, error) {
	_, err := os.Stat(path)
	if err == nil {
		return true, nil
	} else if os.IsNotExist(err) {
		return false, nil
	} else if IsCorruptedMnt(err) {
		return true, err
	} else {
		return false, err
	}
}

// IsCorruptedMnt return true if err is about corrupted mount point
func IsCorruptedMnt(err error) bool {
	if err == nil {
		return false
	}
	var underlyingError error
	switch pe := err.(type) {
	case nil:
		return false
	case *os.PathError:
		underlyingError = pe.Err
	case *os.LinkError:
		underlyingError = pe.Err
	case *os.SyscallError:
		underlyingError = pe.Err
	}

	return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE || underlyingError == syscall.EIO
}
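For callers, the entry point in this new file is CleanupMountPoint. Below is a hedged usage sketch that drives it with the FakeMounter from the previous file so it runs without root; the temporary-directory setup is illustrative and the import path assumes the vendored tree shown in this diff.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	"k8s.io/kubernetes/pkg/util/mount"
)

func main() {
	// Create a real directory to stand in for a volume mount path.
	base, err := ioutil.TempDir("", "cleanup-example")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(base)

	path := filepath.Join(base, "vol")
	if err := os.MkdirAll(path, 0750); err != nil {
		panic(err)
	}

	// FakeMounter treats the directory as mounted; its Unmount simply
	// drops the entry from the in-memory list.
	mounter := &mount.FakeMounter{
		MountPoints: []mount.MountPoint{{Device: "/dev/sdb", Path: path}},
	}

	// extensiveMountPointCheck=true selects IsNotMountPoint, which also
	// recognizes bind mounts within the same filesystem.
	if err := mount.CleanupMountPoint(path, mounter, true); err != nil {
		fmt.Println("cleanup failed:", err)
		return
	}
	fmt.Println("unmounted and removed", path)
}
```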
vendor/k8s.io/kubernetes/pkg/util/mount/mount_helper_test.go (generated, vendored, new file): 152 lines
@@ -0,0 +1,152 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package mount

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"syscall"
	"testing"
)

func TestDoCleanupMountPoint(t *testing.T) {
	const testMount = "test-mount"
	const defaultPerm = 0750

	tests := map[string]struct {
		corruptedMnt bool
		// Function that prepares the directory structure for the test under
		// the given base directory.
		// Returns a fake MountPoint, a fake error for the mount point,
		// and error if the prepare function encountered a fatal error.
		prepare   func(base string) (MountPoint, error, error)
		expectErr bool
	}{
		"mount-ok": {
			prepare: func(base string) (MountPoint, error, error) {
				path := filepath.Join(base, testMount)
				if err := os.MkdirAll(path, defaultPerm); err != nil {
					return MountPoint{}, nil, err
				}
				return MountPoint{Device: "/dev/sdb", Path: path}, nil, nil
			},
		},
		"mount-corrupted": {
			prepare: func(base string) (MountPoint, error, error) {
				path := filepath.Join(base, testMount)
				if err := os.MkdirAll(path, defaultPerm); err != nil {
					return MountPoint{}, nil, err
				}
				return MountPoint{Device: "/dev/sdb", Path: path}, os.NewSyscallError("fake", syscall.ESTALE), nil
			},
			corruptedMnt: true,
		},
		"mount-err-not-corrupted": {
			prepare: func(base string) (MountPoint, error, error) {
				path := filepath.Join(base, testMount)
				if err := os.MkdirAll(path, defaultPerm); err != nil {
					return MountPoint{}, nil, err
				}
				return MountPoint{Device: "/dev/sdb", Path: path}, os.NewSyscallError("fake", syscall.ETIMEDOUT), nil
			},
			expectErr: true,
		},
	}

	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {

			tmpDir, err := ioutil.TempDir("", "unmount-mount-point-test")
			if err != nil {
				t.Fatalf("failed to create tmpdir: %v", err)
			}
			defer os.RemoveAll(tmpDir)

			if tt.prepare == nil {
				t.Fatalf("prepare function required")
			}

			mountPoint, mountError, err := tt.prepare(tmpDir)
			if err != nil {
				t.Fatalf("failed to prepare test: %v", err)
			}

			fake := &FakeMounter{
				MountPoints:      []MountPoint{mountPoint},
				MountCheckErrors: map[string]error{mountPoint.Path: mountError},
			}

			err = doCleanupMountPoint(mountPoint.Path, fake, true, tt.corruptedMnt)
			if tt.expectErr {
				if err == nil {
					t.Errorf("test %s failed, expected error, got none", name)
				}
				if err := validateDirExists(mountPoint.Path); err != nil {
					t.Errorf("test %s failed, mount path doesn't exist: %v", name, err)
				}
			}
			if !tt.expectErr {
				if err != nil {
					t.Errorf("test %s failed: %v", name, err)
				}
				if err := validateDirNotExists(mountPoint.Path); err != nil {
					t.Errorf("test %s failed, mount path still exists: %v", name, err)
				}
			}
		})
	}
}

func validateDirEmpty(dir string) error {
	files, err := ioutil.ReadDir(dir)
	if err != nil {
		return err
	}

	if len(files) != 0 {
		return fmt.Errorf("Directory %q is not empty", dir)
	}
	return nil
}

func validateDirExists(dir string) error {
	_, err := ioutil.ReadDir(dir)
	if err != nil {
		return err
	}
	return nil
}

func validateDirNotExists(dir string) error {
	_, err := ioutil.ReadDir(dir)
	if os.IsNotExist(err) {
		return nil
	}
	if err != nil {
		return err
	}
	return fmt.Errorf("dir %q still exists", dir)
}

func validateFileExists(file string) error {
	if _, err := os.Stat(file); err != nil {
		return err
	}
	return nil
}
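The split between the "mount-corrupted" and "mount-err-not-corrupted" cases mirrors IsCorruptedMnt's classification in mount_helper.go: ENOTCONN, ESTALE, and EIO count as a corrupted mount point, while anything else (here ETIMEDOUT) is surfaced as a hard error. A small hedged sketch; the import path assumes the vendored tree shown in this diff.

```go
package main

import (
	"fmt"
	"os"
	"syscall"

	"k8s.io/kubernetes/pkg/util/mount"
)

func main() {
	stale := os.NewSyscallError("stat", syscall.ESTALE)       // treated as corrupted: cleanup skips the mount-point check
	timeout := os.NewSyscallError("stat", syscall.ETIMEDOUT)  // not corrupted: cleanup surfaces the error

	fmt.Println(mount.IsCorruptedMnt(stale))   // true
	fmt.Println(mount.IsCorruptedMnt(timeout)) // false
}
```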