mirror of https://github.com/ceph/ceph-csi.git
synced 2025-06-13 10:33:35 +00:00

vendor update for CSI 0.3.0

vendor/k8s.io/kubernetes/build/BUILD (generated, vendored, 2 changes)
@@ -62,7 +62,7 @@ DOCKERIZED_BINARIES = {
[docker_bundle(
name = binary,
# TODO(thockin): remove the google_containers name after release 1.10.
# TODO(thockin): remove the google_containers name after release 1.11.
images = {
"k8s.gcr.io/%s:{STABLE_DOCKER_TAG}" % binary: binary + "-internal",
"gcr.io/google_containers/%s:{STABLE_DOCKER_TAG}" % binary: binary + "-internal",

vendor/k8s.io/kubernetes/build/OWNERS (generated, vendored, 2 changes)
@@ -4,7 +4,6 @@ reviewers:
- jbeda
- lavalamp
- zmerlynn
- spxtr
approvers:
- cblecker
- ixdy
@@ -12,4 +11,3 @@ approvers:
- lavalamp
- zmerlynn
- mikedanese
- spxtr

vendor/k8s.io/kubernetes/build/README.md (generated, vendored, 23 changes)
@@ -5,8 +5,8 @@ Building Kubernetes is easy if you take advantage of the containerized build env
## Requirements

1. Docker, using one of the following configurations:
* **Mac OS X** You can either use Docker for Mac or docker-machine. See installation instructions [here](https://docs.docker.com/docker-for-mac/).
**Note**: You will want to set the Docker VM to have at least 3GB of initial memory or building will likely fail. (See: [#11852]( http://issue.k8s.io/11852)).
* **macOS** You can either use Docker for Mac or docker-machine. See installation instructions [here](https://docs.docker.com/docker-for-mac/).
**Note**: You will want to set the Docker VM to have at least 4.5GB of initial memory or building will likely fail. (See: [#11852]( http://issue.k8s.io/11852)).
* **Linux with local Docker** Install Docker according to the [instructions](https://docs.docker.com/installation/#installation) for your OS.
* **Remote Docker engine** Use a big machine in the cloud to build faster. This is a little trickier so look at the section later on.
2. **Optional** [Google Cloud SDK](https://developers.google.com/cloud/sdk/)
@@ -107,4 +107,23 @@ In addition, there are some other tar files that are created:

When building final release tars, they are first staged into `_output/release-stage` before being tar'd up and put into `_output/release-tars`.

## Reproducibility
`make release`, its variant `make quick-release`, and Bazel all provide a
hermetic build environment which should provide some level of reproducibility
for builds. `make` itself is **not** hermetic.

The Kubernetes build environment supports the [`SOURCE_DATE_EPOCH` environment
variable](https://reproducible-builds.org/specs/source-date-epoch/) specified by
the Reproducible Builds project, which can be set to a UNIX epoch timestamp.
This will be used for the build timestamps embedded in compiled Go binaries,
and maybe someday also Docker images.

One reasonable setting for this variable is to use the commit timestamp from the
tip of the tree being built; this is what the Kubernetes CI system uses. For
example, you could use the following one-liner:

```bash
SOURCE_DATE_EPOCH=$(git show -s --format=format:%ct HEAD)
```

[]()
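As a minimal illustration of the reproducibility note above (assuming a git checkout and the `make quick-release` entrypoint mentioned in the README text), the variable can be set for a single hermetic build:

```bash
# Pin the embedded build timestamp to the commit at HEAD, then run a
# hermetic release build; the variable name and the entrypoint come from
# the README text above, the combination shown here is only a sketch.
SOURCE_DATE_EPOCH=$(git show -s --format=format:%ct HEAD) make quick-release
```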

vendor/k8s.io/kubernetes/build/bindata.bzl (generated, vendored, new file, 45 changes)
@@ -0,0 +1,45 @@
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Genrule wrapper around the go-bindata utility.
# IMPORTANT: Any changes to this rule may also require changes to hack/generate-bindata.sh.
def go_bindata(
name, srcs, outs,
compress=True,
include_metadata=True,
pkg="generated",
ignores=["\.jpg", "\.png", "\.md", "BUILD(\.bazel)?"],
**kw):

args = []
for ignore in ignores:
args.extend(["-ignore", "'%s'" % ignore])
if not include_metadata:
args.append("-nometadata")
if not compress:
args.append("-nocompress")

native.genrule(
name = name,
srcs = srcs,
outs = outs,
cmd = """
$(location //vendor/github.com/jteeuwen/go-bindata/go-bindata:go-bindata) \
-o "$@" -pkg %s -prefix $$(pwd) %s $(SRCS)
""" % (pkg, " ".join(args)),
tools = [
"//vendor/github.com/jteeuwen/go-bindata/go-bindata",
],
**kw
)
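For reference, the genrule defined by `go_bindata` above expands to roughly the following go-bindata invocation with the default arguments; the output file and the input directory shown here are illustrative placeholders, not paths taken from the repository:

```bash
# Approximate expansion of the genrule cmd above with the defaults
# (pkg="generated", compression and file metadata enabled).
go-bindata \
  -o "pkg/generated/bindata.go" \
  -pkg generated \
  -prefix "$(pwd)" \
  -ignore '\.jpg' -ignore '\.png' -ignore '\.md' -ignore 'BUILD(\.bazel)?' \
  test/e2e/testing-manifests/...
```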

vendor/k8s.io/kubernetes/build/build-image/cross/Dockerfile (generated, vendored, 4 changes)
@@ -15,7 +15,7 @@
# This file creates a standard build environment for building cross
# platform go binary for the architecture kubernetes cares about.
FROM golang:1.9.3
FROM golang:1.10.3

ENV GOARM 7
ENV KUBE_DYNAMIC_CROSSPLATFORMS \
@@ -71,7 +71,7 @@ RUN go get golang.org/x/tools/cmd/cover \
golang.org/x/tools/cmd/goimports

# Download and symlink etcd. We need this for our integration tests.
RUN export ETCD_VERSION=v3.2.14; \
RUN export ETCD_VERSION=v3.2.18; \
mkdir -p /usr/local/src/etcd \
&& cd /usr/local/src/etcd \
&& curl -fsSL https://github.com/coreos/etcd/releases/download/${ETCD_VERSION}/etcd-${ETCD_VERSION}-linux-amd64.tar.gz | tar -xz \

vendor/k8s.io/kubernetes/build/build-image/cross/VERSION (generated, vendored, 2 changes)
@@ -1 +1 @@
v1.9.3-2
v1.10.3-1

vendor/k8s.io/kubernetes/build/build-image/rsyncd.sh (generated, vendored, 2 changes)
@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

# Copyright 2016 The Kubernetes Authors.
#

vendor/k8s.io/kubernetes/build/common.sh (generated, vendored, 22 changes)
@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

# Copyright 2014 The Kubernetes Authors.
#
@@ -29,13 +29,13 @@ DOCKER_MACHINE_NAME=${DOCKER_MACHINE_NAME:-"kube-dev"}
readonly DOCKER_MACHINE_DRIVER=${DOCKER_MACHINE_DRIVER:-"virtualbox --virtualbox-cpu-count -1"}

# This will canonicalize the path
KUBE_ROOT=$(cd $(dirname "${BASH_SOURCE}")/.. && pwd -P)
KUBE_ROOT=$(cd "$(dirname "${BASH_SOURCE}")"/.. && pwd -P)

source "${KUBE_ROOT}/hack/lib/init.sh"

# Constants
readonly KUBE_BUILD_IMAGE_REPO=kube-build
readonly KUBE_BUILD_IMAGE_CROSS_TAG="$(cat ${KUBE_ROOT}/build/build-image/cross/VERSION)"
readonly KUBE_BUILD_IMAGE_CROSS_TAG="$(cat "${KUBE_ROOT}/build/build-image/cross/VERSION")"

# This version number is used to cause everyone to rebuild their data containers
# and build image. This is especially useful for automated build systems like
@@ -43,7 +43,7 @@ readonly KUBE_BUILD_IMAGE_CROSS_TAG="$(cat ${KUBE_ROOT}/build/build-image/cross/
#
# Increment/change this number if you change the build image (anything under
# build/build-image) or change the set of volumes in the data container.
readonly KUBE_BUILD_IMAGE_VERSION_BASE="$(cat ${KUBE_ROOT}/build/build-image/VERSION)"
readonly KUBE_BUILD_IMAGE_VERSION_BASE="$(cat "${KUBE_ROOT}/build/build-image/VERSION")"
readonly KUBE_BUILD_IMAGE_VERSION="${KUBE_BUILD_IMAGE_VERSION_BASE}-${KUBE_BUILD_IMAGE_CROSS_TAG}"

# Here we map the output directories across both the local and remote _output
@@ -232,7 +232,7 @@ function kube::build::prepare_docker_machine() {

docker-machine inspect "${DOCKER_MACHINE_NAME}" &> /dev/null || {
kube::log::status "Creating a machine to build Kubernetes"
docker-machine create --driver ${DOCKER_MACHINE_DRIVER} \
docker-machine create --driver "${DOCKER_MACHINE_DRIVER}" \
--virtualbox-memory "${virtualbox_memory_mb}" \
--engine-env HTTP_PROXY="${KUBERNETES_HTTP_PROXY:-}" \
--engine-env HTTPS_PROXY="${KUBERNETES_HTTPS_PROXY:-}" \
@@ -249,13 +249,13 @@ function kube::build::prepare_docker_machine() {
local docker_machine_out
while ! docker_machine_out=$(docker-machine env "${DOCKER_MACHINE_NAME}" 2>&1); do
if [[ ${docker_machine_out} =~ "Error checking TLS connection" ]]; then
echo ${docker_machine_out}
echo "${docker_machine_out}"
docker-machine regenerate-certs ${DOCKER_MACHINE_NAME}
else
sleep 1
fi
done
eval $(docker-machine env "${DOCKER_MACHINE_NAME}")
eval "$(docker-machine env "${DOCKER_MACHINE_NAME}")"
kube::log::status "A Docker host using docker-machine named '${DOCKER_MACHINE_NAME}' is ready to go!"
return 0
}
@@ -354,7 +354,7 @@ function kube::build::docker_image_exists() {
function kube::build::docker_delete_old_images() {
# In Docker 1.12, we can replace this with
# docker images "$1" --format "{{.Tag}}"
for tag in $("${DOCKER[@]}" images ${1} | tail -n +2 | awk '{print $2}') ; do
for tag in $("${DOCKER[@]}" images "${1}" | tail -n +2 | awk '{print $2}') ; do
if [[ "${tag}" != "${2}"* ]] ; then
V=3 kube::log::status "Keeping image ${1}:${tag}"
continue
@@ -434,7 +434,7 @@ function kube::build::clean() {
kube::build::docker_delete_old_images "${KUBE_BUILD_IMAGE_REPO}" "${KUBE_BUILD_IMAGE_TAG_BASE}"

V=2 kube::log::status "Cleaning all untagged docker images"
"${DOCKER[@]}" rmi $("${DOCKER[@]}" images -q --filter 'dangling=true') 2> /dev/null || true
"${DOCKER[@]}" rmi "$("${DOCKER[@]}" images -q --filter 'dangling=true')" 2> /dev/null || true
fi

if [[ -d "${LOCAL_OUTPUT_ROOT}" ]]; then
@@ -451,8 +451,8 @@ function kube::build::build_image() {

cp /etc/localtime "${LOCAL_OUTPUT_BUILD_CONTEXT}/"

cp ${KUBE_ROOT}/build/build-image/Dockerfile "${LOCAL_OUTPUT_BUILD_CONTEXT}/Dockerfile"
cp ${KUBE_ROOT}/build/build-image/rsyncd.sh "${LOCAL_OUTPUT_BUILD_CONTEXT}/"
cp "${KUBE_ROOT}/build/build-image/Dockerfile" "${LOCAL_OUTPUT_BUILD_CONTEXT}/Dockerfile"
cp "${KUBE_ROOT}/build/build-image/rsyncd.sh" "${LOCAL_OUTPUT_BUILD_CONTEXT}/"
dd if=/dev/urandom bs=512 count=1 2>/dev/null | LC_ALL=C tr -dc 'A-Za-z0-9' | dd bs=32 count=1 2>/dev/null > "${LOCAL_OUTPUT_BUILD_CONTEXT}/rsyncd.password"
chmod go= "${LOCAL_OUTPUT_BUILD_CONTEXT}/rsyncd.password"

vendor/k8s.io/kubernetes/build/copy-output.sh (generated, vendored, 2 changes)
@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

# Copyright 2014 The Kubernetes Authors.
#

vendor/k8s.io/kubernetes/build/debian-hyperkube-base/Dockerfile (generated, vendored, 1 change)
@@ -40,6 +40,7 @@ RUN echo CACHEBUST>/dev/null && clean-install \
openssh-client \
nfs-common \
socat \
udev \
util-linux

COPY cni-bin/bin /opt/cni/bin

vendor/k8s.io/kubernetes/build/debian-hyperkube-base/Makefile (generated, vendored, 2 changes)
@@ -19,7 +19,7 @@

REGISTRY?=staging-k8s.gcr.io
IMAGE?=debian-hyperkube-base
TAG=0.9
TAG=0.10
ARCH?=amd64
CACHEBUST?=1

vendor/k8s.io/kubernetes/build/debs/10-kubeadm.conf (generated, vendored, 18 changes)
@@ -1,13 +1,11 @@
# Note: This dropin only works with kubeadm and kubelet v1.11+
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
Environment="KUBELET_SYSTEM_PODS_ARGS=--pod-manifest-path=/etc/kubernetes/manifests --allow-privileged=true"
Environment="KUBELET_NETWORK_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
Environment="KUBELET_DNS_ARGS=--cluster-dns=10.96.0.10 --cluster-domain=cluster.local"
Environment="KUBELET_AUTHZ_ARGS=--authorization-mode=Webhook --client-ca-file=/etc/kubernetes/pki/ca.crt"
# Value should match Docker daemon settings.
# Defaults are "cgroupfs" for Debian/Ubuntu/OpenSUSE and "systemd" for Fedora/CentOS/RHEL
Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs"
Environment="KUBELET_CADVISOR_ARGS=--cadvisor-port=0"
Environment="KUBELET_CERTIFICATE_ARGS=--rotate-certificates=true"
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically
EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use
# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file.
EnvironmentFile=-/etc/default/kubelet
ExecStart=
ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_SYSTEM_PODS_ARGS $KUBELET_NETWORK_ARGS $KUBELET_DNS_ARGS $KUBELET_AUTHZ_ARGS $KUBELET_CGROUP_ARGS $KUBELET_CADVISOR_ARGS $KUBELET_CERTIFICATE_ARGS $KUBELET_EXTRA_ARGS
ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS

vendor/k8s.io/kubernetes/build/debs/BUILD (generated, vendored, 15 changes)
@@ -3,6 +3,7 @@ package(default_visibility = ["//visibility:public"])
load("@io_kubernetes_build//defs:deb.bzl", "k8s_deb", "deb_data")
load("@io_kubernetes_build//defs:build.bzl", "release_filegroup")
load("@io_kubernetes_build//defs:pkg.bzl", "pkg_tar")
load("//build:workspace.bzl", "CRI_TOOLS_VERSION")

# We do not include kube-scheduler, kube-controller-manager,
# kube-apiserver, and kube-proxy in this list even though we
@@ -13,6 +14,7 @@ release_filegroup(
name = "debs",
srcs = [
":cloud-controller-manager.deb",
":cri-tools.deb",
":kubeadm.deb",
":kubectl.deb",
":kubelet.deb",
@@ -86,6 +88,12 @@ pkg_tar(
deps = ["@kubernetes_cni//file"],
)

pkg_tar(
name = "cri-tools-data",
package_dir = "/usr/bin",
deps = ["@cri_tools//file"],
)

k8s_deb(
name = "cloud-controller-manager",
description = "Kubernetes Cloud Controller Manager",
@@ -156,6 +164,7 @@ k8s_deb(
description = """Kubernetes Cluster Bootstrapping Tool
The Kubernetes command line tool for bootstrapping a Kubernetes cluster.
""",
postinst = "postinst",
version_file = "//build:os_package_version",
)

@@ -167,6 +176,12 @@ The Container Networking Interface tools for provisioning container networks.
version_file = "//build:cni_package_version",
)

k8s_deb(
name = "cri-tools",
description = """Container Runtime Interface tools (crictl)""",
version = CRI_TOOLS_VERSION,
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),

vendor/k8s.io/kubernetes/build/debs/postinst (generated, vendored, new file, 30 changes)
@@ -0,0 +1,30 @@
#!/bin/sh
# see: dh_installdeb(1)

set -o errexit
set -o nounset

case "$1" in
configure)
# because kubeadm package adds kubelet drop-ins, we must daemon-reload
# and restart kubelet now. restarting kubelet is ok because kubelet
# postinst configure step auto-starts it.
systemctl daemon-reload 2>/dev/null || true
systemctl restart kubelet 2>/dev/null || true
;;

abort-upgrade|abort-remove|abort-deconfigure)
;;

*)
echo "postinst called with unknown argument \`$1'" >&2
exit 1
;;
esac

# dh_installdeb will replace this with shell code automatically
# generated by other debhelper scripts.

#DEBHELPER#

exit 0

vendor/k8s.io/kubernetes/build/lib/release.sh (generated, vendored, 53 changes)
@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

# Copyright 2016 The Kubernetes Authors.
#
@@ -67,9 +67,9 @@ function kube::release::parse_and_validate_ci_version() {
# Build final release artifacts
function kube::release::clean_cruft() {
# Clean out cruft
find ${RELEASE_STAGE} -name '*~' -exec rm {} \;
find ${RELEASE_STAGE} -name '#*#' -exec rm {} \;
find ${RELEASE_STAGE} -name '.DS*' -exec rm {} \;
find "${RELEASE_STAGE}" -name '*~' -exec rm {} \;
find "${RELEASE_STAGE}" -name '#*#' -exec rm {} \;
find "${RELEASE_STAGE}" -name '.DS*' -exec rm {} \;
}

function kube::release::package_tarballs() {
@@ -154,7 +154,7 @@ function kube::release::package_node_tarballs() {
local platform
for platform in "${KUBE_NODE_PLATFORMS[@]}"; do
local platform_tag=${platform/\//-} # Replace a "/" for a "-"
local arch=$(basename ${platform})
local arch=$(basename "${platform}")
kube::log::status "Building tarball: node $platform_tag"

local release_stage="${RELEASE_STAGE}/node/${platform_tag}/kubernetes"
@@ -198,7 +198,7 @@ function kube::release::package_server_tarballs() {
local platform
for platform in "${KUBE_SERVER_PLATFORMS[@]}"; do
local platform_tag=${platform/\//-} # Replace a "/" for a "-"
local arch=$(basename ${platform})
local arch=$(basename "${platform}")
kube::log::status "Building tarball: server $platform_tag"

local release_stage="${RELEASE_STAGE}/server/${platform_tag}/kubernetes"
@@ -280,12 +280,12 @@ function kube::release::create_docker_images_for_server() {
local binary_dir="$1"
local arch="$2"
local binary_name
local binaries=($(kube::build::get_docker_wrapped_binaries ${arch}))
local binaries=($(kube::build::get_docker_wrapped_binaries "${arch}"))
local images_dir="${RELEASE_IMAGES}/${arch}"
mkdir -p "${images_dir}"

local -r docker_registry="k8s.gcr.io"
# TODO(thockin): Remove all traces of this after 1.10 release.
# TODO(thockin): Remove all traces of this after 1.11 release.
# The following is the old non-indirected registry name. To ease the
# transition to the new name (above), we are double-tagging saved images.
local -r deprecated_registry="gcr.io/google_containers"
@@ -325,16 +325,16 @@ function kube::release::create_docker_images_for_server() {

kube::log::status "Starting docker build for image: ${binary_name}-${arch}"
(
rm -rf ${docker_build_path}
mkdir -p ${docker_build_path}
ln ${binary_dir}/${binary_name} ${docker_build_path}/${binary_name}
printf " FROM ${base_image} \n ADD ${binary_name} /usr/local/bin/${binary_name}\n" > ${docker_file_path}
rm -rf "${docker_build_path}"
mkdir -p "${docker_build_path}"
ln "${binary_dir}/${binary_name}" "${docker_build_path}/${binary_name}"
printf " FROM ${base_image} \n ADD ${binary_name} /usr/local/bin/${binary_name}\n" > "${docker_file_path}"

"${DOCKER[@]}" build --pull -q -t "${docker_image_tag}" ${docker_build_path} >/dev/null
"${DOCKER[@]}" tag "${docker_image_tag}" ${deprecated_image_tag} >/dev/null
"${DOCKER[@]}" save "${docker_image_tag}" ${deprecated_image_tag} > "${binary_dir}/${binary_name}.tar"
echo "${docker_tag}" > ${binary_dir}/${binary_name}.docker_tag
rm -rf ${docker_build_path}
"${DOCKER[@]}" build --pull -q -t "${docker_image_tag}" "${docker_build_path}" >/dev/null
"${DOCKER[@]}" tag "${docker_image_tag}" "${deprecated_image_tag}" >/dev/null
"${DOCKER[@]}" save "${docker_image_tag}" "${deprecated_image_tag}" > "${binary_dir}/${binary_name}.tar"
echo "${docker_tag}" > "${binary_dir}/${binary_name}.docker_tag"
rm -rf "${docker_build_path}"
ln "${binary_dir}/${binary_name}.tar" "${images_dir}/"

# If we are building an official/alpha/beta release we want to keep
@@ -350,8 +350,8 @@ function kube::release::create_docker_images_for_server() {
else
# not a release
kube::log::status "Deleting docker image ${docker_image_tag}"
"${DOCKER[@]}" rmi ${docker_image_tag} &>/dev/null || true
"${DOCKER[@]}" rmi ${deprecated_image_tag} &>/dev/null || true
"${DOCKER[@]}" rmi "${docker_image_tag}" &>/dev/null || true
"${DOCKER[@]}" rmi "${deprecated_image_tag}" &>/dev/null || true
fi
) &
done
@@ -382,6 +382,7 @@ function kube::release::package_kube_manifests_tarball() {
cp "${src_dir}/cluster-autoscaler.manifest" "${dst_dir}/"
cp "${src_dir}/etcd.manifest" "${dst_dir}"
cp "${src_dir}/kube-scheduler.manifest" "${dst_dir}"
cp "${src_dir}/kms-plugin-container.manifest" "${dst_dir}"
cp "${src_dir}/kube-apiserver.manifest" "${dst_dir}"
cp "${src_dir}/abac-authz-policy.jsonl" "${dst_dir}"
cp "${src_dir}/kube-controller-manager.manifest" "${dst_dir}"
@@ -389,7 +390,15 @@ function kube::release::package_kube_manifests_tarball() {
cp "${src_dir}/glbc.manifest" "${dst_dir}"
cp "${src_dir}/rescheduler.manifest" "${dst_dir}/"
cp "${src_dir}/e2e-image-puller.manifest" "${dst_dir}/"
cp "${src_dir}/etcd-empty-dir-cleanup.yaml" "${dst_dir}/"
local internal_manifest
for internal_manifest in $(ls "${src_dir}" | grep "^internal-*"); do
cp "${src_dir}/${internal_manifest}" "${dst_dir}"
done
cp "${KUBE_ROOT}/cluster/gce/gci/configure-helper.sh" "${dst_dir}/gci-configure-helper.sh"
if [[ -e "${KUBE_ROOT}/cluster/gce/gci/gke-internal-configure-helper.sh" ]]; then
cp "${KUBE_ROOT}/cluster/gce/gci/gke-internal-configure-helper.sh" "${dst_dir}/"
fi
cp "${KUBE_ROOT}/cluster/gce/gci/health-monitor.sh" "${dst_dir}/health-monitor.sh"
local objects
objects=$(cd "${KUBE_ROOT}/cluster/addons" && find . \( -name \*.yaml -or -name \*.yaml.in -or -name \*.json \) | grep -v demo)
@@ -434,7 +443,7 @@ function kube::release::package_test_tarball() {
# Add the test image files
mkdir -p "${release_stage}/test/images"
cp -fR "${KUBE_ROOT}/test/images" "${release_stage}/test/"
tar c ${KUBE_TEST_PORTABLE[@]} | tar x -C ${release_stage}
tar c "${KUBE_TEST_PORTABLE[@]}" | tar x -C "${release_stage}"

kube::release::clean_cruft

@@ -477,14 +486,10 @@ Server binary tarballs are no longer included in the Kubernetes final tarball.
Run cluster/get-kube-binaries.sh to download client and server binaries.
EOF

mkdir -p "${release_stage}/third_party"
cp -R "${KUBE_ROOT}/third_party/htpasswd" "${release_stage}/third_party/htpasswd"

# Include hack/lib as a dependency for the cluster/ scripts
mkdir -p "${release_stage}/hack"
cp -R "${KUBE_ROOT}/hack/lib" "${release_stage}/hack/"

cp -R "${KUBE_ROOT}/examples" "${release_stage}/"
cp -R "${KUBE_ROOT}/docs" "${release_stage}/"
cp "${KUBE_ROOT}/README.md" "${release_stage}/"
cp "${KUBE_ROOT}/Godeps/LICENSES" "${release_stage}/"

vendor/k8s.io/kubernetes/build/make-build-image.sh (generated, vendored, 2 changes)
@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

# Copyright 2014 The Kubernetes Authors.
#

vendor/k8s.io/kubernetes/build/make-clean.sh (generated, vendored, 2 changes)
@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

# Copyright 2014 The Kubernetes Authors.
#

vendor/k8s.io/kubernetes/build/package-tarballs.sh (generated, vendored, 2 changes)
@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");

vendor/k8s.io/kubernetes/build/release-in-a-container.sh (generated, vendored, 2 changes)
@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");

vendor/k8s.io/kubernetes/build/release-tars/BUILD (generated, vendored, 2 changes)
@@ -193,9 +193,7 @@ pkg_tar(
"//:version",
"//cluster:all-srcs",
"//docs:all-srcs",
"//examples:all-srcs",
"//hack/lib:all-srcs",
"//third_party/htpasswd:all-srcs",
],
extension = "tar.gz",
package_dir = "kubernetes",

vendor/k8s.io/kubernetes/build/release.sh (generated, vendored, 2 changes)
@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

# Copyright 2014 The Kubernetes Authors.
#

vendor/k8s.io/kubernetes/build/root/.bazelrc (generated, vendored, 1 change)
@@ -15,6 +15,7 @@ build --sandbox_tmpfs_path=/tmp
build --sandbox_fake_username

# Enable go race detection.
build:unit --features=race
test:unit --features=race
test:unit --test_tag_filters=-e2e,-integration
test:unit --flaky_test_attempts=3

vendor/k8s.io/kubernetes/build/root/.kazelcfg.json (generated, vendored, 4 changes)
@@ -1,7 +1,9 @@
{
"GoPrefix": "k8s.io/kubernetes",
"SkippedPaths": [
"^_.*"
"^_.*",
"/_",
"^third_party/etcd.*"
],
"AddSourcesRules": true,
"K8sOpenAPIGen": true

vendor/k8s.io/kubernetes/build/root/BUILD.root (generated, vendored, 9 changes)
@@ -28,13 +28,13 @@ gcs_upload(
data = [
":_binary-artifacts-and-hashes",
"//build/release-tars:release-tars-and-hashes",
"//cluster/gce:gcs-release-artifacts-and-hashes",
"//cluster/gce/gci:gcs-release-artifacts-and-hashes",
],
tags = ["manual"],
upload_paths = {
"//:_binary-artifacts-and-hashes": "bin/linux/amd64",
"//build/release-tars:release-tars-and-hashes": "",
"//cluster/gce:gcs-release-artifacts-and-hashes": "extra/gce",
"//cluster/gce/gci:gcs-release-artifacts-and-hashes": "extra/gce",
},
)

@@ -58,18 +58,19 @@ filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//api:all-srcs",
"//api/openapi-spec:all-srcs",
"//api/swagger-spec:all-srcs",
"//build:all-srcs",
"//cluster:all-srcs",
"//cmd:all-srcs",
"//docs:all-srcs",
"//examples:all-srcs",
"//hack:all-srcs",
"//pkg:all-srcs",
"//plugin:all-srcs",
"//staging:all-srcs",
"//test:all-srcs",
"//third_party:all-srcs",
"//translations:all-srcs",
"//vendor:all-srcs",
],
tags = ["automanaged"],

vendor/k8s.io/kubernetes/build/root/Makefile (generated, vendored, 6 changes)
@@ -112,10 +112,12 @@ define VERIFY_HELP_INFO
#
# Args:
#   BRANCH: Branch to be passed to verify-godeps.sh script.
#   WHAT: List of checks to run
#
# Example:
#   make verify
#   make verify BRANCH=branch_x
#   make verify WHAT="bazel typecheck"
endef
.PHONY: verify
ifeq ($(PRINT_HELP),y)
@@ -213,7 +215,7 @@ test-e2e:
@echo "$$TEST_E2E_HELP_INFO"
else
test-e2e: ginkgo generated_files
go run hack/e2e.go -- -v --build --up --test --down
go run hack/e2e.go -- --build --up --test --down
endif

define TEST_E2E_NODE_HELP_INFO
@@ -249,7 +251,7 @@ define TEST_E2E_NODE_HELP_INFO
#   GUBERNATOR: For REMOTE=true only. Produce link to Gubernator to view logs.
#   Defaults to false.
#   PARALLELISM: The number of gingko nodes to run. Defaults to 8.
#   RUNTIME: Container runtime to use (eg. docker, rkt, remote).
#   RUNTIME: Container runtime to use (eg. docker, remote).
#   Defaults to "docker".
#   CONTAINER_RUNTIME_ENDPOINT: remote container endpoint to connect to.
#   Used when RUNTIME is set to "remote".

vendor/k8s.io/kubernetes/build/root/Makefile.generated_files (generated, vendored, 271 changes)
@@ -35,7 +35,7 @@ SHELL := /bin/bash
# This rule collects all the generated file sets into a single rule. Other
# rules should depend on this to ensure generated files are rebuilt.
.PHONY: generated_files
generated_files: gen_deepcopy gen_defaulter gen_conversion gen_openapi
generated_files: gen_deepcopy gen_defaulter gen_conversion gen_openapi gen_bindata

.PHONY: verify_generated_files
verify_generated_files: verify_gen_deepcopy \
@@ -486,110 +486,6 @@ $(DEFAULTER_GEN):
hack/make-rules/build.sh ./vendor/k8s.io/code-generator/cmd/defaulter-gen
touch $@

#
# Open-api generation
#
# Any package that wants open-api functions generated must include a
# comment-tag in column 0 of one file of the form:
#     // +k8s:openapi-gen=true
#
# The result file, in each pkg, of open-api generation.
OPENAPI_BASENAME := $(GENERATED_FILE_PREFIX)openapi
OPENAPI_FILENAME := $(OPENAPI_BASENAME).go
OPENAPI_OUTPUT_PKG := pkg/generated/openapi

# The tool used to generate open apis.
OPENAPI_GEN := $(BIN_DIR)/openapi-gen

# Find all the directories that request open-api generation.
ifeq ($(DBG_MAKEFILE),1)
$(warning ***** finding all +k8s:openapi-gen tags)
endif
OPENAPI_DIRS := $(shell \
grep --color=never -l '+k8s:openapi-gen=' $(ALL_K8S_TAG_FILES) \
| xargs -n1 dirname \
| LC_ALL=C sort -u \
)

OPENAPI_OUTFILE := $(OPENAPI_OUTPUT_PKG)/$(OPENAPI_FILENAME)

# This rule is the user-friendly entrypoint for openapi generation.
.PHONY: gen_openapi
gen_openapi: $(OPENAPI_OUTFILE) $(OPENAPI_GEN)

# For each dir in OPENAPI_DIRS, this establishes a dependency between the
# output file and the input files that should trigger a rebuild.
#
# Note that this is a deps-only statement, not a full rule (see below). This
# has to be done in a distinct step because wildcards don't work in static
# pattern rules.
#
# The '$(eval)' is needed because this has a different RHS for each LHS, and
# would otherwise produce results that make can't parse.
#
# We depend on the $(GOFILES_META).stamp to detect when the set of input files
# has changed. This allows us to detect deleted input files.
$(foreach dir, $(OPENAPI_DIRS), $(eval \
$(OPENAPI_OUTFILE): $(META_DIR)/$(dir)/$(GOFILES_META).stamp \
$(gofiles__$(dir)) \
))

# How to regenerate open-api code. This emits a single file for all results.
$(OPENAPI_OUTFILE): $(OPENAPI_GEN) $(OPENAPI_GEN)
function run_gen_openapi() { \
./hack/run-in-gopath.sh $(OPENAPI_GEN) \
--v $(KUBE_VERBOSE) \
--logtostderr \
-i $$(echo $(addprefix $(PRJ_SRC_PATH)/, $(OPENAPI_DIRS)) | sed 's/ /,/g') \
-p $(PRJ_SRC_PATH)/$(OPENAPI_OUTPUT_PKG) \
-O $(OPENAPI_BASENAME) \
"$$@"; \
}; \
run_gen_openapi

# This calculates the dependencies for the generator tool, so we only rebuild
# it when needed. It is PHONY so that it always runs, but it only updates the
# file if the contents have actually changed. We 'sinclude' this later.
.PHONY: $(META_DIR)/$(OPENAPI_GEN).mk
$(META_DIR)/$(OPENAPI_GEN).mk:
mkdir -p $(@D); \
(echo -n "$(OPENAPI_GEN): "; \
./hack/run-in-gopath.sh go list \
-f '{{.ImportPath}}{{"\n"}}{{range .Deps}}{{.}}{{"\n"}}{{end}}' \
./vendor/k8s.io/code-generator/cmd/openapi-gen \
| grep --color=never "^$(PRJ_SRC_PATH)/" \
| xargs ./hack/run-in-gopath.sh go list \
-f '{{$$d := .Dir}}{{$$d}}{{"\n"}}{{range .GoFiles}}{{$$d}}/{{.}}{{"\n"}}{{end}}' \
| paste -sd' ' - \
| sed 's/ / \\=,/g' \
| tr '=,' '\n\t' \
| sed "s|$$(pwd -P)/||"; \
) > $@.tmp; \
if ! cmp -s $@.tmp $@; then \
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: $(OPENAPI_GEN).mk changed"; \
fi; \
cat $@.tmp > $@; \
rm -f $@.tmp; \
fi

# Include dependency info for the generator tool. This will cause the rule of
# the same name to be considered and if it is updated, make will restart.
sinclude $(META_DIR)/$(OPENAPI_GEN).mk

# How to build the generator tool. The deps for this are defined in
# the $(OPENAPI_GEN).mk, above.
#
# A word on the need to touch: This rule might trigger if, for example, a
# non-Go file was added or deleted from a directory on which this depends.
# This target needs to be reconsidered, but Go realizes it doesn't actually
# have to be rebuilt. In that case, make will forever see the dependency as
# newer than the binary, and try to rebuild it over and over. So we touch it,
# and make is happy.
$(OPENAPI_GEN):
hack/make-rules/build.sh ./vendor/k8s.io/code-generator/cmd/openapi-gen
touch $@

#
# Conversion generation
#
@@ -805,3 +701,168 @@ sinclude $(META_DIR)/$(CONVERSION_GEN).mk
$(CONVERSION_GEN):
hack/make-rules/build.sh ./vendor/k8s.io/code-generator/cmd/conversion-gen
touch $@

[The entire "Open-api generation" block shown above, from the OPENAPI_BASENAME definition through the final "touch $@" of the $(OPENAPI_GEN) rule, is re-added here verbatim so that it now follows the conversion-generation rules; it is not repeated in this listing.]

#
# bindata generation
#

# The tool used to generate bindata files.
BINDATA_GEN := $(BIN_DIR)/go-bindata

# A wrapper script that generates all bindata files. It is fast enough that we
# don't care.
BINDATA_SCRIPT := hack/generate-bindata.sh

# This rule is the user-friendly entrypoint for bindata generation.
.PHONY: gen_bindata
gen_bindata: $(BINDATA_GEN) FORCE
./hack/run-in-gopath.sh $(BINDATA_SCRIPT)

FORCE:

# This calculates the dependencies for the generator tool, so we only rebuild
# it when needed. It is PHONY so that it always runs, but it only updates the
# file if the contents have actually changed. We 'sinclude' this later.
.PHONY: $(META_DIR)/$(BINDATA_GEN).mk
$(META_DIR)/$(BINDATA_GEN).mk:
mkdir -p $(@D); \
(echo -n "$(BINDATA_GEN): "; \
./hack/run-in-gopath.sh go list \
-f '{{.ImportPath}}{{"\n"}}{{range .Deps}}{{.}}{{"\n"}}{{end}}' \
./vendor/github.com/jteeuwen/go-bindata/go-bindata \
| grep --color=never "^$(PRJ_SRC_PATH)/" \
| xargs ./hack/run-in-gopath.sh go list \
-f '{{$$d := .Dir}}{{$$d}}{{"\n"}}{{range .GoFiles}}{{$$d}}/{{.}}{{"\n"}}{{end}}' \
| paste -sd' ' - \
| sed 's/ / \\=,/g' \
| tr '=,' '\n\t' \
| sed "s|$$(pwd -P)/||"; \
) > $@.tmp; \
if ! cmp -s $@.tmp $@; then \
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: $(BINDATA_GEN).mk changed"; \
fi; \
cat $@.tmp > $@; \
rm -f $@.tmp; \
fi

# Include dependency info for the generator tool. This will cause the rule of
# the same name to be considered and if it is updated, make will restart.
sinclude $(META_DIR)/$(BINDATA_GEN).mk

# How to build the generator tool. The deps for this are defined in
# the $(BINDATA_GEN).mk, above.
#
# A word on the need to touch: This rule might trigger if, for example, a
# non-Go file was added or deleted from a directory on which this depends.
# This target needs to be reconsidered, but Go realizes it doesn't actually
# have to be rebuilt. In that case, make will forever see the dependency as
# newer than the binary, and try to rebuild it over and over. So we touch it,
# and make is happy.
$(BINDATA_GEN):
hack/make-rules/build.sh ./vendor/github.com/jteeuwen/go-bindata/go-bindata
touch $@

vendor/k8s.io/kubernetes/build/root/WORKSPACE (generated, vendored, 44 changes)
@@ -1,52 +1,54 @@
load("//build:workspace_mirror.bzl", "mirror")
load("//build:workspace.bzl", "CRI_TOOLS_VERSION")

http_archive(
name = "io_bazel_rules_go",
sha256 = "66282d078c1847c2d876c02c5dabd4fd57cc75eb41a9668a2374352fa73b4587",
strip_prefix = "rules_go-ff7e3364d9383cf14155f8c2efc87218d07eb03b",
urls = ["https://github.com/bazelbuild/rules_go/archive/ff7e3364d9383cf14155f8c2efc87218d07eb03b.tar.gz"],
sha256 = "242602c9818a83cbe97d1446b48263dcd48949a74d713c172d1b03da841b168a",
urls = mirror("https://github.com/bazelbuild/rules_go/releases/download/0.10.5/rules_go-0.10.5.tar.gz"),
)

http_archive(
name = "io_kubernetes_build",
sha256 = "007774f06536059f3f782d1a092bddc625d88c17f20bbe731cea844a52485b11",
strip_prefix = "repo-infra-97099dccc8807e9159dc28f374a8f0602cab07e1",
urls = ["https://github.com/kubernetes/repo-infra/archive/97099dccc8807e9159dc28f374a8f0602cab07e1.tar.gz"],
urls = mirror("https://github.com/kubernetes/repo-infra/archive/97099dccc8807e9159dc28f374a8f0602cab07e1.tar.gz"),
)

http_archive(
name = "bazel_skylib",
sha256 = "bbccf674aa441c266df9894182d80de104cabd19be98be002f6d478aaa31574d",
strip_prefix = "bazel-skylib-2169ae1c374aab4a09aa90e65efe1a3aad4e279b",
urls = ["https://github.com/bazelbuild/bazel-skylib/archive/2169ae1c374aab4a09aa90e65efe1a3aad4e279b.tar.gz"],
urls = mirror("https://github.com/bazelbuild/bazel-skylib/archive/2169ae1c374aab4a09aa90e65efe1a3aad4e279b.tar.gz"),
)

ETCD_VERSION = "3.2.14"
ETCD_VERSION = "3.2.18"

new_http_archive(
name = "com_coreos_etcd",
build_file = "third_party/etcd.BUILD",
sha256 = "f77398f558ff19b65a0bf978b47868e03683f27090c56c054415666b1d78bf42",
sha256 = "b729db0732448064271ea6fdcb901773c4fe917763ca07776f22d0e5e0bd4097",
strip_prefix = "etcd-v%s-linux-amd64" % ETCD_VERSION,
urls = ["https://github.com/coreos/etcd/releases/download/v%s/etcd-v%s-linux-amd64.tar.gz" % (ETCD_VERSION, ETCD_VERSION)],
urls = mirror("https://github.com/coreos/etcd/releases/download/v%s/etcd-v%s-linux-amd64.tar.gz" % (ETCD_VERSION, ETCD_VERSION)),
)

http_archive(
name = "io_bazel_rules_docker",
sha256 = "c440717ee9b1b2f4a1e9bf5622539feb5aef9db83fc1fa1517818f13c041b0be",
strip_prefix = "rules_docker-8bbe2a8abd382641e65ff7127a3700a8530f02ce",
urls = ["https://github.com/bazelbuild/rules_docker/archive/8bbe2a8abd382641e65ff7127a3700a8530f02ce.tar.gz"],
urls = mirror("https://github.com/bazelbuild/rules_docker/archive/8bbe2a8abd382641e65ff7127a3700a8530f02ce.tar.gz"),
)

load("@bazel_skylib//:lib.bzl", "versions")

versions.check(minimum_bazel_version = "0.10.0")
versions.check(minimum_bazel_version = "0.13.0")

load("@io_bazel_rules_go//go:def.bzl", "go_rules_dependencies", "go_register_toolchains", "go_download_sdk")
load("@io_bazel_rules_docker//docker:docker.bzl", "docker_repositories", "docker_pull")
load("@io_bazel_rules_go//go:def.bzl", "go_download_sdk", "go_register_toolchains", "go_rules_dependencies")
load("@io_bazel_rules_docker//docker:docker.bzl", "docker_pull", "docker_repositories")

go_rules_dependencies()

go_register_toolchains(
go_version = "1.9.3",
go_version = "1.10.3",
)

docker_repositories()
@@ -54,7 +56,13 @@ docker_repositories()
http_file(
name = "kubernetes_cni",
sha256 = "f04339a21b8edf76d415e7f17b620e63b8f37a76b2f706671587ab6464411f2d",
url = "https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.6.0.tgz",
urls = mirror("https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.6.0.tgz"),
)

http_file(
name = "cri_tools",
sha256 = "bdc838174778223a1af4bdeaaed4bd266120c0e152588f78750fb86221677fb4",
urls = mirror("https://github.com/kubernetes-incubator/cri-tools/releases/download/v%s/crictl-v%s-linux-amd64.tar.gz" % (CRI_TOOLS_VERSION, CRI_TOOLS_VERSION)),
)

docker_pull(
@@ -67,10 +75,10 @@ docker_pull(

docker_pull(
name = "debian-hyperkube-base-amd64",
digest = "sha256:d83594ecd85345144584523e7fa5388467edf5d2dfa30d0a1bcbf184cddf4a7b",
digest = "sha256:cc782ed16599000ca4c85d47ec6264753747ae1e77520894dca84b104a7621e2",
registry = "k8s.gcr.io",
repository = "debian-hyperkube-base-amd64",
tag = "0.9", # ignored, but kept here for documentation
tag = "0.10", # ignored, but kept here for documentation
)

docker_pull(
@@ -80,3 +88,7 @@ docker_pull(
repository = "library/busybox",
tag = "latest", # ignored, but kept here for documentation
)

load("//build:workspace_mirror.bzl", "export_urls")

export_urls("workspace_urls")

vendor/k8s.io/kubernetes/build/rpms/10-kubeadm.conf (generated, vendored, 18 changes)
@@ -1,13 +1,11 @@
# Note: This dropin only works with kubeadm and kubelet v1.11+
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
Environment="KUBELET_SYSTEM_PODS_ARGS=--pod-manifest-path=/etc/kubernetes/manifests --allow-privileged=true"
Environment="KUBELET_NETWORK_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
Environment="KUBELET_DNS_ARGS=--cluster-dns=10.96.0.10 --cluster-domain=cluster.local"
Environment="KUBELET_AUTHZ_ARGS=--authorization-mode=Webhook --client-ca-file=/etc/kubernetes/pki/ca.crt"
# Value should match Docker daemon settings.
# Defaults are "cgroupfs" for Debian/Ubuntu/OpenSUSE and "systemd" for Fedora/CentOS/RHEL
Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=systemd"
Environment="KUBELET_CADVISOR_ARGS=--cadvisor-port=0"
Environment="KUBELET_CERTIFICATE_ARGS=--rotate-certificates=true"
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically
EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use
# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file.
EnvironmentFile=-/etc/sysconfig/kubelet
ExecStart=
ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_SYSTEM_PODS_ARGS $KUBELET_NETWORK_ARGS $KUBELET_DNS_ARGS $KUBELET_AUTHZ_ARGS $KUBELET_CGROUP_ARGS $KUBELET_CADVISOR_ARGS $KUBELET_CERTIFICATE_ARGS $KUBELET_EXTRA_ARGS
ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS

vendor/k8s.io/kubernetes/build/rpms/BUILD (generated, vendored, 31 changes)
@@ -1,6 +1,20 @@
package(default_visibility = ["//visibility:public"])

load("@bazel_tools//tools/build_defs/pkg:rpm.bzl", "pkg_rpm")
load("//build:workspace.bzl", "CRI_TOOLS_VERSION")

filegroup(
name = "rpms",
srcs = [
":cri-tools",
":kubeadm",
":kubectl",
":kubelet",
":kubernetes-cni",
],
tags = ["manual"],
visibility = ["//visibility:public"],
)

pkg_rpm(
name = "kubectl",
@@ -10,6 +24,7 @@ pkg_rpm(
"//cmd/kubectl",
],
spec_file = "kubectl.spec",
tags = ["manual"],
version_file = "//build:os_package_version",
)

@@ -22,6 +37,7 @@ pkg_rpm(
"//cmd/kubelet",
],
spec_file = "kubelet.spec",
tags = ["manual"],
version_file = "//build:os_package_version",
)

@@ -31,9 +47,11 @@ pkg_rpm(
changelog = "//:CHANGELOG.md",
data = [
"10-kubeadm.conf",
"kubelet.env",
"//cmd/kubeadm",
],
spec_file = "kubeadm.spec",
tags = ["manual"],
version_file = "//build:os_package_version",
)

@@ -45,9 +63,22 @@ pkg_rpm(
"@kubernetes_cni//file",
],
spec_file = "kubernetes-cni.spec",
tags = ["manual"],
version_file = "//build:cni_package_version",
)

pkg_rpm(
name = "cri-tools",
architecture = "x86_64",
data = [
"@cri_tools//file",
],
spec_file = "cri-tools.spec",
tags = ["manual"],
# dashes are not allowed in rpm versions
version = CRI_TOOLS_VERSION.replace("-", "_"),
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),

vendor/k8s.io/kubernetes/build/rpms/cri-tools.spec (generated, vendored, new file, 21 changes)
@@ -0,0 +1,21 @@
Name: cri-tools
Version: OVERRIDE_THIS
Release: 00
License: ASL 2.0
Summary: Container Runtime Interface tools

URL: https://kubernetes.io

%description
Binaries to interface with the container runtime.

%prep
# TODO(chuckha): update this to use %{version} when the dash is removed from the release
tar -xzf {crictl-v1.0.0-beta.1-linux-amd64.tar.gz}

%install
install -m 755 -d %{buildroot}%{_bindir}
install -p -m 755 -t %{buildroot}%{_bindir} crictl

%files
%{_bindir}/crictl

vendor/k8s.io/kubernetes/build/rpms/kubeadm.spec (generated, vendored, 7 changes)
@@ -16,9 +16,12 @@ Command-line utility for deploying a Kubernetes cluster.
install -m 755 -d %{buildroot}%{_bindir}
install -m 755 -d %{buildroot}%{_sysconfdir}/systemd/system/
install -m 755 -d %{buildroot}%{_sysconfdir}/systemd/system/kubelet.service.d/
install -p -m 755 -t %{buildroot}%{_bindir} kubeadm
install -p -m 755 -t %{buildroot}%{_sysconfdir}/systemd/system/kubelet.service.d/ 10-kubeadm.conf
install -m 755 -d %{buildroot}%{_sysconfdir}/sysconfig/
install -p -m 755 -t %{buildroot}%{_bindir} {kubeadm}
install -p -m 755 -t %{buildroot}%{_sysconfdir}/systemd/system/kubelet.service.d/ {10-kubeadm.conf}
install -p -m 755 -T {kubelet.env} %{buildroot}%{_sysconfdir}/sysconfig/kubelet

%files
%{_bindir}/kubeadm
%{_sysconfdir}/systemd/system/kubelet.service.d/10-kubeadm.conf
%{_sysconfdir}/sysconfig/kubelet

vendor/k8s.io/kubernetes/build/rpms/kubectl.spec (generated, vendored, 2 changes)
@@ -12,7 +12,7 @@ Command-line utility for interacting with a Kubernetes cluster.
%install

install -m 755 -d %{buildroot}%{_bindir}
install -p -m 755 -t %{buildroot}%{_bindir} kubectl
install -p -m 755 -t %{buildroot}%{_bindir} {kubectl}

%files
%{_bindir}/kubectl

vendor/k8s.io/kubernetes/build/rpms/kubelet.env (generated, vendored, new file, 1 change)
@@ -0,0 +1 @@
KUBELET_EXTRA_ARGS=

vendor/k8s.io/kubernetes/build/rpms/kubelet.spec (generated, vendored, 4 changes)
@@ -22,8 +22,8 @@ The node agent of Kubernetes, the container cluster manager.
install -m 755 -d %{buildroot}%{_bindir}
install -m 755 -d %{buildroot}%{_sysconfdir}/systemd/system/
install -m 755 -d %{buildroot}%{_sysconfdir}/kubernetes/manifests/
install -p -m 755 -t %{buildroot}%{_bindir} kubelet
install -p -m 755 -t %{buildroot}%{_sysconfdir}/systemd/system/ kubelet.service
install -p -m 755 -t %{buildroot}%{_bindir} {kubelet}
install -p -m 755 -t %{buildroot}%{_sysconfdir}/systemd/system/ {kubelet.service}

%files
%{_bindir}/kubelet

vendor/k8s.io/kubernetes/build/rpms/kubernetes-cni.spec (generated, vendored, 2 changes)
@@ -11,7 +11,7 @@ Binaries required to provision container networking.

%prep
mkdir -p ./bin
tar -C ./bin -xz -f cni-plugins-amd64-v0.6.0.tgz
tar -C ./bin -xz -f {cni-plugins-amd64-v0.6.0.tgz}

%install

vendor/k8s.io/kubernetes/build/run.sh (generated, vendored, 2 changes)
@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

# Copyright 2014 The Kubernetes Authors.
#

vendor/k8s.io/kubernetes/build/shell.sh (generated, vendored, 2 changes)
@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

# Copyright 2014 The Kubernetes Authors.
#
2
vendor/k8s.io/kubernetes/build/util.sh
generated
vendored
@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

# Copyright 2016 The Kubernetes Authors.
#
78
vendor/k8s.io/kubernetes/build/visible_to/BUILD
generated
vendored
@ -40,13 +40,8 @@ package_group(
        "//hack",
        "//hack/lib",
        "//hack/make-rules",
        "//test/e2e",
        "//test/e2e/framework",
        "//test/e2e/kubectl",
        "//test/e2e/workload",
        "//test/integration/etcd",
        "//test/integration/framework",
        "//test/integration/kubectl",
        "//test/e2e/...",
        "//test/integration/...",
    ],
)
@ -78,23 +73,10 @@ package_group(
    ],
)

package_group(
    name = "pkg_kubectl_CONSUMERS_BAD",
    includes = [
        ":KUBEADM_BAD",
    ],
    packages = [
        "//cmd/clicheck",
        "//cmd/hyperkube",
        "//pkg",
    ],
)

package_group(
    name = "pkg_kubectl_CONSUMERS",
    includes = [
        ":COMMON_generators",
        ":pkg_kubectl_CONSUMERS_BAD",
    ],
    packages = [
        "//cmd/kubectl",
@ -147,6 +129,20 @@ package_group(
    ],
)

package_group(
    name = "pkg_kubectl_cmd_create_CONSUMERS",
    packages = [
        "//pkg/kubectl/cmd",
    ],
)

package_group(
    name = "pkg_kubectl_cmd_get_CONSUMERS",
    packages = [
        "//pkg/kubectl/cmd",
    ],
)

package_group(
    name = "pkg_kubectl_cmd_rollout_CONSUMERS",
    packages = [
@ -174,12 +170,14 @@ package_group(
        "//pkg/kubectl/cmd",
        "//pkg/kubectl/cmd/auth",
        "//pkg/kubectl/cmd/config",
        "//pkg/kubectl/cmd/resource",
        "//pkg/kubectl/cmd/create",
        "//pkg/kubectl/cmd/get",
        "//pkg/kubectl/cmd/rollout",
        "//pkg/kubectl/cmd/set",
        "//pkg/kubectl/cmd/templates",
        "//pkg/kubectl/cmd/util",
        "//pkg/kubectl/cmd/util/sanity",
        "//pkg/kubectl/cmd/wait",
    ],
)
@ -195,31 +193,21 @@ package_group(
    packages = [
        "//pkg/kubectl/cmd",
        "//pkg/kubectl/cmd/auth",
        "//pkg/kubectl/cmd/resource",
        "//pkg/kubectl/cmd/create",
        "//pkg/kubectl/cmd/get",
        "//pkg/kubectl/cmd/rollout",
        "//pkg/kubectl/cmd/set",
        "//pkg/kubectl/cmd/wait",
        "//pkg/kubectl/explain",
    ],
)

package_group(
    name = "pkg_kubectl_cmd_util_CONSUMERS_BAD",
    includes = [
        ":KUBEADM_BAD",
    ],
    packages = [
        "//cmd/clicheck",
        "//cmd/hyperkube",
        "//cmd/kube-proxy/app",
        "//cmd/kube-scheduler/app",
    ],
)

package_group(
    name = "pkg_kubectl_cmd_util_CONSUMERS",
    includes = [
        ":COMMON_generators",
        ":COMMON_testing",
        ":pkg_kubectl_cmd_util_CONSUMERS_BAD",
        ":KUBEADM_BAD",
    ],
    packages = [
        "//cmd/kubectl",
@ -227,12 +215,14 @@ package_group(
        "//pkg/kubectl/cmd",
        "//pkg/kubectl/cmd/auth",
        "//pkg/kubectl/cmd/config",
        "//pkg/kubectl/cmd/resource",
        "//pkg/kubectl/cmd/create",
        "//pkg/kubectl/cmd/get",
        "//pkg/kubectl/cmd/rollout",
        "//pkg/kubectl/cmd/set",
        "//pkg/kubectl/cmd/testing",
        "//pkg/kubectl/cmd/util",
        "//pkg/kubectl/cmd/util/editor",
        "//pkg/kubectl/cmd/wait",
    ],
)
@ -240,6 +230,7 @@ package_group(
    name = "pkg_kubectl_cmd_util_editor_CONSUMERS",
    packages = [
        "//pkg/kubectl/cmd",
        "//pkg/kubectl/cmd/create",
        "//pkg/kubectl/cmd/util",
    ],
)
@ -260,19 +251,10 @@ package_group(
    ],
)

package_group(
    name = "pkg_kubectl_metricsutil_CONSUMERS_BAD",
    packages = [
        "//cmd/clicheck",
        "//cmd/hyperkube",
    ],
)

package_group(
    name = "pkg_kubectl_metricsutil_CONSUMERS",
    includes = [
        ":COMMON_generators",
        ":pkg_kubectl_metricsutil_CONSUMERS_BAD",
    ],
    packages = [
        "//cmd/kubectl",
@ -295,7 +277,8 @@ package_group(
        "//pkg/kubectl/cmd",
        "//pkg/kubectl/cmd/auth",
        "//pkg/kubectl/cmd/config",
        "//pkg/kubectl/cmd/resource",
        "//pkg/kubectl/cmd/create",
        "//pkg/kubectl/cmd/get",
        "//pkg/kubectl/cmd/rollout",
        "//pkg/kubectl/cmd/set",
        "//pkg/kubectl/cmd/testing",
@ -325,6 +308,7 @@ package_group(
    name = "pkg_kubectl_validation_CONSUMERS",
    packages = [
        "//pkg/kubectl",
        "//pkg/kubectl/cmd",
        "//pkg/kubectl/cmd/testing",
        "//pkg/kubectl/cmd/util",
        "//pkg/kubectl/resource",
2
vendor/k8s.io/kubernetes/build/visible_to/README.md
generated
vendored
@ -115,7 +115,7 @@ visibility = ["//visible_to:client_foo,//visible_to:server_foo"],
```
bazel build --check_visibility --nobuild \
//cmd/... //pkg/... //plugin/... \
//third_party/... //examples/... //test/... //vendor/k8s.io/...
//third_party/... //test/... //vendor/k8s.io/...
```

#### Who depends on target _q_?
15
vendor/k8s.io/kubernetes/build/workspace.bzl
generated
vendored
Normal file
@ -0,0 +1,15 @@
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

CRI_TOOLS_VERSION = "1.0.0-beta.1"
57
vendor/k8s.io/kubernetes/build/workspace_mirror.bzl
generated
vendored
Normal file
@ -0,0 +1,57 @@
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

prefix = "https://storage.googleapis.com/k8s-bazel-cache/"

def mirror(url):
    """Try downloading a URL from a GCS mirror first, then from the original.

    Update the GCS bucket using bazel run //hack:update-mirror"""
    return [prefix + url, url]

def mirror_urls():
    # This function only gives proper results when executed from WORKSPACE,
    # but the data is needed in sh_binary, which can only be in a BUILD file.
    # Thus, it is be exported by a repository_rule (which executes in WORKSPACE)
    # to be used by the sh_binary.
    urls = []
    for k, v in native.existing_rules().items():
        us = list(v.get("urls", []))
        if "url" in v:
            us.append(v["url"])
        for u in us:
            if u and not u.startswith(prefix):
                urls.append(u)
    return sorted(urls)

def export_urls_impl(repo_ctx):
    repo_ctx.file(repo_ctx.path("BUILD.bazel"), """
exports_files(glob(["**"]), visibility=["//visibility:public"])
""")
    repo_ctx.file(
        repo_ctx.path("urls.txt"),
        # Add a trailing newline, since the "while read" loop needs it
        content = ("\n".join(repo_ctx.attr.urls) + "\n"),
    )

_export_urls = repository_rule(
    attrs = {
        "urls": attr.string_list(mandatory = True),
    },
    local = True,
    implementation = export_urls_impl,
)

def export_urls(name):
    return _export_urls(name = name, urls = mirror_urls())
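For orientation, here is a minimal, hypothetical sketch of how a WORKSPACE file could consume the `mirror()` helper added above; the repository name, checksum, and URL are illustrative placeholders, not part of this diff:

```python
# Hypothetical WORKSPACE fragment (illustrative only).
# mirror() returns [cache URL, original URL], so Bazel tries the
# k8s-bazel-cache GCS bucket first and falls back to the upstream host.
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("//build:workspace_mirror.bzl", "mirror")

http_archive(
    name = "example_archive",  # placeholder repository name
    sha256 = "0000000000000000000000000000000000000000000000000000000000000000",  # placeholder
    urls = mirror("https://example.com/example-1.0.tar.gz"),  # placeholder upstream URL
)
```

Since `mirror()` only reorders the URL list, a missing object in the cache bucket is not fatal; `mirror_urls()` and the `export_urls` repository rule above appear to exist so that `bazel run //hack:update-mirror` (mentioned in the docstring) can collect the upstream URLs and refresh that bucket.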