Fresh dep ensure

Mike Cronce
2018-11-26 13:23:56 -05:00
parent 93cb8a04d7
commit 407478ab9a
9016 changed files with 551394 additions and 279685 deletions

18 vendor/k8s.io/kubernetes/build/BUILD generated vendored
View File

@ -1,6 +1,6 @@
package(default_visibility = ["//visibility:public"])
load("@io_bazel_rules_docker//docker:docker.bzl", "docker_build", "docker_bundle")
load("@io_bazel_rules_docker//container:container.bzl", "container_bundle", "container_image")
load("@io_kubernetes_build//defs:build.bzl", "release_filegroup")
filegroup(
@ -25,19 +25,19 @@ filegroup(
# in build/common.sh.
DOCKERIZED_BINARIES = {
"cloud-controller-manager": {
"base": "@official_busybox//image",
"base": "@debian-base-amd64//image",
"target": "//cmd/cloud-controller-manager:cloud-controller-manager",
},
"kube-apiserver": {
"base": "@official_busybox//image",
"base": "@debian-base-amd64//image",
"target": "//cmd/kube-apiserver:kube-apiserver",
},
"kube-controller-manager": {
"base": "@official_busybox//image",
"base": "@debian-base-amd64//image",
"target": "//cmd/kube-controller-manager:kube-controller-manager",
},
"kube-scheduler": {
"base": "@official_busybox//image",
"base": "@debian-base-amd64//image",
"target": "//cmd/kube-scheduler:kube-scheduler",
},
"kube-proxy": {
@ -46,13 +46,14 @@ DOCKERIZED_BINARIES = {
},
}
[docker_build(
[container_image(
name = binary + "-internal",
base = meta["base"],
cmd = ["/usr/bin/" + binary],
debs = [
"//build/debs:%s.deb" % binary,
],
stamp = True,
symlinks = {
# Some cluster startup scripts expect to find the binaries in /usr/local/bin,
# but the debs install the binaries into /usr/bin.
@ -60,12 +61,10 @@ DOCKERIZED_BINARIES = {
},
) for binary, meta in DOCKERIZED_BINARIES.items()]
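# Illustrative note (not part of the BUILD file): the comprehension above
# stamps out one container_image rule per DOCKERIZED_BINARIES entry, e.g. a
# "kube-apiserver-internal" image built from @debian-base-amd64//image with
# /usr/bin/kube-apiserver as its command.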
[docker_bundle(
[container_bundle(
name = binary,
# TODO(thockin): remove the google_containers name after release 1.11.
images = {
"k8s.gcr.io/%s:{STABLE_DOCKER_TAG}" % binary: binary + "-internal",
"gcr.io/google_containers/%s:{STABLE_DOCKER_TAG}" % binary: binary + "-internal",
},
stamp = True,
) for binary in DOCKERIZED_BINARIES.keys()]
@ -132,7 +131,6 @@ release_filegroup(
"//cmd/kube-apiserver",
"//cmd/kube-controller-manager",
"//cmd/kube-scheduler",
"//vendor/k8s.io/kube-aggregator",
],
)

View File

@ -3,6 +3,7 @@ reviewers:
- ixdy
- jbeda
- lavalamp
- spiffxp
- zmerlynn
approvers:
- cblecker

View File

@ -24,12 +24,13 @@ The following scripts are found in the `build/` directory. Note that all scripts
* `build/run.sh`: Run a command in a build docker container. Common invocations:
* `build/run.sh make`: Build just linux binaries in the container. Pass options and packages as necessary.
* `build/run.sh make cross`: Build all binaries for all platforms
* `build/run.sh make kubectl KUBE_BUILD_PLATFORMS=darwin/amd64`: Build the specific binary for the specific platform (`kubectl` and `darwin/amd64` respectively in this example)
* `build/run.sh make test`: Run all unit tests
* `build/run.sh make test-integration`: Run integration test
* `build/run.sh make test-cmd`: Run CLI tests
* `build/copy-output.sh`: This will copy the contents of `_output/dockerized/bin` from the Docker container to the local `_output/dockerized/bin`. It will also copy out specific file patterns that are generated as part of the build process. This is run automatically as part of `build/run.sh`.
* `build/make-clean.sh`: Clean out the contents of `_output`, remove any locally built container images and remove the data container.
* `/build/shell.sh`: Drop into a `bash` shell in a build container with a snapshot of the current repo code.
* `build/shell.sh`: Drop into a `bash` shell in a build container with a snapshot of the current repo code.
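For example (illustrative, combining the invocations documented above), a containerized unit-test run for a single package looks like:
```console
# Run unit tests for one package inside the build container
$ build/run.sh make test WHAT=./pkg/kubelet
```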
## Basic Flow
@ -81,7 +82,7 @@ docker-machine create \
# Set up local docker to talk to that machine
eval $(docker-machine env ${KUBE_BUILD_VM})
# Pin down the port that rsync will be exposed on on the remote machine
# Pin down the port that rsync will be exposed on the remote machine
export KUBE_RSYNC_PORT=8730
# forward local 8730 to that machine so that rsync works

View File

@ -15,7 +15,7 @@
# This file creates a standard build environment for building cross
# platform go binary for the architecture kubernetes cares about.
FROM golang:1.10.3
FROM golang:1.11.2
ENV GOARM 7
ENV KUBE_DYNAMIC_CROSSPLATFORMS \
@ -33,24 +33,14 @@ ENV KUBE_CROSSPLATFORMS \
windows/amd64 windows/386
# Pre-compile the standard go library when cross-compiling. This is much easier now when we have go1.5+
RUN for platform in ${KUBE_CROSSPLATFORMS}; do GOOS=${platform%/*} GOARCH=${platform##*/} go install std; done
RUN for platform in ${KUBE_CROSSPLATFORMS}; do GOOS=${platform%/*} GOARCH=${platform##*/} go install std; done \
&& go clean -cache
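# Illustrative note (not in the original Dockerfile): the parameter expansions
# above split each "os/arch" pair; for platform=linux/arm64, ${platform%/*}
# strips the shortest trailing "/*" match to give GOOS=linux, and
# ${platform##*/} strips the longest leading "*/" match to give GOARCH=arm64.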
# Install g++, then download and install protoc for generating protobuf output
RUN apt-get update \
&& apt-get install -y g++ rsync jq apt-utils file patch \
&& apt-get install -y rsync jq apt-utils file patch unzip \
&& apt-get clean && rm -rf /var/lib/apt/lists/*
RUN mkdir -p /usr/local/src/protobuf \
&& cd /usr/local/src/protobuf \
&& curl -sSL https://github.com/google/protobuf/releases/download/v3.0.0-beta-2/protobuf-cpp-3.0.0-beta-2.tar.gz | tar -xzv \
&& cd protobuf-3.0.0-beta-2 \
&& ./configure \
&& make install \
&& ldconfig \
&& cd .. \
&& rm -rf protobuf-3.0.0-beta-2 \
&& protoc --version
# Use dynamic cgo linking for architectures other than amd64 for the server platforms
# To install crossbuild essential for other architectures add the following repository.
RUN echo "deb http://archive.ubuntu.com/ubuntu xenial main universe" > /etc/apt/sources.list.d/cgocrosscompiling.list \
@ -60,6 +50,16 @@ RUN echo "deb http://archive.ubuntu.com/ubuntu xenial main universe" > /etc/apt/
&& for platform in ${KUBE_DYNAMIC_CROSSPLATFORMS}; do apt-get install -y crossbuild-essential-${platform}; done \
&& apt-get clean && rm -rf /var/lib/apt/lists/*
RUN PROTOBUF_VERSION=3.0.2; ZIPNAME="protoc-${PROTOBUF_VERSION}-linux-x86_64.zip"; \
mkdir /tmp/protoc && cd /tmp/protoc \
&& wget "https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/${ZIPNAME}" \
&& unzip "${ZIPNAME}" \
&& chmod -R +rX /tmp/protoc \
&& cp -pr bin /usr/local \
&& cp -pr include /usr/local \
&& rm -rf /tmp/protoc \
&& protoc --version
# work around 64MB tmpfs size in Docker 1.6
ENV TMPDIR /tmp.k8s
RUN mkdir $TMPDIR \
@ -68,10 +68,11 @@ RUN mkdir $TMPDIR \
# Get the code coverage tool and goimports
RUN go get golang.org/x/tools/cmd/cover \
golang.org/x/tools/cmd/goimports
golang.org/x/tools/cmd/goimports \
&& go clean -cache
# Download and symlink etcd. We need this for our integration tests.
RUN export ETCD_VERSION=v3.2.18; \
RUN export ETCD_VERSION=v3.2.24; \
mkdir -p /usr/local/src/etcd \
&& cd /usr/local/src/etcd \
&& curl -fsSL https://github.com/coreos/etcd/releases/download/${ETCD_VERSION}/etcd-${ETCD_VERSION}-linux-amd64.tar.gz | tar -xz \

View File

@ -1 +1 @@
v1.10.3-1
v1.11.2-1

View File

@ -19,6 +19,9 @@ set -o errexit
set -o nounset
set -o pipefail
# Unset CDPATH; having it set messes up script import paths
unset CDPATH
USER_ID=$(id -u)
GROUP_ID=$(id -g)
@ -85,56 +88,18 @@ readonly KUBE_CONTAINER_RSYNC_PORT=8730
#
# $1 - server architecture
kube::build::get_docker_wrapped_binaries() {
debian_iptables_version=v10
local arch=$1
local debian_base_version=0.4.0
local debian_iptables_version=v11.0
### If you change any of these lists, please also update DOCKERIZED_BINARIES
### in build/BUILD.
case $1 in
"amd64")
local targets=(
cloud-controller-manager,busybox
kube-apiserver,busybox
kube-controller-manager,busybox
kube-scheduler,busybox
kube-aggregator,busybox
kube-proxy,k8s.gcr.io/debian-iptables-amd64:${debian_iptables_version}
);;
"arm")
local targets=(
cloud-controller-manager,arm32v7/busybox
kube-apiserver,arm32v7/busybox
kube-controller-manager,arm32v7/busybox
kube-scheduler,arm32v7/busybox
kube-aggregator,arm32v7/busybox
kube-proxy,k8s.gcr.io/debian-iptables-arm:${debian_iptables_version}
);;
"arm64")
local targets=(
cloud-controller-manager,arm64v8/busybox
kube-apiserver,arm64v8/busybox
kube-controller-manager,arm64v8/busybox
kube-scheduler,arm64v8/busybox
kube-aggregator,arm64v8/busybox
kube-proxy,k8s.gcr.io/debian-iptables-arm64:${debian_iptables_version}
);;
"ppc64le")
local targets=(
cloud-controller-manager,ppc64le/busybox
kube-apiserver,ppc64le/busybox
kube-controller-manager,ppc64le/busybox
kube-scheduler,ppc64le/busybox
kube-aggregator,ppc64le/busybox
kube-proxy,k8s.gcr.io/debian-iptables-ppc64le:${debian_iptables_version}
);;
"s390x")
local targets=(
cloud-controller-manager,s390x/busybox
kube-apiserver,s390x/busybox
kube-controller-manager,s390x/busybox
kube-scheduler,s390x/busybox
kube-aggregator,s390x/busybox
kube-proxy,k8s.gcr.io/debian-iptables-s390x:${debian_iptables_version}
);;
esac
### in build/BUILD. And kube::golang::server_image_targets
local targets=(
cloud-controller-manager,"k8s.gcr.io/debian-base-${arch}:${debian_base_version}"
kube-apiserver,"k8s.gcr.io/debian-base-${arch}:${debian_base_version}"
kube-controller-manager,"k8s.gcr.io/debian-base-${arch}:${debian_base_version}"
kube-scheduler,"k8s.gcr.io/debian-base-${arch}:${debian_base_version}"
kube-proxy,"k8s.gcr.io/debian-iptables-${arch}:${debian_iptables_version}"
)
echo "${targets[@]}"
}
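# Illustrative consumer sketch (hypothetical, not part of common.sh): each
# element echoed above is a "binary,baseimage" pair, so a caller can split
# on the first comma:
#   for wrappable in $(kube::build::get_docker_wrapped_binaries amd64); do
#     binary_name=${wrappable%%,*}; base_image=${wrappable#*,}
#     echo "${binary_name} will be wrapped in ${base_image}"
#   done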
@ -598,9 +563,11 @@ function kube::build::run_build_command_ex() {
--env "KUBE_FASTBUILD=${KUBE_FASTBUILD:-false}"
--env "KUBE_BUILDER_OS=${OSTYPE:-notdetected}"
--env "KUBE_VERBOSE=${KUBE_VERBOSE}"
--env "KUBE_BUILD_WITH_COVERAGE=${KUBE_BUILD_WITH_COVERAGE:-}"
--env "GOFLAGS=${GOFLAGS:-}"
--env "GOLDFLAGS=${GOLDFLAGS:-}"
--env "GOGCFLAGS=${GOGCFLAGS:-}"
--env "SOURCE_DATE_EPOCH=${SOURCE_DATE_EPOCH:-}"
)
if [[ -n "${DOCKER_CGROUP_PARENT:-}" ]]; then

View File

@ -56,6 +56,7 @@ RUN echo "Yes, do as I say!" | apt-get purge \
libprocps6 \
libslang2 \
libss2 \
libsystemd0 \
libtext-charwidth-perl libtext-iconv-perl libtext-wrapi18n-perl \
ncurses-base \
ncurses-bin \

View File

@ -12,19 +12,26 @@
# See the License for the specific language governing permissions and
# limitations under the License.
all: build
all: all-build
REGISTRY ?= staging-k8s.gcr.io
IMAGE ?= debian-base
IMAGE ?= $(REGISTRY)/debian-base
BUILD_IMAGE ?= debian-build
TAG ?= 0.3
TAG ?= 0.4.0
TAR_FILE ?= rootfs.tar
ARCH?=amd64
ALL_ARCH = amd64 arm arm64 ppc64le s390x
TEMP_DIR:=$(shell mktemp -d)
QEMUVERSION=v2.9.1
SUDO=$(if $(filter 0,$(shell id -u)),,sudo)
# This option is for running docker manifest command
export DOCKER_CLI_EXPERIMENTAL := enabled
ifeq ($(ARCH),amd64)
BASEIMAGE?=debian:stretch
endif
@ -45,6 +52,23 @@ ifeq ($(ARCH),s390x)
QEMUARCH=s390x
endif
sub-build-%:
$(MAKE) ARCH=$* build
all-build: $(addprefix sub-build-,$(ALL_ARCH))
sub-push-image-%:
$(MAKE) ARCH=$* push
all-push-images: $(addprefix sub-push-image-,$(ALL_ARCH))
all-push: all-push-images push-manifest
push-manifest:
docker manifest create --amend $(IMAGE):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(IMAGE)\-&:$(TAG)~g")
@for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${IMAGE}:${TAG} ${IMAGE}-$${arch}:${TAG}; done
docker manifest push --purge ${IMAGE}:${TAG}
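# Illustrative (not part of the original Makefile): the sed call above turns
# each arch token into a per-arch image reference, so with the defaults the
# manifest list is assembled from
#   staging-k8s.gcr.io/debian-base-amd64:0.4.0 ... staging-k8s.gcr.io/debian-base-s390x:0.4.0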
build: clean
cp ./* $(TEMP_DIR)
cat Dockerfile.build \
@ -57,9 +81,11 @@ ifeq ($(ARCH),amd64)
sed "/CROSS_BUILD_/d" $(TEMP_DIR)/Dockerfile.build > $(TEMP_DIR)/Dockerfile.build.tmp
else
# When cross-building, only the placeholder "CROSS_BUILD_" should be removed
# Register /usr/bin/qemu-ARCH-static as the handler for ARM binaries in the kernel
docker run --rm --privileged multiarch/qemu-user-static:register --reset
# Register /usr/bin/qemu-ARCH-static as the handler for non-x86 binaries in the kernel
$(SUDO) ../../third_party/multiarch/qemu-user-static/register/register.sh --reset
curl -sSL https://github.com/multiarch/qemu-user-static/releases/download/$(QEMUVERSION)/x86_64_qemu-$(QEMUARCH)-static.tar.gz | tar -xz -C $(TEMP_DIR)
# Ensure we don't get surprised by umask settings
chmod 0755 $(TEMP_DIR)/qemu-$(QEMUARCH)-static
sed "s/CROSS_BUILD_//g" $(TEMP_DIR)/Dockerfile.build > $(TEMP_DIR)/Dockerfile.build.tmp
endif
mv $(TEMP_DIR)/Dockerfile.build.tmp $(TEMP_DIR)/Dockerfile.build
@ -67,13 +93,13 @@ endif
docker build --pull -t $(BUILD_IMAGE) -f $(TEMP_DIR)/Dockerfile.build $(TEMP_DIR)
docker create --name $(BUILD_IMAGE) $(BUILD_IMAGE)
docker export $(BUILD_IMAGE) > $(TEMP_DIR)/$(TAR_FILE)
docker build -t $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG) $(TEMP_DIR)
docker build -t $(IMAGE)-$(ARCH):$(TAG) $(TEMP_DIR)
rm -rf $(TEMP_DIR)
push: build
docker push $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG)
docker push $(IMAGE)-$(ARCH):$(TAG)
clean:
docker rmi -f $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG) || true
docker rmi -f $(IMAGE)-$(ARCH):$(TAG) || true
docker rmi -f $(BUILD_IMAGE) || true
docker rm -f $(BUILD_IMAGE) || true

View File

@ -14,6 +14,22 @@
FROM BASEIMAGE
# TODO(#69896): deprecate the shortened aliases in /
RUN ln -s /hyperkube /apiserver \
&& ln -s /hyperkube /cloud-controller-manager \
&& ln -s /hyperkube /controller-manager \
&& ln -s /hyperkube /kubectl \
&& ln -s /hyperkube /kubelet \
&& ln -s /hyperkube /proxy \
&& ln -s /hyperkube /scheduler \
&& ln -s /hyperkube /usr/local/bin/cloud-controller-manager \
&& ln -s /hyperkube /usr/local/bin/kube-apiserver \
&& ln -s /hyperkube /usr/local/bin/kube-controller-manager \
&& ln -s /hyperkube /usr/local/bin/kube-proxy \
&& ln -s /hyperkube /usr/local/bin/kube-scheduler \
&& ln -s /hyperkube /usr/local/bin/kubectl \
&& ln -s /hyperkube /usr/local/bin/kubelet
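# Illustrative note (an assumption, not stated in this Dockerfile): hyperkube
# dispatches on the basename it is invoked as, so the kubelet symlink above
# behaves like running "/hyperkube kubelet".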
RUN echo CACHEBUST>/dev/null && clean-install \
bash
@ -38,6 +54,7 @@ RUN echo CACHEBUST>/dev/null && clean-install \
jq \
kmod \
openssh-client \
netbase \
nfs-common \
socat \
udev \

View File

@ -18,20 +18,43 @@
# [ARCH=amd64] [REGISTRY="staging-k8s.gcr.io"] make (build|push)
REGISTRY?=staging-k8s.gcr.io
IMAGE?=debian-hyperkube-base
TAG=0.10
IMAGE?=$(REGISTRY)/debian-hyperkube-base
TAG=0.12.0
ARCH?=amd64
ALL_ARCH = amd64 arm arm64 ppc64le s390x
CACHEBUST?=1
BASEIMAGE=k8s.gcr.io/debian-base-$(ARCH):0.3
BASEIMAGE=k8s.gcr.io/debian-base-$(ARCH):0.4.0
CNI_VERSION=v0.6.0
TEMP_DIR:=$(shell mktemp -d)
CNI_TARBALL=cni-plugins-$(ARCH)-$(CNI_VERSION).tgz
.PHONY: all build push clean
# This option is for running docker manifest command
export DOCKER_CLI_EXPERIMENTAL := enabled
all: push
SUDO=$(if $(filter 0,$(shell id -u)),,sudo)
.PHONY: all build push clean all-build all-push-images all-push push-manifest
all: all-push
sub-build-%:
$(MAKE) ARCH=$* build
all-build: $(addprefix sub-build-,$(ALL_ARCH))
sub-push-image-%:
$(MAKE) ARCH=$* push
all-push-images: $(addprefix sub-push-image-,$(ALL_ARCH))
all-push: all-push-images push-manifest
push-manifest:
docker manifest create --amend $(IMAGE):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(IMAGE)\-&:$(TAG)~g")
@for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${IMAGE}:${TAG} ${IMAGE}-$${arch}:${TAG}; done
docker manifest push --purge ${IMAGE}:${TAG}
cni-tars/$(CNI_TARBALL):
mkdir -p cni-tars/
@ -51,10 +74,12 @@ endif
mkdir -p ${TEMP_DIR}/cni-bin/bin
tar -xz -C ${TEMP_DIR}/cni-bin/bin -f "cni-tars/${CNI_TARBALL}"
ifneq ($(ARCH),amd64)
# Register /usr/bin/qemu-ARCH-static as the handler for non-x86 binaries in the kernel
docker run --rm --privileged multiarch/qemu-user-static:register --reset
docker build --pull -t $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG) $(TEMP_DIR)
$(SUDO) ../../third_party/multiarch/qemu-user-static/register/register.sh --reset
endif
docker build --pull -t $(IMAGE)-$(ARCH):$(TAG) $(TEMP_DIR)
rm -rf $(TEMP_DIR)
push: build
docker push $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG)
docker push $(IMAGE)-$(ARCH):$(TAG)

View File

@ -0,0 +1,2 @@
labels:
- sig/release

View File

@ -10,24 +10,16 @@ This image is compiled for multiple architectures.
If you're editing the Dockerfile or some other thing, please bump the `TAG` in the Makefile.
```console
# Build for linux/amd64 (default)
$ make push ARCH=amd64
# Build and push images for all the architectures
$ make all-push
# ---> staging-k8s.gcr.io/debian-hyperkube-base-amd64:TAG
$ make push ARCH=arm
# ---> staging-k8s.gcr.io/debian-hyperkube-base-arm:TAG
$ make push ARCH=arm64
# ---> staging-k8s.gcr.io/debian-hyperkube-base-arm64:TAG
$ make push ARCH=ppc64le
# ---> staging-k8s.gcr.io/debian-hyperkube-base-ppc64le:TAG
$ make push ARCH=s390x
# ---> staging-k8s.gcr.io/debian-hyperkube-base-s390x:TAG
```
If you don't want to push the images, run `make build` instead
If you don't want to push the images, run `make all-build` instead
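To build a single architecture locally without pushing (illustrative):
```console
$ make build ARCH=arm64
# ---> staging-k8s.gcr.io/debian-hyperkube-base-arm64:TAG (built locally, not pushed)
```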
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/build/debian-hyperkube-base/README.md?pixel)]()

View File

@ -14,13 +14,10 @@
FROM BASEIMAGE
# If we're building for another architecture than amd64, the CROSS_BUILD_ placeholder is removed so e.g. CROSS_BUILD_COPY turns into COPY
# If we're building normally, for amd64, CROSS_BUILD lines are removed
CROSS_BUILD_COPY qemu-ARCH-static /usr/bin/
RUN clean-install \
conntrack \
ebtables \
ipset \
iptables \
kmod
kmod \
netbase

View File

@ -12,49 +12,51 @@
# See the License for the specific language governing permissions and
# limitations under the License.
.PHONY: build push
.PHONY: build push all all-build all-push-images all-push push-manifest
REGISTRY?="staging-k8s.gcr.io"
IMAGE=debian-iptables
TAG=v10
IMAGE=$(REGISTRY)/debian-iptables
TAG?=v11.0
ARCH?=amd64
ALL_ARCH = amd64 arm arm64 ppc64le s390x
TEMP_DIR:=$(shell mktemp -d)
QEMUVERSION=v2.9.1
ifeq ($(ARCH),arm)
QEMUARCH=arm
endif
ifeq ($(ARCH),arm64)
QEMUARCH=aarch64
endif
ifeq ($(ARCH),ppc64le)
QEMUARCH=ppc64le
endif
ifeq ($(ARCH),s390x)
QEMUARCH=s390x
endif
BASEIMAGE?=k8s.gcr.io/debian-base-$(ARCH):0.4.0
BASEIMAGE=k8s.gcr.io/debian-base-$(ARCH):0.3
# This option is for running docker manifest command
export DOCKER_CLI_EXPERIMENTAL := enabled
SUDO=$(if $(filter 0,$(shell id -u)),,sudo)
build:
cp ./* $(TEMP_DIR)
cd $(TEMP_DIR) && sed -i "s|BASEIMAGE|$(BASEIMAGE)|g" Dockerfile
cd $(TEMP_DIR) && sed -i "s|ARCH|$(QEMUARCH)|g" Dockerfile
ifeq ($(ARCH),amd64)
# When building "normally" for amd64, remove the whole line, it has no part in the amd64 image
cd $(TEMP_DIR) && sed -i "/CROSS_BUILD_/d" Dockerfile
else
# When cross-building, only the placeholder "CROSS_BUILD_" should be removed
# Register /usr/bin/qemu-ARCH-static as the handler for ARM binaries in the kernel
docker run --rm --privileged multiarch/qemu-user-static:register --reset
curl -sSL https://github.com/multiarch/qemu-user-static/releases/download/$(QEMUVERSION)/x86_64_qemu-$(QEMUARCH)-static.tar.gz | tar -xz -C $(TEMP_DIR)
cd $(TEMP_DIR) && sed -i "s/CROSS_BUILD_//g" Dockerfile
ifneq ($(ARCH),amd64)
# Register /usr/bin/qemu-ARCH-static as the handler for non-x86 binaries in the kernel
$(SUDO) ../../third_party/multiarch/qemu-user-static/register/register.sh --reset
endif
docker build --pull -t $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG) $(TEMP_DIR)
docker build --pull -t $(IMAGE)-$(ARCH):$(TAG) $(TEMP_DIR)
push: build
docker push $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG)
docker push $(IMAGE)-$(ARCH):$(TAG)
all: push
sub-build-%:
$(MAKE) ARCH=$* build
all-build: $(addprefix sub-build-,$(ALL_ARCH))
sub-push-image-%:
$(MAKE) ARCH=$* push
all-push-images: $(addprefix sub-push-image-,$(ALL_ARCH))
all-push: all-push-images push-manifest
push-manifest:
docker manifest create --amend $(IMAGE):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(IMAGE)\-&:$(TAG)~g")
@for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${IMAGE}:${TAG} ${IMAGE}-$${arch}:${TAG}; done
docker manifest push --purge ${IMAGE}:${TAG}
all: all-push

View File

@ -9,24 +9,16 @@ This image is compiled for multiple architectures.
If you're editing the Dockerfile or some other thing, please bump the `TAG` in the Makefile.
```console
# Build for linux/amd64 (default)
$ make push ARCH=amd64
# Build and push images for all the architectures
$ make all-push
# ---> staging-k8s.gcr.io/debian-iptables-amd64:TAG
$ make push ARCH=arm
# ---> staging-k8s.gcr.io/debian-iptables-arm:TAG
$ make push ARCH=arm64
# ---> staging-k8s.gcr.io/debian-iptables-arm64:TAG
$ make push ARCH=ppc64le
# ---> staging-k8s.gcr.io/debian-iptables-ppc64le:TAG
$ make push ARCH=s390x
# ---> staging-k8s.gcr.io/debian-iptables-s390x:TAG
```
If you don't want to push the images, run `make` or `make build` instead
If you don't want to push the images, run `make build ARCH={target_arch}` or `make all-build` instead
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/build/debian-iptables/README.md?pixel)]()

2 vendor/k8s.io/kubernetes/build/debs/50-kubeadm.conf generated vendored Normal file
View File

@ -0,0 +1,2 @@
# The file is provided as part of the kubeadm package
net.ipv4.ip_forward = 1

View File

@ -13,7 +13,6 @@ load("//build:workspace.bzl", "CRI_TOOLS_VERSION")
release_filegroup(
name = "debs",
srcs = [
":cloud-controller-manager.deb",
":cri-tools.deb",
":kubeadm.deb",
":kubectl.deb",
@ -79,6 +78,16 @@ deb_data(
"mode": "644",
"dir": "/etc/systemd/system/kubelet.service.d",
},
{
"files": ["kubeadm.conf"],
"mode": "644",
"dir": "/usr/lib/modules-load.d",
},
{
"files": ["50-kubeadm.conf"],
"mode": "644",
"dir": "/etc/sysctl.d",
},
],
)
@ -160,6 +169,7 @@ k8s_deb(
"kubelet (>= 1.8.0)",
"kubectl (>= 1.8.0)",
"kubernetes-cni (>= 0.5.1)",
"cri-tools (>= 1.11.0)",
],
description = """Kubernetes Cluster Bootstrapping Tool
The Kubernetes command line tool for bootstrapping a Kubernetes cluster.

View File

@ -3,8 +3,11 @@ reviewers:
- jbeda
- mikedanese
- pipejakob
- chuckha
- timothysc
approvers:
- luxas
- jbeda
- mikedanese
- pipejakob
- timothysc

2 vendor/k8s.io/kubernetes/build/debs/kubeadm.conf generated vendored Normal file
View File

@ -0,0 +1,2 @@
# Load br_netfilter module at boot
br_netfilter

View File

@ -29,6 +29,7 @@ readonly RELEASE_TARS="${LOCAL_OUTPUT_ROOT}/release-tars"
readonly RELEASE_IMAGES="${LOCAL_OUTPUT_ROOT}/release-images"
KUBE_BUILD_HYPERKUBE=${KUBE_BUILD_HYPERKUBE:-y}
KUBE_BUILD_CONFORMANCE=${KUBE_BUILD_CONFORMANCE:-y}
# Validate a ci version
#
@ -193,17 +194,46 @@ function kube::release::package_node_tarballs() {
done
}
# Package up all of the server binaries in docker images
function kube::release::build_server_images() {
# Clean out any old images
rm -rf "${RELEASE_IMAGES}"
local platform
for platform in "${KUBE_SERVER_PLATFORMS[@]}"; do
local platform_tag=${platform/\//-} # Replace "/" with "-"
local arch=$(basename "${platform}")
kube::log::status "Building images: $platform_tag"
local release_stage="${RELEASE_STAGE}/server/${platform_tag}/kubernetes"
rm -rf "${release_stage}"
mkdir -p "${release_stage}/server/bin"
# This fancy expression will expand to prepend a path
# (${LOCAL_OUTPUT_BINPATH}/${platform}/) to every item in the
# KUBE_SERVER_IMAGE_BINARIES array.
cp "${KUBE_SERVER_IMAGE_BINARIES[@]/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" \
"${release_stage}/server/bin/"
# if we are building hyperkube, we also need to copy that binary
if [[ "${KUBE_BUILD_HYPERKUBE}" =~ [yY] ]]; then
cp "${LOCAL_OUTPUT_BINPATH}/${platform}/hyperkube" "${release_stage}/server/bin"
fi
kube::release::create_docker_images_for_server "${release_stage}/server/bin" "${arch}"
done
}
# Package up all of the server binaries
function kube::release::package_server_tarballs() {
kube::release::build_server_images
local platform
for platform in "${KUBE_SERVER_PLATFORMS[@]}"; do
local platform_tag=${platform/\//-} # Replace "/" with "-"
local arch=$(basename "${platform}")
kube::log::status "Building tarball: server $platform_tag"
# NOTE: this directory was setup in kube::release::build_server_images
local release_stage="${RELEASE_STAGE}/server/${platform_tag}/kubernetes"
rm -rf "${release_stage}"
mkdir -p "${release_stage}/server/bin"
mkdir -p "${release_stage}/addons"
# This fancy expression will expand to prepend a path
@ -212,8 +242,6 @@ function kube::release::package_server_tarballs() {
cp "${KUBE_SERVER_BINARIES[@]/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" \
"${release_stage}/server/bin/"
kube::release::create_docker_images_for_server "${release_stage}/server/bin" "${arch}"
# Include the client binaries here too as they are useful debugging tools.
local client_bins=("${KUBE_CLIENT_BINARIES[@]}")
if [[ "${platform%/*}" == "windows" ]]; then
@ -262,15 +290,28 @@ function kube::release::build_hyperkube_image() {
if [[ -n "${save_dir}" ]]; then
"${DOCKER[@]}" save "${hyperkube_tag}" > "${save_dir}/hyperkube-${arch}.tar"
fi
if [[ -z "${KUBE_DOCKER_IMAGE_TAG-}" || -z "${KUBE_DOCKER_REGISTRY-}" ]]; then
# not a release
kube::log::status "Deleting hyperkube image ${hyperkube_tag}"
"${DOCKER[@]}" rmi "${hyperkube_tag}" &>/dev/null || true
fi
kube::log::status "Deleting hyperkube image ${hyperkube_tag}"
"${DOCKER[@]}" rmi "${hyperkube_tag}" &>/dev/null || true
}
# This will take binaries that run on the master and create Docker images
# that wrap each binary. (One Docker image per binary)
function kube::release::build_conformance_image() {
local -r arch="$1"
local -r registry="$2"
local -r version="$3"
local -r save_dir="${4-}"
kube::log::status "Building conformance image for arch: ${arch}"
ARCH="${arch}" REGISTRY="${registry}" VERSION="${version}" \
make -C cluster/images/conformance/ build >/dev/null
local conformance_tag="${registry}/conformance-${arch}:${version}"
if [[ -n "${save_dir}" ]]; then
"${DOCKER[@]}" save "${conformance_tag}" > "${save_dir}/conformance-${arch}.tar"
fi
kube::log::status "Deleting conformance image ${conformance_tag}"
"${DOCKER[@]}" rmi "${conformance_tag}" &>/dev/null || true
}
# This builds all the release docker images (One docker image per binary)
# Args:
# $1 - binary_dir, the directory to save the tared images to.
# $2 - arch, architecture for which we are building docker images.
@ -285,10 +326,6 @@ function kube::release::create_docker_images_for_server() {
mkdir -p "${images_dir}"
local -r docker_registry="k8s.gcr.io"
# TODO(thockin): Remove all traces of this after 1.11 release.
# The following is the old non-indirected registry name. To ease the
# transition to the new name (above), we are double-tagging saved images.
local -r deprecated_registry="gcr.io/google_containers"
# Docker tags cannot contain '+'
local docker_tag="${KUBE_GIT_VERSION/+/_}"
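# Illustrative (not part of the original script): ${KUBE_GIT_VERSION/+/_}
# replaces the first '+', e.g. v1.13.0-beta.1+abc1234 -> v1.13.0-beta.1_abc1234.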
if [[ -z "${docker_tag}" ]]; then
@ -309,17 +346,14 @@ function kube::release::create_docker_images_for_server() {
local docker_file_path="${docker_build_path}/Dockerfile"
local binary_file_path="${binary_dir}/${binary_name}"
local docker_image_tag="${docker_registry}"
local deprecated_image_tag="${deprecated_registry}"
if [[ ${arch} == "amd64" ]]; then
# If we are building a amd64 docker image, preserve the original
# image name
docker_image_tag+="/${binary_name}:${docker_tag}"
deprecated_image_tag+="/${binary_name}:${docker_tag}"
else
# If we are building a docker image for another architecture,
# append the arch in the image tag
docker_image_tag+="/${binary_name}-${arch}:${docker_tag}"
deprecated_image_tag+="/${binary_name}-${arch}:${docker_tag}"
fi
@ -328,11 +362,18 @@ function kube::release::create_docker_images_for_server() {
rm -rf "${docker_build_path}"
mkdir -p "${docker_build_path}"
ln "${binary_dir}/${binary_name}" "${docker_build_path}/${binary_name}"
printf " FROM ${base_image} \n ADD ${binary_name} /usr/local/bin/${binary_name}\n" > "${docker_file_path}"
ln "${KUBE_ROOT}/build/nsswitch.conf" "${docker_build_path}/nsswitch.conf"
chmod 0644 "${docker_build_path}/nsswitch.conf"
cat <<EOF > "${docker_file_path}"
FROM ${base_image}
COPY ${binary_name} /usr/local/bin/${binary_name}
EOF
# ensure /etc/nsswitch.conf exists so go's resolver respects /etc/hosts
if [[ "${base_image}" =~ busybox ]]; then
echo "COPY nsswitch.conf /etc/" >> "${docker_file_path}"
fi
"${DOCKER[@]}" build --pull -q -t "${docker_image_tag}" "${docker_build_path}" >/dev/null
"${DOCKER[@]}" tag "${docker_image_tag}" "${deprecated_image_tag}" >/dev/null
"${DOCKER[@]}" save "${docker_image_tag}" "${deprecated_image_tag}" > "${binary_dir}/${binary_name}.tar"
"${DOCKER[@]}" save "${docker_image_tag}" > "${binary_dir}/${binary_name}.tar"
echo "${docker_tag}" > "${binary_dir}/${binary_name}.docker_tag"
rm -rf "${docker_build_path}"
ln "${binary_dir}/${binary_name}.tar" "${images_dir}/"
@ -351,7 +392,6 @@ function kube::release::create_docker_images_for_server() {
# not a release
kube::log::status "Deleting docker image ${docker_image_tag}"
"${DOCKER[@]}" rmi "${docker_image_tag}" &>/dev/null || true
"${DOCKER[@]}" rmi "${deprecated_image_tag}" &>/dev/null || true
fi
) &
done
@ -360,6 +400,10 @@ function kube::release::create_docker_images_for_server() {
kube::release::build_hyperkube_image "${arch}" "${docker_registry}" \
"${docker_tag}" "${images_dir}" &
fi
if [[ "${KUBE_BUILD_CONFORMANCE}" =~ [yY] ]]; then
kube::release::build_conformance_image "${arch}" "${docker_registry}" \
"${docker_tag}" "${images_dir}" &
fi
kube::util::wait-for-jobs || { kube::log::error "previous Docker build failed"; return 1; }
kube::log::status "Docker builds done"
@ -382,14 +426,11 @@ function kube::release::package_kube_manifests_tarball() {
cp "${src_dir}/cluster-autoscaler.manifest" "${dst_dir}/"
cp "${src_dir}/etcd.manifest" "${dst_dir}"
cp "${src_dir}/kube-scheduler.manifest" "${dst_dir}"
cp "${src_dir}/kms-plugin-container.manifest" "${dst_dir}"
cp "${src_dir}/kube-apiserver.manifest" "${dst_dir}"
cp "${src_dir}/abac-authz-policy.jsonl" "${dst_dir}"
cp "${src_dir}/kube-controller-manager.manifest" "${dst_dir}"
cp "${src_dir}/kube-addon-manager.yaml" "${dst_dir}"
cp "${src_dir}/glbc.manifest" "${dst_dir}"
cp "${src_dir}/rescheduler.manifest" "${dst_dir}/"
cp "${src_dir}/e2e-image-puller.manifest" "${dst_dir}/"
cp "${src_dir}/etcd-empty-dir-cleanup.yaml" "${dst_dir}/"
local internal_manifest
for internal_manifest in $(ls "${src_dir}" | grep "^internal-*"); do

2 vendor/k8s.io/kubernetes/build/nsswitch.conf generated vendored Normal file
View File

@ -0,0 +1,2 @@
# ensure go's non-cgo resolver respects /etc/hosts
hosts: files dns

42 vendor/k8s.io/kubernetes/build/release-images.sh generated vendored Executable file
View File

@ -0,0 +1,42 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Build Kubernetes release images. This will build the server target binaries
# and wrap them in Docker images; see `make release` for full releases.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/build/common.sh"
source "${KUBE_ROOT}/build/lib/release.sh"
CMD_TARGETS="${KUBE_SERVER_IMAGE_TARGETS[*]}"
if [[ "${KUBE_BUILD_HYPERKUBE}" =~ [yY] ]]; then
CMD_TARGETS="${CMD_TARGETS} cmd/hyperkube"
fi
if [[ "${KUBE_BUILD_CONFORMANCE}" =~ [yY] ]]; then
CMD_TARGETS="${CMD_TARGETS} ${KUBE_CONFORMANCE_IMAGE_TARGETS[*]}"
fi
kube::build::verify_prereqs
kube::build::build_image
kube::build::run_build_command make all WHAT="${CMD_TARGETS}" KUBE_BUILD_PLATFORMS="${KUBE_SERVER_PLATFORMS[*]}"
kube::build::copy_output
kube::release::build_server_images
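# Example invocation (illustrative): build only the core server images,
# skipping the hyperkube and conformance images:
#   KUBE_BUILD_HYPERKUBE=n KUBE_BUILD_CONFORMANCE=n build/release-images.sh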

View File

@ -1,16 +1,20 @@
# gazelle:build_file_name BUILD,BUILD.bazel
# gazelle:exclude _artifacts
# gazelle:exclude _gopath
# gazelle:exclude _output
# gazelle:exclude _tmp
# gazelle:prefix k8s.io/kubernetes
# Disable proto rules, since the Go sources are currently generated by
# hack/update-generated-protobuf.sh and checked into the repo.
# gazelle:proto disable_global
package(default_visibility = ["//visibility:public"])
load("@io_bazel_rules_go//go:def.bzl", "go_prefix")
load("@io_kubernetes_build//defs:build.bzl", "gcs_upload")
go_prefix("k8s.io/kubernetes")
filegroup(
name = "_binary-artifacts-and-hashes",
srcs = [

View File

@ -124,7 +124,7 @@ ifeq ($(PRINT_HELP),y)
verify:
@echo "$$VERIFY_HELP_INFO"
else
verify: verify_generated_files
verify:
KUBE_VERIFY_GIT_BRANCH=$(BRANCH) hack/make-rules/verify.sh
endif
@ -139,7 +139,7 @@ ifeq ($(PRINT_HELP),y)
quick-verify:
@echo "$$QUICK_VERIFY_HELP_INFO"
else
quick-verify: verify_generated_files
quick-verify:
QUICK=true SILENT=false hack/make-rules/verify.sh
endif
@ -386,6 +386,25 @@ release-in-a-container:
build/release-in-a-container.sh
endif
define RELEASE_IMAGES_HELP_INFO
# Build release images
#
# Args:
# KUBE_BUILD_HYPERKUBE: Whether to build hyperkube image as well. Set to 'n' to skip.
# KUBE_BUILD_CONFORMANCE: Whether to build conformance testing image as well. Set to 'n' to skip.
#
# Example:
# make release-images
endef
.PHONY: release-images
ifeq ($(PRINT_HELP),y)
release-images:
@echo "$$RELEASE_IMAGES_HELP_INFO"
else
release-images:
build/release-images.sh
endif
define RELEASE_SKIP_TESTS_HELP_INFO
# Build a release, but skip tests
#
@ -408,6 +427,27 @@ release-skip-tests quick-release:
build/release.sh
endif
define QUICK_RELEASE_IMAGES_HELP_INFO
# Build release images, but only for linux/amd64
#
# Args:
# KUBE_FASTBUILD: Whether to build only for linux/amd64. Set to 'false' to cross-compile for other architectures.
# KUBE_BUILD_HYPERKUBE: Whether to build hyperkube image as well. Set to 'n' to skip.
# KUBE_BUILD_CONFORMANCE: Whether to build conformance testing image as well. Set to 'n' to skip.
#
# Example:
# make quick-release-images
endef
.PHONY: quick-release-images
ifeq ($(PRINT_HELP),y)
quick-release-images:
@echo "$$QUICK_RELEASE_IMAGES_HELP_INFO"
else
quick-release-images: KUBE_FASTBUILD = true
quick-release-images:
build/release-images.sh
endif
define PACKAGE_HELP_INFO
# Package tarballs
# Use the 'package-tarballs' target to run the final packaging steps of
@ -483,21 +523,6 @@ generated_files:
$(MAKE) -f Makefile.generated_files $@ CALLED_FROM_MAIN_MAKEFILE=1
endif
define VERIFY_GENERATED_FILES_HELP_INFO
# Verify auto-generated files needed for the build.
#
# Example:
# make verify_generated_files
endef
.PHONY: verify_generated_files
ifeq ($(PRINT_HELP),y)
verify_generated_files:
@echo "$$VERIFY_GENERATED_FILES_HELP_INFO"
else
verify_generated_files:
$(MAKE) -f Makefile.generated_files $@ CALLED_FROM_MAIN_MAKEFILE=1
endif
define HELP_INFO
# Print make targets and help info
#

View File

@ -37,81 +37,54 @@ SHELL := /bin/bash
.PHONY: generated_files
generated_files: gen_deepcopy gen_defaulter gen_conversion gen_openapi gen_bindata
.PHONY: verify_generated_files
verify_generated_files: verify_gen_deepcopy \
verify_gen_defaulter \
verify_gen_conversion
#
# Helper logic to calculate Go's dependency DAG ourselves.
#
# This is a file that will be emitted by the go2make tool, containing a
# variable for each Go package in the project (including deps) which lists all
# of the transitive deps of that package. Each variable is named the same as
# the package - for example the variable for `k8s.io/kubernetes/pkg/api` is
# $(k8s.io/kubernetes/pkg/api). This is roughly the same DAG that the Go
# compiler uses. These variables can be used to figure out if, for example,
# generated code needs to be regenerated.
GO_PKGDEPS_FILE = go-pkgdeps.mk
# Include the Go package dependencies file. This will cause the rule of
# the same name to be considered and if it is updated, make will restart and
# reload the updated deps.
sinclude $(META_DIR)/$(GO_PKGDEPS_FILE)
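# Illustrative sketch (format assumed from the description above): an entry in
# the emitted go-pkgdeps.mk looks roughly like
#   k8s.io/kubernetes/pkg/api := pkg/api/doc.go pkg/api/types.go ...
# so $(k8s.io/kubernetes/pkg/api) can be used directly as a prerequisite list.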
# Update the set of Go deps for our project. This will let us determine if
# we really need to do expensive codegen. We use FORCE because it is not a
# PHONY file, but we do want it to be re-evaluated every time make is run. The
# file will only be touched if it actually changes.
$(META_DIR)/$(GO_PKGDEPS_FILE): FORCE
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: calculating Go dependencies"; \
fi
hack/run-in-gopath.sh go install ./hack/make-rules/helpers/go2make
hack/run-in-gopath.sh go2make \
k8s.io/kubernetes/... \
--prune k8s.io/kubernetes/staging \
--prune k8s.io/kubernetes/vendor \
k8s.io/kubernetes/vendor/k8s.io/... \
github.com/jteeuwen/go-bindata/go-bindata/... \
> $@.tmp
if ! cmp -s $@.tmp $@; then \
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: $(GO_PKGDEPS_FILE) changed"; \
fi; \
cat $@.tmp > $@; \
fi
rm -f $@.tmp
.PHONY: FORCE
FORCE:
# Code-generation logic.
#
# This stuff can be pretty tricky, and there's probably some corner cases that
# we don't handle well. That said, here's a straightforward test to prove that
# the most common cases work. Sadly, it is manual.
# Helper logic to find which directories need codegen as quickly as possible.
#
# make clean
# find . -name .make\* | xargs rm -f
# find . -name zz_generated\* | xargs rm -f
# # verify `find . -name zz_generated.deepcopy.go | wc -l` is 0
# # verify `find . -name .make | wc -l` is 0
#
# make nonexistent
# # expect "No rule to make target"
# # verify `find .make/ -type f | wc -l` has many files
#
# make gen_deepcopy
# # expect deepcopy-gen is built exactly once
# # expect many files to be regenerated
# # verify `find . -name zz_generated.deepcopy.go | wc -l` has files
# make gen_deepcopy
# # expect nothing to be rebuilt, finish in O(seconds)
# touch pkg/api/types.go
# make gen_deepcopy
# # expect one file to be regenerated
# make gen_deepcopy
# # expect nothing to be rebuilt, finish in O(seconds)
# touch vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go
# make gen_deepcopy
# # expect deepcopy-gen is built exactly once
# # expect many files to be regenerated
# # verify `find . -name zz_generated.deepcopy.go | wc -l` has files
# make gen_deepcopy
# # expect nothing to be rebuilt, finish in O(seconds)
#
# make gen_conversion
# # expect conversion-gen is built exactly once
# # expect many files to be regenerated
# # verify `find . -name zz_generated.conversion.go | wc -l` has files
# make gen_conversion
# # expect nothing to be rebuilt, finish in O(seconds)
# touch pkg/api/types.go
# make gen_conversion
# # expect one file to be regenerated
# make gen_conversion
# # expect nothing to be rebuilt, finish in O(seconds)
# touch vendor/k8s.io/code-generator/cmd/conversion-gen/main.go
# make gen_conversion
# # expect conversion-gen is built exactly once
# # expect many files to be regenerated
# # verify `find . -name zz_generated.conversion.go | wc -l` has files
# make gen_conversion
# # expect nothing to be rebuilt, finish in O(seconds)
#
# make all
# # expect it to build
#
# make test
# # expect it to pass
#
# make clean
# # verify `find . -name zz_generated.deepcopy.go | wc -l` is 0
# # verify `find . -name .make | wc -l` is 0
#
# make all WHAT=cmd/kube-proxy
# # expect it to build
#
# make clean
# make test WHAT=cmd/kube-proxy
# # expect it to pass
# This variable holds a list of every directory that contains Go files in this
# project. Other rules and variables can use this as a starting point to
@ -123,63 +96,6 @@ ALL_GO_DIRS := $(shell \
hack/make-rules/helpers/cache_go_dirs.sh $(META_DIR)/all_go_dirs.mk \
)
# The name of the metadata file which lists *.go files in each pkg.
GOFILES_META := gofiles.mk
# Establish a dependency between the deps file and the dir. Whenever a dir
# changes (files added or removed) the deps file will be considered stale.
#
# The variable value was set in $(GOFILES_META) and included as part of the
# dependency management logic.
#
# This is looser than we really need (e.g. we don't really care about non *.go
# files or even *_test.go files), but this is much easier to represent.
#
# Because we 'sinclude' the deps file, it is considered for rebuilding, as part
# of make's normal evaluation. If it gets rebuilt, make will restart.
#
# The '$(eval)' is needed because this has a different RHS for each LHS, and
# would otherwise produce results that make can't parse.
$(foreach dir, $(ALL_GO_DIRS), $(eval \
$(META_DIR)/$(dir)/$(GOFILES_META): $(dir) \
))
# How to rebuild a deps file. When make determines that the deps file is stale
# (see above), it executes this rule, and then re-loads the deps file.
#
# This is looser than we really need (e.g. we don't really care about test
# files), but this is MUCH faster than calling `go list`.
#
# We regenerate the output file in order to satisfy make's "newer than" rules,
# but we only need to rebuild targets if the contents actually changed. That
# is what the .stamp file represents.
$(foreach dir, $(ALL_GO_DIRS), \
$(META_DIR)/$(dir)/$(GOFILES_META)):
FILES=$$(ls $</*.go | grep --color=never -v $(GENERATED_FILE_PREFIX)); \
mkdir -p $(@D); \
echo "gofiles__$< := $$(echo $${FILES})" >$@.tmp; \
if ! cmp -s $@.tmp $@; then \
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: gofiles changed for $@"; \
fi; \
touch $@.stamp; \
fi; \
mv $@.tmp $@
# This is required to fill in the DAG, since some cases (e.g. 'make clean all')
# will reference the .stamp file when it doesn't exist. We don't need to
# rebuild it in that case, just keep make happy.
$(foreach dir, $(ALL_GO_DIRS), \
$(META_DIR)/$(dir)/$(GOFILES_META).stamp):
# Include any deps files as additional Makefile rules. This triggers make to
# consider the deps files for rebuild, which makes the whole
# dependency-management logic work. 'sinclude' is "silent include" which does
# not fail if the file does not exist.
$(foreach dir, $(ALL_GO_DIRS), $(eval \
sinclude $(META_DIR)/$(dir)/$(GOFILES_META) \
))
# Generate a list of all files that have a `+k8s:` comment-tag. This will be
# used to derive lists of files/dirs for generation tools.
ifeq ($(DBG_MAKEFILE),1)
@ -190,7 +106,12 @@ ALL_K8S_TAG_FILES := $(shell \
| xargs grep --color=never -l '^// *+k8s:' \
)
#
# Code generation logic.
#
# Deep-copy generation
#
# Any package that wants deep-copy functions generated must include a
@ -220,34 +141,27 @@ DEEPCOPY_DIRS := $(shell \
)
DEEPCOPY_FILES := $(addsuffix /$(DEEPCOPY_FILENAME), $(DEEPCOPY_DIRS))
# Shell function for reuse in rules.
RUN_GEN_DEEPCOPY = \
function run_gen_deepcopy() { \
if [[ -f $(META_DIR)/$(DEEPCOPY_GEN).todo ]]; then \
pkgs=$$(cat $(META_DIR)/$(DEEPCOPY_GEN).todo | paste -sd, -); \
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: running $(DEEPCOPY_GEN) for $$pkgs"; \
fi; \
./hack/run-in-gopath.sh $(DEEPCOPY_GEN) \
--v $(KUBE_VERBOSE) \
--logtostderr \
-i "$$pkgs" \
--bounding-dirs $(PRJ_SRC_PATH),"k8s.io/api" \
-O $(DEEPCOPY_BASENAME) \
"$$@"; \
fi \
}; \
run_gen_deepcopy
# Reset the list of packages that need generation.
$(shell mkdir -p $$(dirname $(META_DIR)/$(DEEPCOPY_GEN)))
$(shell rm -f $(META_DIR)/$(DEEPCOPY_GEN).todo)
# This rule aggregates the set of files to generate and then generates them all
# in a single run of the tool.
.PHONY: gen_deepcopy
gen_deepcopy: $(DEEPCOPY_FILES) $(DEEPCOPY_GEN)
$(RUN_GEN_DEEPCOPY)
.PHONY: verify_gen_deepcopy
verify_gen_deepcopy: $(DEEPCOPY_GEN)
$(RUN_GEN_DEEPCOPY) --verify-only
gen_deepcopy: $(DEEPCOPY_GEN) $(META_DIR)/$(DEEPCOPY_GEN).todo
if [[ -s $(META_DIR)/$(DEEPCOPY_GEN).todo ]]; then \
pkgs=$$(cat $(META_DIR)/$(DEEPCOPY_GEN).todo | paste -sd, -); \
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: running $(DEEPCOPY_GEN) for $$pkgs"; \
fi; \
./hack/run-in-gopath.sh $(DEEPCOPY_GEN) \
--v $(KUBE_VERBOSE) \
--logtostderr \
-i "$$pkgs" \
--bounding-dirs $(PRJ_SRC_PATH),"k8s.io/api" \
-O $(DEEPCOPY_BASENAME) \
"$$@"; \
fi \
# For each dir in DEEPCOPY_DIRS, this establishes a dependency between the
# output file and the input files that should trigger a rebuild.
@ -258,71 +172,35 @@ verify_gen_deepcopy: $(DEEPCOPY_GEN)
#
# The '$(eval)' is needed because this has a different RHS for each LHS, and
# would otherwise produce results that make can't parse.
#
# We depend on the $(GOFILES_META).stamp to detect when the set of input files
# has changed. This allows us to detect deleted input files.
$(foreach dir, $(DEEPCOPY_DIRS), $(eval \
$(dir)/$(DEEPCOPY_FILENAME): $(META_DIR)/$(dir)/$(GOFILES_META).stamp \
$(gofiles__$(dir)) \
$(foreach dir, $(DEEPCOPY_DIRS), $(eval \
$(dir)/$(DEEPCOPY_FILENAME): $($(PRJ_SRC_PATH)/$(dir)) \
))
# Unilaterally remove any leftovers from previous runs.
$(shell rm -f $(META_DIR)/$(DEEPCOPY_GEN)*.todo)
# How to regenerate deep-copy code. This is a little slow to run, so we batch
# it up and trigger the batch from the 'generated_files' target.
$(META_DIR)/$(DEEPCOPY_GEN).todo: $(DEEPCOPY_FILES)
$(DEEPCOPY_FILES): $(DEEPCOPY_GEN)
mkdir -p $$(dirname $(META_DIR)/$(DEEPCOPY_GEN))
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: deepcopy needed $(@D): $?"; \
ls -lf --full-time $@ $? || true; \
fi
echo $(PRJ_SRC_PATH)/$(@D) >> $(META_DIR)/$(DEEPCOPY_GEN).todo
# This calculates the dependencies for the generator tool, so we only rebuild
# it when needed. It is PHONY so that it always runs, but it only updates the
# file if the contents have actually changed. We 'sinclude' this later.
.PHONY: $(META_DIR)/$(DEEPCOPY_GEN).mk
$(META_DIR)/$(DEEPCOPY_GEN).mk:
mkdir -p $(@D); \
(echo -n "$(DEEPCOPY_GEN): "; \
./hack/run-in-gopath.sh go list \
-f '{{.ImportPath}}{{"\n"}}{{range .Deps}}{{.}}{{"\n"}}{{end}}' \
./vendor/k8s.io/code-generator/cmd/deepcopy-gen \
| grep --color=never "^$(PRJ_SRC_PATH)/" \
| xargs ./hack/run-in-gopath.sh go list \
-f '{{$$d := .Dir}}{{$$d}}{{"\n"}}{{range .GoFiles}}{{$$d}}/{{.}}{{"\n"}}{{end}}' \
| paste -sd' ' - \
| sed 's/ / \\=,/g' \
| tr '=,' '\n\t' \
| sed "s|$$(pwd -P)/||"; \
) > $@.tmp; \
if ! cmp -s $@.tmp $@; then \
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: $(DEEPCOPY_GEN).mk changed"; \
fi; \
cat $@.tmp > $@; \
rm -f $@.tmp; \
fi
# Include dependency info for the generator tool. This will cause the rule of
# the same name to be considered and if it is updated, make will restart.
sinclude $(META_DIR)/$(DEEPCOPY_GEN).mk
# How to build the generator tool. The deps for this are defined in
# the $(DEEPCOPY_GEN).mk, above.
# the $(GO_PKGDEPS_FILE), above.
#
# A word on the need to touch: This rule might trigger if, for example, a
# non-Go file was added or deleted from a directory on which this depends.
# This target needs to be reconsidered, but Go realizes it doesn't actually
# have to be rebuilt. In that case, make will forever see the dependency as
# newer than the binary, and try to rebuild it over and over. So we touch it,
# and make is happy.
$(DEEPCOPY_GEN):
hack/make-rules/build.sh ./vendor/k8s.io/code-generator/cmd/deepcopy-gen
# newer than the binary, and try to "rebuild" it over and over. So we touch
# it, and make is happy.
$(DEEPCOPY_GEN): $(k8s.io/kubernetes/vendor/k8s.io/code-generator/cmd/deepcopy-gen)
KUBE_BUILD_PLATFORMS="" hack/make-rules/build.sh ./vendor/k8s.io/code-generator/cmd/deepcopy-gen
touch $@
#
# Defaulter generation
#
# Any package that wants defaulter functions generated must include a
@ -359,134 +237,64 @@ DEFAULTER_DIRS := $(shell \
DEFAULTER_FILES := $(addsuffix /$(DEFAULTER_FILENAME), $(DEFAULTER_DIRS))
RUN_GEN_DEFAULTER := \
function run_gen_defaulter() { \
if [[ -f $(META_DIR)/$(DEFAULTER_GEN).todo ]]; then \
pkgs=$$(cat $(META_DIR)/$(DEFAULTER_GEN).todo | paste -sd, -); \
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: running $(DEFAULTER_GEN) for $$pkgs"; \
fi; \
./hack/run-in-gopath.sh $(DEFAULTER_GEN) \
--v $(KUBE_VERBOSE) \
--logtostderr \
-i "$$pkgs" \
--extra-peer-dirs $$(echo $(addprefix $(PRJ_SRC_PATH)/, $(DEFAULTER_DIRS)) | sed 's/ /,/g') \
-O $(DEFAULTER_BASENAME) \
"$$@"; \
fi \
}; \
run_gen_defaulter
# Reset the list of packages that need generation.
$(shell mkdir -p $$(dirname $(META_DIR)/$(DEFAULTER_GEN)))
$(shell rm -f $(META_DIR)/$(DEFAULTER_GEN).todo)
# This rule aggregates the set of files to generate and then generates them all
# in a single run of the tool.
.PHONY: gen_defaulter
gen_defaulter: $(DEFAULTER_FILES) $(DEFAULTER_GEN)
$(RUN_GEN_DEFAULTER)
.PHONY: verify_gen_deepcopy
verify_gen_defaulter: $(DEFAULTER_GEN)
$(RUN_GEN_DEFAULTER) --verify-only
gen_defaulter: $(DEFAULTER_GEN) $(META_DIR)/$(DEFAULTER_GEN).todo
if [[ -s $(META_DIR)/$(DEFAULTER_GEN).todo ]]; then \
pkgs=$$(cat $(META_DIR)/$(DEFAULTER_GEN).todo | paste -sd, -); \
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: running $(DEFAULTER_GEN) for $$pkgs"; \
fi; \
./hack/run-in-gopath.sh $(DEFAULTER_GEN) \
--v $(KUBE_VERBOSE) \
--logtostderr \
-i "$$pkgs" \
--extra-peer-dirs $$(echo $(addprefix $(PRJ_SRC_PATH)/, $(DEFAULTER_DIRS)) | sed 's/ /,/g') \
-O $(DEFAULTER_BASENAME) \
"$$@"; \
fi
# For each dir in DEFAULTER_DIRS, this establishes a dependency between the
# output file and the input files that should trigger a rebuild.
#
# The variable value was set in $(GOFILES_META) and included as part of the
# dependency management logic.
#
# Note that this is a deps-only statement, not a full rule (see below). This
# has to be done in a distinct step because wildcards don't work in static
# pattern rules.
# Note that this is a deps-only statement, not a full rule (see below for that).
#
# The '$(eval)' is needed because this has a different RHS for each LHS, and
# would otherwise produce results that make can't parse.
#
# We depend on the $(GOFILES_META).stamp to detect when the set of input files
# has changed. This allows us to detect deleted input files.
$(foreach dir, $(DEFAULTER_DIRS), $(eval \
$(dir)/$(DEFAULTER_FILENAME): $(META_DIR)/$(dir)/$(GOFILES_META).stamp \
$(gofiles__$(dir)) \
$(foreach dir, $(DEFAULTER_DIRS), $(eval \
$(dir)/$(DEFAULTER_FILENAME): $($(PRJ_SRC_PATH)/$(dir)) \
))
# For each dir in DEFAULTER_DIRS, for each target in $(defaulters__$(dir)),
# this establishes a dependency between the output file and the input files
# that should trigger a rebuild.
#
# The variable value was set in $(GOFILES_META) and included as part of the
# dependency management logic.
#
# Note that this is a deps-only statement, not a full rule (see below). This
# has to be done in a distinct step because wildcards don't work in static
# pattern rules.
#
# The '$(eval)' is needed because this has a different RHS for each LHS, and
# would otherwise produce results that make can't parse.
#
# We depend on the $(GOFILES_META).stamp to detect when the set of input files
# has changed. This allows us to detect deleted input files.
$(foreach dir, $(DEFAULTER_DIRS), \
$(foreach tgt, $(defaulters__$(dir)), $(eval \
$(dir)/$(DEFAULTER_FILENAME): $(META_DIR)/$(tgt)/$(GOFILES_META).stamp \
$(gofiles__$(tgt)) \
)) \
)
# Unilaterally remove any leftovers from previous runs.
$(shell rm -f $(META_DIR)/$(DEFAULTER_GEN)*.todo)
# How to regenerate defaulter code. This is a little slow to run, so we batch
# it up and trigger the batch from the 'generated_files' target.
$(META_DIR)/$(DEFAULTER_GEN).todo: $(DEFAULTER_FILES)
$(DEFAULTER_FILES): $(DEFAULTER_GEN)
mkdir -p $$(dirname $(META_DIR)/$(DEFAULTER_GEN))
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: defaulter needed $(@D): $?"; \
ls -lf --full-time $@ $? || true; \
fi
echo $(PRJ_SRC_PATH)/$(@D) >> $(META_DIR)/$(DEFAULTER_GEN).todo
# This calculates the dependencies for the generator tool, so we only rebuild
# it when needed. It is PHONY so that it always runs, but it only updates the
# file if the contents have actually changed. We 'sinclude' this later.
.PHONY: $(META_DIR)/$(DEFAULTER_GEN).mk
$(META_DIR)/$(DEFAULTER_GEN).mk:
mkdir -p $(@D); \
(echo -n "$(DEFAULTER_GEN): "; \
./hack/run-in-gopath.sh go list \
-f '{{.ImportPath}}{{"\n"}}{{range .Deps}}{{.}}{{"\n"}}{{end}}' \
./vendor/k8s.io/code-generator/cmd/defaulter-gen \
| grep --color=never "^$(PRJ_SRC_PATH)/" \
| xargs ./hack/run-in-gopath.sh go list \
-f '{{$$d := .Dir}}{{$$d}}{{"\n"}}{{range .GoFiles}}{{$$d}}/{{.}}{{"\n"}}{{end}}' \
| paste -sd' ' - \
| sed 's/ / \\=,/g' \
| tr '=,' '\n\t' \
| sed "s|$$(pwd -P)/||"; \
) > $@.tmp; \
if ! cmp -s $@.tmp $@; then \
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: $(DEFAULTER_GEN).mk changed"; \
fi; \
cat $@.tmp > $@; \
rm -f $@.tmp; \
fi
# Include dependency info for the generator tool. This will cause the rule of
# the same name to be considered and if it is updated, make will restart.
sinclude $(META_DIR)/$(DEFAULTER_GEN).mk
# How to build the generator tool. The deps for this are defined in
# the $(DEFAULTER_GEN).mk, above.
# the $(GO_PKGDEPS_FILE), above.
#
# A word on the need to touch: This rule might trigger if, for example, a
# non-Go file was added or deleted from a directory on which this depends.
# This target needs to be reconsidered, but Go realizes it doesn't actually
# have to be rebuilt. In that case, make will forever see the dependency as
# newer than the binary, and try to rebuild it over and over. So we touch it,
# and make is happy.
$(DEFAULTER_GEN):
hack/make-rules/build.sh ./vendor/k8s.io/code-generator/cmd/defaulter-gen
# newer than the binary, and try to "rebuild" it over and over. So we touch
# it, and make is happy.
$(DEFAULTER_GEN): $(k8s.io/kubernetes/vendor/k8s.io/code-generator/cmd/defaulter-gen)
KUBE_BUILD_PLATFORMS="" hack/make-rules/build.sh ./vendor/k8s.io/code-generator/cmd/defaulter-gen
touch $@
#
# Conversion generation
#
# Any package that wants conversion functions generated must include one or
@ -526,171 +334,52 @@ CONVERSION_DIRS := $(shell \
CONVERSION_FILES := $(addsuffix /$(CONVERSION_FILENAME), $(CONVERSION_DIRS))
CONVERSION_EXTRA_PEER_DIRS := k8s.io/kubernetes/pkg/apis/core,k8s.io/kubernetes/pkg/apis/core/v1,k8s.io/api/core/v1
# Shell function for reuse in rules.
RUN_GEN_CONVERSION = \
function run_gen_conversion() { \
if [[ -f $(META_DIR)/$(CONVERSION_GEN).todo ]]; then \
pkgs=$$(cat $(META_DIR)/$(CONVERSION_GEN).todo | paste -sd, -); \
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: running $(CONVERSION_GEN) for $$pkgs"; \
fi; \
./hack/run-in-gopath.sh $(CONVERSION_GEN) \
--extra-peer-dirs $(CONVERSION_EXTRA_PEER_DIRS) \
--v $(KUBE_VERBOSE) \
--logtostderr \
-i "$$pkgs" \
-O $(CONVERSION_BASENAME) \
"$$@"; \
fi \
}; \
run_gen_conversion
# Reset the list of packages that need generation.
$(shell mkdir -p $$(dirname $(META_DIR)/$(CONVERSION_GEN)))
$(shell rm -f $(META_DIR)/$(CONVERSION_GEN).todo)
# This rule aggregates the set of files to generate and then generates them all
# in a single run of the tool.
.PHONY: gen_conversion
gen_conversion: $(CONVERSION_FILES) $(CONVERSION_GEN)
$(RUN_GEN_CONVERSION)
.PHONY: verify_gen_conversion
verify_gen_conversion: $(CONVERSION_GEN)
$(RUN_GEN_CONVERSION) --verify-only
# Establish a dependency between the deps file and the dir. Whenever a dir
# changes (files added or removed) the deps file will be considered stale.
#
# This is looser than we really need (e.g. we don't really care about non *.go
# files or even *_test.go files), but this is much easier to represent.
#
# Because we 'sinclude' the deps file, it is considered for rebuilding, as part
# of make's normal evaluation. If it gets rebuilt, make will restart.
#
# The '$(eval)' is needed because this has a different RHS for each LHS, and
# would otherwise produce results that make can't parse.
$(foreach dir, $(CONVERSION_DIRS), $(eval \
$(META_DIR)/$(dir)/$(CONVERSIONS_META): $(dir) \
))
# How to rebuild a deps file. When make determines that the deps file is stale
# (see above), it executes this rule, and then re-loads the deps file.
#
# This is looser than we really need (e.g. we don't really care about test
# files), but this is MUCH faster than calling `go list`.
#
# We regenerate the output file in order to satisfy make's "newer than" rules,
# but we only need to rebuild targets if the contents actually changed. That
# is what the .stamp file represents.
$(foreach dir, $(CONVERSION_DIRS), \
$(META_DIR)/$(dir)/$(CONVERSIONS_META)):
TAGS=$$(grep --color=never -h '^// *+k8s:conversion-gen=' $</*.go \
| cut -f2- -d= \
| sed 's|$(PRJ_SRC_PATH)/||' \
| sed 's|^k8s.io/|vendor/k8s.io/|'); \
mkdir -p $(@D); \
echo "conversions__$< := $$(echo $${TAGS})" >$@.tmp; \
if ! cmp -s $@.tmp $@; then \
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: conversions changed for $@"; \
fi; \
touch $@.stamp; \
fi; \
mv $@.tmp $@
# Include any deps files as additional Makefile rules. This triggers make to
# consider the deps files for rebuild, which makes the whole
# dependency-management logic work. 'sinclude' is "silent include" which does
# not fail if the file does not exist.
$(foreach dir, $(CONVERSION_DIRS), $(eval \
sinclude $(META_DIR)/$(dir)/$(CONVERSIONS_META) \
))
gen_conversion: $(CONVERSION_GEN) $(META_DIR)/$(CONVERSION_GEN).todo
if [[ -s $(META_DIR)/$(CONVERSION_GEN).todo ]]; then \
pkgs=$$(cat $(META_DIR)/$(CONVERSION_GEN).todo | paste -sd, -); \
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: running $(CONVERSION_GEN) for $$pkgs"; \
fi; \
./hack/run-in-gopath.sh $(CONVERSION_GEN) \
--extra-peer-dirs $(CONVERSION_EXTRA_PEER_DIRS) \
--v $(KUBE_VERBOSE) \
--logtostderr \
-i "$$pkgs" \
-O $(CONVERSION_BASENAME) \
"$$@"; \
fi
# For each dir in CONVERSION_DIRS, this establishes a dependency between the
# output file and the input files that should trigger a rebuild.
#
# The variable value was set in $(GOFILES_META) and included as part of the
# dependency management logic.
#
# Note that this is a deps-only statement, not a full rule (see below). This
# has to be done in a distinct step because wildcards don't work in static
# pattern rules.
# Note that this is a deps-only statement, not a full rule (see below for that).
#
# The '$(eval)' is needed because this has a different RHS for each LHS, and
# would otherwise produce results that make can't parse.
#
# We depend on the $(GOFILES_META).stamp to detect when the set of input files
# has changed. This allows us to detect deleted input files.
$(foreach dir, $(CONVERSION_DIRS), $(eval \
    $(dir)/$(CONVERSION_FILENAME): $(META_DIR)/$(dir)/$(GOFILES_META).stamp \
        $(gofiles__$(dir)) \
))
$(foreach dir, $(CONVERSION_DIRS), $(eval \
    $(dir)/$(CONVERSION_FILENAME): $($(PRJ_SRC_PATH)/$(dir)) \
))
# For each dir in CONVERSION_DIRS, for each target in $(conversions__$(dir)),
# this establishes a dependency between the output file and the input files
# that should trigger a rebuild.
#
# The variable value was set in $(GOFILES_META) and included as part of the
# dependency management logic.
#
# Note that this is a deps-only statement, not a full rule (see below). This
# has to be done in a distinct step because wildcards don't work in static
# pattern rules.
#
# The '$(eval)' is needed because this has a different RHS for each LHS, and
# would otherwise produce results that make can't parse.
#
# We depend on the $(GOFILES_META).stamp to detect when the set of input files
# has changed. This allows us to detect deleted input files.
$(foreach dir, $(CONVERSION_DIRS), \
$(foreach tgt, $(conversions__$(dir)), $(eval \
$(dir)/$(CONVERSION_FILENAME): $(META_DIR)/$(tgt)/$(GOFILES_META).stamp \
$(gofiles__$(tgt)) \
)) \
)
# Unilaterally remove any leftovers from previous runs.
$(shell rm -f $(META_DIR)/$(CONVERSION_GEN)*.todo)
# How to regenerate conversion code. This is a little slow to run, so we batch
# it up and trigger the batch from the 'generated_files' target.
$(META_DIR)/$(CONVERSION_GEN).todo: $(CONVERSION_FILES)
$(CONVERSION_FILES): $(CONVERSION_GEN)
mkdir -p $$(dirname $(META_DIR)/$(CONVERSION_GEN))
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: conversion needed $(@D): $?"; \
ls -lf --full-time $@ $? || true; \
fi
echo $(PRJ_SRC_PATH)/$(@D) >> $(META_DIR)/$(CONVERSION_GEN).todo
# This calculates the dependencies for the generator tool, so we only rebuild
# it when needed. It is PHONY so that it always runs, but it only updates the
# file if the contents have actually changed. We 'sinclude' this later.
.PHONY: $(META_DIR)/$(CONVERSION_GEN).mk
$(META_DIR)/$(CONVERSION_GEN).mk:
mkdir -p $(@D); \
(echo -n "$(CONVERSION_GEN): "; \
./hack/run-in-gopath.sh go list \
-f '{{.ImportPath}}{{"\n"}}{{range .Deps}}{{.}}{{"\n"}}{{end}}' \
./vendor/k8s.io/code-generator/cmd/conversion-gen \
| grep --color=never "^$(PRJ_SRC_PATH)/" \
| xargs ./hack/run-in-gopath.sh go list \
-f '{{$$d := .Dir}}{{$$d}}{{"\n"}}{{range .GoFiles}}{{$$d}}/{{.}}{{"\n"}}{{end}}' \
| paste -sd' ' - \
| sed 's/ / \\=,/g' \
| tr '=,' '\n\t' \
| sed "s|$$(pwd -P)/||"; \
) > $@.tmp; \
if ! cmp -s $@.tmp $@; then \
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: $(CONVERSION_GEN).mk changed"; \
fi; \
cat $@.tmp > $@; \
rm -f $@.tmp; \
fi
# Include dependency info for the generator tool. This will cause the rule of
# the same name to be considered and if it is updated, make will restart.
sinclude $(META_DIR)/$(CONVERSION_GEN).mk
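# Editor's note: the .mk rule above computes the generator's transitive source
# files with `go list`. A standalone sketch of that computation, run from the
# repo root (the k8s.io/kubernetes import-path prefix is assumed):
    # packages the generator depends on, filtered to in-repo ones...
    go list -f '{{.ImportPath}}{{"\n"}}{{range .Deps}}{{.}}{{"\n"}}{{end}}' \
        ./vendor/k8s.io/code-generator/cmd/conversion-gen \
      | grep '^k8s.io/kubernetes/' \
      | xargs go list -f '{{$d := .Dir}}{{range .GoFiles}}{{$d}}/{{.}}{{"\n"}}{{end}}'
    # ...then every .go file inside them; the Makefile folds this list into a
    # "conversion-gen: <files>" dependency line and sincludes it.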
# How to build the generator tool. The deps for this are defined in
# the $(CONVERSION_GEN).mk, above.
# the $(GO_PKGDEPS_FILE), above.
#
# A word on the need to touch: This rule might trigger if, for example, a
# non-Go file was added or deleted from a directory on which this depends.
@ -698,12 +387,12 @@ sinclude $(META_DIR)/$(CONVERSION_GEN).mk
# have to be rebuilt. In that case, make will forever see the dependency as
# newer than the binary, and try to rebuild it over and over. So we touch it,
# and make is happy.
$(CONVERSION_GEN):
hack/make-rules/build.sh ./vendor/k8s.io/code-generator/cmd/conversion-gen
$(CONVERSION_GEN): $(k8s.io/kubernetes/vendor/k8s.io/code-generator/cmd/conversion-gen)
KUBE_BUILD_PLATFORMS="" hack/make-rules/build.sh ./vendor/k8s.io/code-generator/cmd/conversion-gen
touch $@
#
# Open-api generation
# OpenAPI generation
#
# Any package that wants open-api functions generated must include a
# comment-tag in column 0 of one file of the form:
@ -713,6 +402,18 @@ $(CONVERSION_GEN):
OPENAPI_BASENAME := $(GENERATED_FILE_PREFIX)openapi
OPENAPI_FILENAME := $(OPENAPI_BASENAME).go
OPENAPI_OUTPUT_PKG := pkg/generated/openapi
BOILERPLATE_FILENAME := vendor/k8s.io/code-generator/hack/boilerplate.go.txt
REPORT_FILENAME := $(OUT_DIR)/violations.report
KNOWN_VIOLATION_FILENAME := api/api-rules/violation_exceptions.list
# When UPDATE_API_KNOWN_VIOLATIONS is set to true, let the generator write
# updated API violations to the known API violation exceptions list.
ifeq ($(UPDATE_API_KNOWN_VIOLATIONS),true)
REPORT_FILENAME:=$(KNOWN_VIOLATION_FILENAME)
# When UPDATE_API_KNOWN_VIOLATIONS is set to true, touch the exceptions
# list so that the OPENAPI_OUTFILE target is re-run instead of being cached.
$(shell touch $(KNOWN_VIOLATION_FILENAME))
endif
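# Editor's note: illustrative use of this knob, assuming the gen_openapi
# target wired up below:
    make gen_openapi                                   # fails if violations differ from the known list
    make gen_openapi UPDATE_API_KNOWN_VIOLATIONS=true  # regenerate and rewrite the exceptions list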
API_RULE_CHECK_FAILURE_MESSAGE := "ERROR: \n\t API rule check failed. Reported violations differ from known violations. Please read api/api-rules/README.md to resolve the failure. \n"
# The tool used to generate OpenAPI definitions.
OPENAPI_GEN := $(BIN_DIR)/openapi-gen
@ -736,77 +437,46 @@ gen_openapi: $(OPENAPI_OUTFILE) $(OPENAPI_GEN)
# For each dir in OPENAPI_DIRS, this establishes a dependency between the
# output file and the input files that should trigger a rebuild.
#
# Note that this is a deps-only statement, not a full rule (see below). This
# has to be done in a distinct step because wildcards don't work in static
# pattern rules.
# Note that this is a deps-only statement, not a full rule (see below for that).
#
# The '$(eval)' is needed because this has a different RHS for each LHS, and
# would otherwise produce results that make can't parse.
#
# We depend on the $(GOFILES_META).stamp to detect when the set of input files
# has changed. This allows us to detect deleted input files.
$(foreach dir, $(OPENAPI_DIRS), $(eval \
    $(OPENAPI_OUTFILE): $(META_DIR)/$(dir)/$(GOFILES_META).stamp \
        $(gofiles__$(dir)) \
))
$(foreach dir, $(OPENAPI_DIRS), $(eval \
    $(OPENAPI_OUTFILE): $($(PRJ_SRC_PATH)/$(dir)) \
))
# How to regenerate open-api code. This emits a single file for all results.
$(OPENAPI_OUTFILE): $(OPENAPI_GEN)
function run_gen_openapi() { \
./hack/run-in-gopath.sh $(OPENAPI_GEN) \
--v $(KUBE_VERBOSE) \
--logtostderr \
-i $$(echo $(addprefix $(PRJ_SRC_PATH)/, $(OPENAPI_DIRS)) | sed 's/ /,/g') \
-p $(PRJ_SRC_PATH)/$(OPENAPI_OUTPUT_PKG) \
-O $(OPENAPI_BASENAME) \
"$$@"; \
}; \
run_gen_openapi
# The Make rule fails if the generated API rule violation report differs from
# the checked-in violation file, and prints an error message asking the
# developer to fix either the API source code or the known API rule violation file.
$(OPENAPI_OUTFILE): $(OPENAPI_GEN) $(KNOWN_VIOLATION_FILENAME)
./hack/run-in-gopath.sh $(OPENAPI_GEN) \
--v $(KUBE_VERBOSE) \
--logtostderr \
-i $$(echo $(addprefix $(PRJ_SRC_PATH)/, $(OPENAPI_DIRS)) | sed 's/ /,/g') \
-p $(PRJ_SRC_PATH)/$(OPENAPI_OUTPUT_PKG) \
-O $(OPENAPI_BASENAME) \
-h $(BOILERPLATE_FILENAME) \
-r $(REPORT_FILENAME) \
"$$@"; \
diff $(REPORT_FILENAME) $(KNOWN_VIOLATION_FILENAME) || \
(echo -e $(API_RULE_CHECK_FAILURE_MESSAGE); exit 1)
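# Editor's note: the gate at the end of that recipe reduces to a plain
# diff-or-fail check (paths as defined above, assuming OUT_DIR is _output):
    diff _output/violations.report api/api-rules/violation_exceptions.list \
      || { echo "API rule check failed; fix the API or update the known-violations list"; exit 1; }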
# This calculates the dependencies for the generator tool, so we only rebuild
# it when needed. It is PHONY so that it always runs, but it only updates the
# file if the contents have actually changed. We 'sinclude' this later.
.PHONY: $(META_DIR)/$(OPENAPI_GEN).mk
$(META_DIR)/$(OPENAPI_GEN).mk:
mkdir -p $(@D); \
(echo -n "$(OPENAPI_GEN): "; \
./hack/run-in-gopath.sh go list \
-f '{{.ImportPath}}{{"\n"}}{{range .Deps}}{{.}}{{"\n"}}{{end}}' \
./vendor/k8s.io/code-generator/cmd/openapi-gen \
| grep --color=never "^$(PRJ_SRC_PATH)/" \
| xargs ./hack/run-in-gopath.sh go list \
-f '{{$$d := .Dir}}{{$$d}}{{"\n"}}{{range .GoFiles}}{{$$d}}/{{.}}{{"\n"}}{{end}}' \
| paste -sd' ' - \
| sed 's/ / \\=,/g' \
| tr '=,' '\n\t' \
| sed "s|$$(pwd -P)/||"; \
) > $@.tmp; \
if ! cmp -s $@.tmp $@; then \
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: $(OPENAPI_GEN).mk changed"; \
fi; \
cat $@.tmp > $@; \
rm -f $@.tmp; \
fi
# Include dependency info for the generator tool. This will cause the rule of
# the same name to be considered and if it is updated, make will restart.
sinclude $(META_DIR)/$(OPENAPI_GEN).mk
# How to build the generator tool. The deps for this are defined in
# the $(OPENAPI_GEN).mk, above.
# the $(GO_PKGDEPS_FILE), above.
#
# A word on the need to touch: This rule might trigger if, for example, a
# non-Go file was added or deleted from a directory on which this depends.
# This target needs to be reconsidered, but Go realizes it doesn't actually
# have to be rebuilt. In that case, make will forever see the dependency as
# newer than the binary, and try to rebuild it over and over. So we touch it,
# and make is happy.
$(OPENAPI_GEN):
hack/make-rules/build.sh ./vendor/k8s.io/code-generator/cmd/openapi-gen
# newer than the binary, and try to "rebuild" it over and over. So we touch
# it, and make is happy.
$(OPENAPI_GEN): $(k8s.io/kubernetes/vendor/k8s.io/kube-openapi/cmd/openapi-gen)
KUBE_BUILD_PLATFORMS="" hack/make-rules/build.sh ./vendor/k8s.io/kube-openapi/cmd/openapi-gen
touch $@
#
# bindata generation
#
@ -822,38 +492,6 @@ BINDATA_SCRIPT := hack/generate-bindata.sh
gen_bindata: $(BINDATA_GEN) FORCE
./hack/run-in-gopath.sh $(BINDATA_SCRIPT)
FORCE:
# This calculates the dependencies for the generator tool, so we only rebuild
# it when needed. It is PHONY so that it always runs, but it only updates the
# file if the contents have actually changed. We 'sinclude' this later.
.PHONY: $(META_DIR)/$(BINDATA_GEN).mk
$(META_DIR)/$(BINDATA_GEN).mk:
mkdir -p $(@D); \
(echo -n "$(BINDATA_GEN): "; \
./hack/run-in-gopath.sh go list \
-f '{{.ImportPath}}{{"\n"}}{{range .Deps}}{{.}}{{"\n"}}{{end}}' \
./vendor/github.com/jteeuwen/go-bindata/go-bindata \
| grep --color=never "^$(PRJ_SRC_PATH)/" \
| xargs ./hack/run-in-gopath.sh go list \
-f '{{$$d := .Dir}}{{$$d}}{{"\n"}}{{range .GoFiles}}{{$$d}}/{{.}}{{"\n"}}{{end}}' \
| paste -sd' ' - \
| sed 's/ / \\=,/g' \
| tr '=,' '\n\t' \
| sed "s|$$(pwd -P)/||"; \
) > $@.tmp; \
if ! cmp -s $@.tmp $@; then \
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: $(BINDATA_GEN).mk changed"; \
fi; \
cat $@.tmp > $@; \
rm -f $@.tmp; \
fi
# Include dependency info for the generator tool. This will cause the rule of
# the same name to be considered and if it is updated, make will restart.
sinclude $(META_DIR)/$(BINDATA_GEN).mk
# How to build the generator tool. The deps for this are defined in
# the $(BINDATA_GEN).mk, above.
#
@ -863,6 +501,6 @@ sinclude $(META_DIR)/$(BINDATA_GEN).mk
# have to be rebuilt. In that case, make will forever see the dependency as
# newer than the binary, and try to rebuild it over and over. So we touch it,
# and make is happy.
$(BINDATA_GEN):
hack/make-rules/build.sh ./vendor/github.com/jteeuwen/go-bindata/go-bindata
$(BINDATA_GEN): $(k8s.io/kubernetes/vendor/github.com/jteeuwen/go-bindata/go-bindata)
KUBE_BUILD_PLATFORMS="" hack/make-rules/build.sh ./vendor/github.com/jteeuwen/go-bindata/go-bindata
touch $@
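# Editor's note: the gen_* rules above are batched behind the
# 'generated_files' umbrella target mentioned in the comments; a typical loop
# while editing API types:
    make generated_files                # regenerate whatever is stale
    make generated_files DBG_CODEGEN=1  # same, narrating each decision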


@ -1,17 +1,18 @@
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file")
load("//build:workspace_mirror.bzl", "mirror")
load("//build:workspace.bzl", "CRI_TOOLS_VERSION")
http_archive(
name = "io_bazel_rules_go",
sha256 = "242602c9818a83cbe97d1446b48263dcd48949a74d713c172d1b03da841b168a",
urls = mirror("https://github.com/bazelbuild/rules_go/releases/download/0.10.5/rules_go-0.10.5.tar.gz"),
sha256 = "f87fa87475ea107b3c69196f39c82b7bbf58fe27c62a338684c20ca17d1d8613",
urls = mirror("https://github.com/bazelbuild/rules_go/releases/download/0.16.2/rules_go-0.16.2.tar.gz"),
)
http_archive(
name = "io_kubernetes_build",
sha256 = "007774f06536059f3f782d1a092bddc625d88c17f20bbe731cea844a52485b11",
strip_prefix = "repo-infra-97099dccc8807e9159dc28f374a8f0602cab07e1",
urls = mirror("https://github.com/kubernetes/repo-infra/archive/97099dccc8807e9159dc28f374a8f0602cab07e1.tar.gz"),
sha256 = "21160531ea8a9a4001610223ad815622bf60671d308988c7057168a495a7e2e8",
strip_prefix = "repo-infra-b4bc4f1552c7fc1d4654753ca9b0e5e13883429f",
urls = mirror("https://github.com/kubernetes/repo-infra/archive/b4bc4f1552c7fc1d4654753ca9b0e5e13883429f.tar.gz"),
)
http_archive(
@ -21,26 +22,26 @@ http_archive(
urls = mirror("https://github.com/bazelbuild/bazel-skylib/archive/2169ae1c374aab4a09aa90e65efe1a3aad4e279b.tar.gz"),
)
ETCD_VERSION = "3.2.18"
ETCD_VERSION = "3.2.24"
new_http_archive(
http_archive(
name = "com_coreos_etcd",
build_file = "third_party/etcd.BUILD",
sha256 = "b729db0732448064271ea6fdcb901773c4fe917763ca07776f22d0e5e0bd4097",
build_file = "@//third_party:etcd.BUILD",
sha256 = "947849dbcfa13927c81236fb76a7c01d587bbab42ab1e807184cd91b026ebed7",
strip_prefix = "etcd-v%s-linux-amd64" % ETCD_VERSION,
urls = mirror("https://github.com/coreos/etcd/releases/download/v%s/etcd-v%s-linux-amd64.tar.gz" % (ETCD_VERSION, ETCD_VERSION)),
)
http_archive(
name = "io_bazel_rules_docker",
sha256 = "c440717ee9b1b2f4a1e9bf5622539feb5aef9db83fc1fa1517818f13c041b0be",
strip_prefix = "rules_docker-8bbe2a8abd382641e65ff7127a3700a8530f02ce",
urls = mirror("https://github.com/bazelbuild/rules_docker/archive/8bbe2a8abd382641e65ff7127a3700a8530f02ce.tar.gz"),
sha256 = "29d109605e0d6f9c892584f07275b8c9260803bf0c6fcb7de2623b2bedc910bd",
strip_prefix = "rules_docker-0.5.1",
urls = mirror("https://github.com/bazelbuild/rules_docker/archive/v0.5.1.tar.gz"),
)
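# Editor's note: when bumping a pinned archive like the ones above, the new
# sha256 can be recomputed with stock tools; for the rules_docker pin just
# updated:
    curl -fsSL https://github.com/bazelbuild/rules_docker/archive/v0.5.1.tar.gz | sha256sum
    # expect the value pinned above: 29d109605e0d6f9c892584f07275b8c9260803bf0c6fcb7de2623b2bedc910bd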
load("@bazel_skylib//:lib.bzl", "versions")
versions.check(minimum_bazel_version = "0.13.0")
versions.check(minimum_bazel_version = "0.17.2")
load("@io_bazel_rules_go//go:def.bzl", "go_download_sdk", "go_register_toolchains", "go_rules_dependencies")
load("@io_bazel_rules_docker//docker:docker.bzl", "docker_pull", "docker_repositories")
@ -48,42 +49,52 @@ load("@io_bazel_rules_docker//docker:docker.bzl", "docker_pull", "docker_reposit
go_rules_dependencies()
go_register_toolchains(
go_version = "1.10.3",
go_version = "1.11.2",
)
docker_repositories()
http_file(
name = "kubernetes_cni",
downloaded_file_path = "kubernetes_cni.tgz",
sha256 = "f04339a21b8edf76d415e7f17b620e63b8f37a76b2f706671587ab6464411f2d",
urls = mirror("https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.6.0.tgz"),
)
http_file(
name = "cri_tools",
sha256 = "bdc838174778223a1af4bdeaaed4bd266120c0e152588f78750fb86221677fb4",
downloaded_file_path = "cri_tools.tgz",
sha256 = "e7d913bcce40bf54e37ab1d4b75013c823d0551e6bc088b217bc1893207b4844",
urls = mirror("https://github.com/kubernetes-incubator/cri-tools/releases/download/v%s/crictl-v%s-linux-amd64.tar.gz" % (CRI_TOOLS_VERSION, CRI_TOOLS_VERSION)),
)
docker_pull(
name = "debian-base-amd64",
digest = "sha256:86176bc8ccdc4d8ea7fbf6ba4b57fcefc2cb61ff7413114630940474ff9bf751",
registry = "k8s.gcr.io",
repository = "debian-base-amd64",
tag = "0.4.0", # ignored, but kept here for documentation
)
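# Editor's note: these docker_pull rules pin by digest; the tag is
# documentation only. One illustrative way to recover the digest for a tag:
    docker pull k8s.gcr.io/debian-base-amd64:0.4.0
    docker inspect --format '{{index .RepoDigests 0}}' k8s.gcr.io/debian-base-amd64:0.4.0
    # -> k8s.gcr.io/debian-base-amd64@sha256:86176bc8...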
docker_pull(
name = "debian-iptables-amd64",
digest = "sha256:fb18678f8203ca1bd2fad2671e3ebd80cb408a1baae423d4ad39c05f4caac4e1",
digest = "sha256:d4ff8136b9037694a3165a7fff6a91e7fc828741b8ea1eda226d4d9ea5d23abb",
registry = "k8s.gcr.io",
repository = "debian-iptables-amd64",
tag = "v10", # ignored, but kept here for documentation
tag = "v11.0", # ignored, but kept here for documentation
)
docker_pull(
name = "debian-hyperkube-base-amd64",
digest = "sha256:cc782ed16599000ca4c85d47ec6264753747ae1e77520894dca84b104a7621e2",
digest = "sha256:4a77bc882f7d629c088a11ff144a2e86660268fddf63b61f52b6a93d16ab83f0",
registry = "k8s.gcr.io",
repository = "debian-hyperkube-base-amd64",
tag = "0.10", # ignored, but kept here for documentation
tag = "0.12.0", # ignored, but kept here for documentation
)
docker_pull(
name = "official_busybox",
digest = "sha256:4cee1979ba0bf7db9fc5d28fb7b798ca69ae95a47c5fecf46327720df4ff352d",
digest = "sha256:cb63aa0641a885f54de20f61d152187419e8f6b159ed11a251a09d115fdff9bd",
registry = "index.docker.io",
repository = "library/busybox",
tag = "latest", # ignored, but kept here for documentation

vendor/k8s.io/kubernetes/build/rpms/50-kubeadm.conf generated vendored Normal file

@ -0,0 +1,2 @@
# This file is provided as part of the kubeadm package
net.ipv4.ip_forward = 1
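# Editor's note: this sysctl fragment is applied at boot once installed under
# the sysctl.d hierarchy; to apply and verify it by hand on a running machine:
    sysctl --system               # re-read all sysctl.d fragments, including this one
    sysctl net.ipv4.ip_forward    # expect: net.ipv4.ip_forward = 1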


@ -75,8 +75,7 @@ pkg_rpm(
],
spec_file = "cri-tools.spec",
tags = ["manual"],
# dashes are not allowed in rpm versions
version = CRI_TOOLS_VERSION.replace("-", "_"),
version = CRI_TOOLS_VERSION,
)
filegroup(

vendor/k8s.io/kubernetes/build/rpms/OWNERS generated vendored Normal file

@ -0,0 +1,13 @@
reviewers:
- luxas
- jbeda
- mikedanese
- pipejakob
- chuckha
- timothysc
approvers:
- luxas
- jbeda
- mikedanese
- pipejakob
- timothysc


@ -10,8 +10,8 @@ URL: https://kubernetes.io
Binaries to interface with the container runtime.
%prep
# TODO(chuckha): update this to use %{version} when the dash is removed from the release
tar -xzf {crictl-v1.0.0-beta.1-linux-amd64.tar.gz}
# This has to be hard coded because bazel does a path substitution before rpm's %{version} is substituted.
tar -xzf {crictl-v1.12.0-linux-amd64.tar.gz}
%install
install -m 755 -d %{buildroot}%{_bindir}

vendor/k8s.io/kubernetes/build/rpms/kubeadm.conf generated vendored Normal file

@ -0,0 +1,2 @@
# Load br_netfilter module at boot
br_netfilter
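# Editor's note: systemd-modules-load reads this drop-in at boot; the
# equivalent manual steps:
    modprobe br_netfilter         # load the module now
    lsmod | grep br_netfilter     # confirm it is present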


@ -6,6 +6,7 @@ Summary: Container Cluster Manager - Kubernetes Cluster Bootstrapping Tool
Requires: kubelet >= 1.8.0
Requires: kubectl >= 1.8.0
Requires: kubernetes-cni >= 0.5.1
Requires: cri-tools >= 1.11.0
URL: https://kubernetes.io
@ -18,10 +19,17 @@ install -m 755 -d %{buildroot}%{_sysconfdir}/systemd/system/
install -m 755 -d %{buildroot}%{_sysconfdir}/systemd/system/kubelet.service.d/
install -m 755 -d %{buildroot}%{_sysconfdir}/sysconfig/
install -p -m 755 -t %{buildroot}%{_bindir} {kubeadm}
install -p -m 755 -t %{buildroot}%{_sysconfdir}/systemd/system/kubelet.service.d/ {10-kubeadm.conf}
install -p -m 755 -T {kubelet.env} %{buildroot}%{_sysconfdir}/sysconfig/kubelet
install -p -m 644 -t %{buildroot}%{_sysconfdir}/systemd/system/kubelet.service.d/ {10-kubeadm.conf}
install -p -m 644 -T {kubelet.env} %{buildroot}%{_sysconfdir}/sysconfig/kubelet
mkdir -p %{buildroot}%{_libexecdir}/modules-load.d
mkdir -p %{buildroot}%{_sysctldir}
install -p -m 0644 -t %{buildroot}%{_libexecdir}/modules-load.d/ {kubeadm.conf}
install -p -m 0644 -t %{buildroot}%{_sysctldir} {50-kubeadm.conf}
%files
%{_bindir}/kubeadm
%{_sysconfdir}/systemd/system/kubelet.service.d/10-kubeadm.conf
%{_sysconfdir}/sysconfig/kubelet
%dir %{_libexecdir}/modules-load.d
%{_libexecdir}/modules-load.d/kubeadm.conf
%{_sysctldir}/50-kubeadm.conf
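# Editor's note: the switch from 755 to 644 marks these entries as plain
# configuration rather than executables; a quick post-install spot check
# (paths taken from the %files list above):
    stat -c '%a %n' \
        /etc/systemd/system/kubelet.service.d/10-kubeadm.conf \
        /etc/sysconfig/kubelet
    # expect mode 644 on both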


@ -23,9 +23,9 @@ install -m 755 -d %{buildroot}%{_bindir}
install -m 755 -d %{buildroot}%{_sysconfdir}/systemd/system/
install -m 755 -d %{buildroot}%{_sysconfdir}/kubernetes/manifests/
install -p -m 755 -t %{buildroot}%{_bindir} {kubelet}
install -p -m 755 -t %{buildroot}%{_sysconfdir}/systemd/system/ {kubelet.service}
install -p -m 644 -t %{buildroot}%{_sysconfdir}/systemd/system/ {kubelet.service}
%files
%{_bindir}/kubelet
%{_sysconfdir}/systemd/system/kubelet.service
%attr(644,-,-) %{_sysconfdir}/systemd/system/kubelet.service
%{_sysconfdir}/kubernetes/manifests/


@ -40,6 +40,7 @@ package_group(
"//hack",
"//hack/lib",
"//hack/make-rules",
"//test/cmd",
"//test/e2e/...",
"//test/integration/...",
],
@ -133,6 +134,7 @@ package_group(
name = "pkg_kubectl_cmd_create_CONSUMERS",
packages = [
"//pkg/kubectl/cmd",
"//pkg/kubectl/cmd/edit",
],
)
@ -159,7 +161,7 @@ package_group(
)
package_group(
name = "pkg_kubectl_cmd_templates_CONSUMERS",
name = "pkg_kubectl_util_templates_CONSUMERS",
includes = [
":COMMON_generators",
":COMMON_testing",
@ -168,23 +170,56 @@ package_group(
"//cmd/kubectl",
"//cmd/kubectl/app",
"//pkg/kubectl/cmd",
"//pkg/kubectl/cmd/annotate",
"//pkg/kubectl/cmd/apiresources",
"//pkg/kubectl/cmd/apply",
"//pkg/kubectl/cmd/attach",
"//pkg/kubectl/cmd/auth",
"//pkg/kubectl/cmd/autoscale",
"//pkg/kubectl/cmd/certificates",
"//pkg/kubectl/cmd/clusterinfo",
"//pkg/kubectl/cmd/completion",
"//pkg/kubectl/cmd/config",
"//pkg/kubectl/cmd/convert",
"//pkg/kubectl/cmd/cp",
"//pkg/kubectl/cmd/create",
"//pkg/kubectl/cmd/delete",
"//pkg/kubectl/cmd/describe",
"//pkg/kubectl/cmd/diff",
"//pkg/kubectl/cmd/drain",
"//pkg/kubectl/cmd/edit",
"//pkg/kubectl/cmd/exec",
"//pkg/kubectl/cmd/explain",
"//pkg/kubectl/cmd/expose",
"//pkg/kubectl/cmd/get",
"//pkg/kubectl/cmd/help",
"//pkg/kubectl/cmd/label",
"//pkg/kubectl/cmd/logs",
"//pkg/kubectl/cmd/options",
"//pkg/kubectl/cmd/patch",
"//pkg/kubectl/cmd/plugin",
"//pkg/kubectl/cmd/portforward",
"//pkg/kubectl/cmd/proxy",
"//pkg/kubectl/cmd/replace",
"//pkg/kubectl/cmd/rollingupdate",
"//pkg/kubectl/cmd/rollout",
"//pkg/kubectl/cmd/run",
"//pkg/kubectl/cmd/scale",
"//pkg/kubectl/cmd/set",
"//pkg/kubectl/cmd/templates",
"//pkg/kubectl/cmd/taint",
"//pkg/kubectl/cmd/top",
"//pkg/kubectl/cmd/util",
"//pkg/kubectl/cmd/util/sanity",
"//pkg/kubectl/cmd/version",
"//pkg/kubectl/cmd/wait",
"//pkg/kubectl/util",
],
)
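# Editor's note: package_group is an ordinary target, so the (now much longer)
# consumer lists above can be inspected with a query; the //build/visible_to
# package name is an assumption about where these groups live:
    bazel query --output=build //build/visible_to:pkg_kubectl_util_templates_CONSUMERS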
package_group(
name = "pkg_kubectl_cmd_testdata_edit_CONSUMERS",
name = "pkg_kubectl_cmd_edit_testdata_CONSUMERS",
packages = [
"//pkg/kubectl/cmd",
"//pkg/kubectl/cmd/edit",
],
)
@ -192,12 +227,33 @@ package_group(
name = "pkg_kubectl_cmd_testing_CONSUMERS",
packages = [
"//pkg/kubectl/cmd",
"//pkg/kubectl/cmd/annotate",
"//pkg/kubectl/cmd/apply",
"//pkg/kubectl/cmd/attach",
"//pkg/kubectl/cmd/auth",
"//pkg/kubectl/cmd/clusterinfo",
"//pkg/kubectl/cmd/convert",
"//pkg/kubectl/cmd/cp",
"//pkg/kubectl/cmd/create",
"//pkg/kubectl/cmd/delete",
"//pkg/kubectl/cmd/describe",
"//pkg/kubectl/cmd/drain",
"//pkg/kubectl/cmd/edit",
"//pkg/kubectl/cmd/exec",
"//pkg/kubectl/cmd/expose",
"//pkg/kubectl/cmd/get",
"//pkg/kubectl/cmd/label",
"//pkg/kubectl/cmd/logs",
"//pkg/kubectl/cmd/patch",
"//pkg/kubectl/cmd/portforward",
"//pkg/kubectl/cmd/replace",
"//pkg/kubectl/cmd/rollingupdate",
"//pkg/kubectl/cmd/rollout",
"//pkg/kubectl/cmd/run",
"//pkg/kubectl/cmd/set",
"//pkg/kubectl/cmd/wait",
"//pkg/kubectl/cmd/taint",
"//pkg/kubectl/cmd/testing",
"//pkg/kubectl/cmd/top",
"//pkg/kubectl/explain",
],
)
@ -213,15 +269,48 @@ package_group(
"//cmd/kubectl",
"//cmd/kubectl/app",
"//pkg/kubectl/cmd",
"//pkg/kubectl/cmd/annotate",
"//pkg/kubectl/cmd/apiresources",
"//pkg/kubectl/cmd/apply",
"//pkg/kubectl/cmd/attach",
"//pkg/kubectl/cmd/auth",
"//pkg/kubectl/cmd/autoscale",
"//pkg/kubectl/cmd/certificates",
"//pkg/kubectl/cmd/clusterinfo",
"//pkg/kubectl/cmd/completion",
"//pkg/kubectl/cmd/config",
"//pkg/kubectl/cmd/convert",
"//pkg/kubectl/cmd/cp",
"//pkg/kubectl/cmd/create",
"//pkg/kubectl/cmd/delete",
"//pkg/kubectl/cmd/describe",
"//pkg/kubectl/cmd/diff",
"//pkg/kubectl/cmd/drain",
"//pkg/kubectl/cmd/edit",
"//pkg/kubectl/cmd/exec",
"//pkg/kubectl/cmd/explain",
"//pkg/kubectl/cmd/expose",
"//pkg/kubectl/cmd/get",
"//pkg/kubectl/cmd/help",
"//pkg/kubectl/cmd/label",
"//pkg/kubectl/cmd/logs",
"//pkg/kubectl/cmd/patch",
"//pkg/kubectl/cmd/plugin",
"//pkg/kubectl/cmd/portforward",
"//pkg/kubectl/cmd/proxy",
"//pkg/kubectl/cmd/replace",
"//pkg/kubectl/cmd/rollingupdate",
"//pkg/kubectl/cmd/rollout",
"//pkg/kubectl/cmd/run",
"//pkg/kubectl/cmd/scale",
"//pkg/kubectl/cmd/set",
"//pkg/kubectl/cmd/taint",
"//pkg/kubectl/cmd/testing",
"//pkg/kubectl/cmd/top",
"//pkg/kubectl/cmd/util",
"//pkg/kubectl/cmd/util/editor",
"//pkg/kubectl/cmd/util/sanity",
"//pkg/kubectl/cmd/version",
"//pkg/kubectl/cmd/wait",
],
)
@ -230,7 +319,9 @@ package_group(
name = "pkg_kubectl_cmd_util_editor_CONSUMERS",
packages = [
"//pkg/kubectl/cmd",
"//pkg/kubectl/cmd/apply",
"//pkg/kubectl/cmd/create",
"//pkg/kubectl/cmd/edit",
"//pkg/kubectl/cmd/util",
],
)
@ -261,6 +352,7 @@ package_group(
"//cmd/kubectl/app",
"//pkg/kubectl",
"//pkg/kubectl/cmd",
"//pkg/kubectl/cmd/top",
],
)
@ -299,7 +391,9 @@ package_group(
name = "pkg_kubectl_util_CONSUMERS",
packages = [
"//pkg/kubectl",
"//pkg/kubectl/cmd",
"//pkg/kubectl/cmd/...",
"//pkg/kubectl/generate",
"//pkg/kubectl/generate/versioned",
"//pkg/kubectl/proxy",
],
)
@ -309,9 +403,12 @@ package_group(
packages = [
"//pkg/kubectl",
"//pkg/kubectl/cmd",
"//pkg/kubectl/cmd/apply",
"//pkg/kubectl/cmd/convert",
"//pkg/kubectl/cmd/replace",
"//pkg/kubectl/cmd/rollingupdate",
"//pkg/kubectl/cmd/testing",
"//pkg/kubectl/cmd/util",
"//pkg/kubectl/resource",
],
)


@ -12,4 +12,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
CRI_TOOLS_VERSION = "1.0.0-beta.1"
CRI_TOOLS_VERSION = "1.12.0"