vendor files
2
vendor/k8s.io/kubernetes/cluster/juju/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,2 @@
builds
deps
21
vendor/k8s.io/kubernetes/cluster/juju/OWNERS
generated
vendored
Normal file
@@ -0,0 +1,21 @@
approvers:
- castrojo
- chuckbutler
- marcoceppi
- mbruzek
- Cynerva
- ktsakalozos
reviewers:
- chuckbutler
- marcoceppi
- mbruzek
- thockin
- mikedanese
- eparis
- jlowdermilk
- david-mcmahon
- Cynerva
- wwwtyro
- tvansteenburgh
- ktsakalozos
2
vendor/k8s.io/kubernetes/cluster/juju/bundles/OWNERS
generated
vendored
Executable file
@@ -0,0 +1,2 @@
reviewers:
- jlowdermilk
208
vendor/k8s.io/kubernetes/cluster/juju/bundles/README.md
generated
vendored
Normal file
@@ -0,0 +1,208 @@
# kubernetes-bundle

The kubernetes-bundle allows you to deploy the many services of
Kubernetes to a cloud environment and get started using the Kubernetes
technology quickly.

## Kubernetes

Kubernetes is an open source system for managing containerized
applications. Kubernetes uses [Docker](http://docker.com) to run
containerized applications.

## Juju TL;DR

The [Juju](https://jujucharms.com) system provides provisioning and
orchestration across a variety of clouds and bare metal. A Juju bundle
describes a collection of services and how they interrelate. `juju
quickstart` allows you to bootstrap a deployment environment and
deploy a bundle.

## Dive in!

#### Install Juju Quickstart

You will need to
[install the Juju client](https://jujucharms.com/get-started) and
`juju-quickstart` as prerequisites. To deploy the bundle use
`juju-quickstart`, which runs on Mac OS (`brew install
juju-quickstart`) or Ubuntu (`apt-get install juju-quickstart`).

### Deploy a Kubernetes Bundle

Use the `juju quickstart` command to deploy a Kubernetes cluster to any cloud
supported by Juju.

The charm store version of the Kubernetes bundle can be deployed as follows:

    juju quickstart u/kubernetes/kubernetes-cluster

> Note: The charm store bundle may be locked to a specific Kubernetes release.

Alternatively, you could deploy a Kubernetes bundle straight from GitHub or a file:

    juju quickstart -i https://raw.githubusercontent.com/whitmo/bundle-kubernetes/master/bundles.yaml

The command above does a few things for you:

- Starts a curses-based GUI for managing your cloud or MAAS credentials
- Looks for a bootstrapped deployment environment, and bootstraps if
  required. This will launch a bootstrap node in your chosen
  deployment environment (machine 0).
- Deploys the Juju GUI to your environment onto the bootstrap node.
- Provisions 4 machines, and deploys the Kubernetes services on top of
  them (kubernetes-master, two Kubernetes nodes using flannel, and etcd).
- Orchestrates the relations among the services, and exits.

Now you should have a running Kubernetes cluster. Run `juju status
--format=oneline` to see the address of your kubernetes-master unit.

For further reading, see [Juju Quickstart](https://pypi.python.org/pypi/juju-quickstart).

Go to the [Getting started with Juju guide](https://kubernetes.io/docs/getting-started-guides/ubuntu/installation/#setting-up-kubernetes-with-juju)
for more information about deploying a development Kubernetes cluster.

### Using the Kubernetes Client

You'll need the Kubernetes command line client,
[kubectl](https://github.com/kubernetes/kubernetes/blob/master/docs/user-guide/kubectl/kubectl.md),
to interact with the created cluster. The kubectl command is
installed on the kubernetes-master charm. If you want to work with
the cluster from your computer you will need to install the binary
locally.

You can access kubectl in a number of ways using Juju.

Via juju run:

    juju run --service kubernetes-master/0 "sudo kubectl get nodes"

Via juju ssh:

    juju ssh kubernetes-master/0 -t "sudo kubectl get nodes"

You may also SSH to the kubernetes-master unit (`juju ssh kubernetes-master/0`)
and call kubectl from the command prompt.

See the
[kubectl documentation](https://github.com/kubernetes/kubernetes/blob/master/docs/user-guide/kubectl/kubectl.md)
for more details of what can be done with the command line tool.
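If you prefer to work entirely from your own machine, one option is to copy the
cluster credentials down from the master unit. A minimal sketch, assuming the
charm writes a kubeconfig under `~/.kube/config` for the ubuntu user on the
master unit (verify the path on your deployment):

    juju scp kubernetes-master/0:.kube/config ~/.kube/config
    kubectl get nodes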
### Scaling up the cluster

You can add capacity by adding more Docker units:

    juju add-unit docker
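Assuming a reasonably recent Juju client, several units can be added in one
step with the `-n` flag:

    juju add-unit -n 3 docker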
### Known Limitations

Kubernetes currently has some platform-specific functionality. For
example, load balancers and persistent volumes only work with the
Google Compute provider at this time.

The Juju integration uses the Kubernetes null provider. This means
external load balancers and storage can't be directly driven through
Kubernetes config files at this time. We look forward to adding these
capabilities to the charms.


## More about the components the bundle deploys

### Kubernetes master

The master controls the Kubernetes cluster. It manages the worker
nodes and provides the primary interface for control by the user.

### Kubernetes node

The nodes are the servers that perform the work. Nodes must
communicate with the master and run the workloads that are assigned to
them.

### Flannel-docker

Flannel provides individual subnets for each machine in the cluster by
creating a
[software-defined network](http://en.wikipedia.org/wiki/Software-defined_networking).

### Docker

An open platform for distributed applications for developers and sysadmins.

### Etcd

Etcd persists state for Flannel and Kubernetes. It is a distributed
key-value store with an HTTP interface.


## For further information on getting started with Juju

Juju has complete documentation with regard to setup and cloud
configuration on its own
[documentation site](https://jujucharms.com/docs/).

- [Getting Started](https://jujucharms.com/docs/stable/getting-started)
- [Using Juju](https://jujucharms.com/docs/stable/charms)
## Installing kubectl outside of the kubernetes-master unit

Download a Kubernetes release from
https://github.com/kubernetes/kubernetes/releases and extract it;
you can then use the CLI binary directly at
`./kubernetes/platforms/linux/amd64/kubectl`.

You'll need the address of the kubernetes-master as an environment variable:

    juju status kubernetes-master/0

Grab the public-address there and export it as the KUBERNETES_MASTER
environment variable:

    export KUBERNETES_MASTER=$(juju status --format=oneline kubernetes-master | grep kubernetes-master | cut -d' ' -f3):8080
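For reference, the oneline status format prints one line per unit, roughly of
the shape below (illustrative, not captured from a real deployment), which is
why `cut -d' ' -f3` picks out the public address:

    - kubernetes-master/0: 10.0.3.1 (started)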
And now you can run kubectl on the command line:

    kubectl get no

See the
[kubectl documentation](https://github.com/kubernetes/kubernetes/blob/master/docs/user-guide/kubectl/kubectl.md)
for more details of what can be done with the command line tool.


## Hacking on the kubernetes-bundle and associated charms

The kubernetes-bundle is open source and available on github.com. If
you want to get started developing on the bundle you can clone it from
GitHub:

    git clone https://github.com/kubernetes/kubernetes.git

Go to the [Getting started with Juju guide](https://kubernetes.io/docs/getting-started-guides/ubuntu/installation/#setting-up-kubernetes-with-juju)
for more information about the bundle or charms.

## How to contribute

Send us pull requests! We'll send you a cookie if they include tests and docs.


## Current and Most Complete Information

The charms and bundles are in the [kubernetes](https://github.com/kubernetes/kubernetes)
repository on GitHub.

- [kubernetes-master charm on GitHub](https://github.com/kubernetes/kubernetes/tree/master/cluster/juju/charms/trusty/kubernetes-master)
- [kubernetes charm on GitHub](https://github.com/kubernetes/kubernetes/tree/master/cluster/juju/charms/trusty/kubernetes)

For more information, see the
[Kubernetes project](https://github.com/kubernetes/kubernetes)
or check out the
[Kubernetes Documentation](https://github.com/kubernetes/kubernetes/tree/master/docs)
for more details about the Kubernetes concepts and terminology.

Having a problem? Check the [Kubernetes issues database](https://github.com/kubernetes/kubernetes/issues)
for related issues.
18
vendor/k8s.io/kubernetes/cluster/juju/bundles/local.yaml.base
generated
vendored
Normal file
@@ -0,0 +1,18 @@
services:
  kubernetes:
    charm: __CHARM_DIR__/builds/kubernetes
    annotations:
      "gui-x": "600"
      "gui-y": "0"
    expose: true
    num_units: 2
  etcd:
    charm: cs:~containers/etcd
    annotations:
      "gui-x": "300"
      "gui-y": "0"
    num_units: 1
relations:
  - - "kubernetes:etcd"
    - "etcd:db"
series: xenial
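`__CHARM_DIR__` is a placeholder token; a minimal sketch of how a deploy script
might substitute it before handing the bundle to Juju (the variable name is
illustrative):

    sed "s|__CHARM_DIR__|$CHARM_DIR|g" local.yaml.base > local.yaml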
16
vendor/k8s.io/kubernetes/cluster/juju/config-default.sh
generated
vendored
Normal file
@@ -0,0 +1,16 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
17
vendor/k8s.io/kubernetes/cluster/juju/config-test.sh
generated
vendored
Normal file
@@ -0,0 +1,17 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

NUM_NODES=${NUM_NODES:-2}
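The `${NUM_NODES:-2}` default can be overridden from the environment. A hedged
example, assuming the classic `cluster/kube-up.sh` entry point with the Juju
provider selected:

    NUM_NODES=4 KUBERNETES_PROVIDER=juju cluster/kube-up.sh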
31
vendor/k8s.io/kubernetes/cluster/juju/identify-leaders.py
generated
vendored
Executable file
@@ -0,0 +1,31 @@
#!/usr/bin/env python

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from subprocess import check_output

import yaml


# Ask every unit of the kubernetes application whether it is the leader.
cmd = ['juju', 'run', '--application', 'kubernetes', '--format=yaml', 'is-leader']
out = check_output(cmd)
try:
    parsed_output = yaml.safe_load(out)
    for unit in parsed_output:
        standard_out = unit['Stdout'].rstrip()
        unit_id = unit['UnitId']
        if 'True' in standard_out:
            print(unit_id)
except Exception:
    # Tolerate unparseable or empty output and simply print nothing.
    pass
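Usage is a plain invocation from a machine with a configured Juju client; the
script prints the leader unit's name (for example `kubernetes/0`, illustrative):

    ./identify-leaders.py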
4
vendor/k8s.io/kubernetes/cluster/juju/kube-system-ns.yaml
generated
vendored
Normal file
@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: kube-system
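This manifest can be applied with kubectl in the usual way, for example:

    kubectl create -f kube-system-ns.yaml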
5
vendor/k8s.io/kubernetes/cluster/juju/layers/kubeapi-load-balancer/README.md
generated
vendored
Normal file
@@ -0,0 +1,5 @@
# kubeapi-load-balancer

Simple NGINX reverse proxy to lend a hand in HA kubernetes-master deployments.
11
vendor/k8s.io/kubernetes/cluster/juju/layers/kubeapi-load-balancer/config.yaml
generated
vendored
Normal file
@@ -0,0 +1,11 @@
options:
  port:
    type: int
    default: 443
    description: The port on which to run the load balancer
  extra_sans:
    type: string
    default: ""
    description: |
      Space-separated list of extra SAN entries to add to the x509 certificate
      created for the load balancers.
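These options can be changed on a deployed application; for example, with a
Juju 2.x client (sketch; the hostname and address are illustrative):

    juju config kubeapi-load-balancer extra_sans="lb.example.com 10.0.0.10"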
13
vendor/k8s.io/kubernetes/cluster/juju/layers/kubeapi-load-balancer/copyright
generated
vendored
Normal file
@@ -0,0 +1,13 @@
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
412
vendor/k8s.io/kubernetes/cluster/juju/layers/kubeapi-load-balancer/icon.svg
generated
vendored
Normal file
@@ -0,0 +1,412 @@
[icon.svg: Inkscape-generated SVG source for the 96x96 circular Kubernetes charm icon; full markup omitted.]
After Width: | Height: | Size: 20 KiB
14
vendor/k8s.io/kubernetes/cluster/juju/layers/kubeapi-load-balancer/layer.yaml
generated
vendored
Normal file
@@ -0,0 +1,14 @@
repo: https://github.com/kubernetes/kubernetes.git
includes:
  - 'layer:metrics'
  - 'layer:nagios'
  - 'layer:nginx'
  - 'layer:tls-client'
  - 'interface:public-address'
options:
  tls-client:
    ca_certificate_path: '/srv/kubernetes/ca.crt'
    server_certificate_path: '/srv/kubernetes/server.crt'
    server_key_path: '/srv/kubernetes/server.key'
    client_certificate_path: '/srv/kubernetes/client.crt'
    client_key_path: '/srv/kubernetes/client.key'
22
vendor/k8s.io/kubernetes/cluster/juju/layers/kubeapi-load-balancer/metadata.yaml
generated
vendored
Normal file
@@ -0,0 +1,22 @@
name: kubeapi-load-balancer
summary: Nginx Load Balancer
maintainers:
  - Tim Van Steenburgh <tim.van.steenburgh@canonical.com>
  - George Kraft <george.kraft@canonical.com>
  - Rye Terrell <rye.terrell@canonical.com>
  - Konstantinos Tsakalozos <kos.tsakalozos@canonical.com>
  - Charles Butler <Chuck@dasroot.net>
  - Matthew Bruzek <mbruzek@ubuntu.com>
description: |
  A round robin Nginx load balancer to distribute traffic for kubernetes apiservers.
tags:
  - misc
subordinate: false
series:
  - xenial
requires:
  apiserver:
    interface: http
provides:
  loadbalancer:
    interface: public-address
2
vendor/k8s.io/kubernetes/cluster/juju/layers/kubeapi-load-balancer/metrics.yaml
generated
vendored
Normal file
@@ -0,0 +1,2 @@
metrics:
  juju-units: {}
196
vendor/k8s.io/kubernetes/cluster/juju/layers/kubeapi-load-balancer/reactive/load_balancer.py
generated
vendored
Normal file
@@ -0,0 +1,196 @@
#!/usr/bin/env python

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import socket
import subprocess

from charms import layer
from charms.reactive import when, when_any, when_not
from charms.reactive import set_state, remove_state
from charmhelpers.core import hookenv
from charmhelpers.core import host
from charmhelpers.contrib.charmsupport import nrpe
from charms.reactive.helpers import data_changed

from charms.layer import nginx
from charms.layer import tls_client

from subprocess import Popen
from subprocess import PIPE
from subprocess import STDOUT
from subprocess import CalledProcessError


@when('certificates.available')
def request_server_certificates(tls):
    '''Send the data that is required to create a server certificate for
    this server.'''
    # Use the public ip of this unit as the Common Name for the certificate.
    common_name = hookenv.unit_public_ip()
    # Create SANs that the tls layer will add to the server cert.
    sans = [
        hookenv.unit_public_ip(),
        hookenv.unit_private_ip(),
        socket.gethostname(),
    ]
    # maybe they have extra names they want as SANs
    extra_sans = hookenv.config('extra_sans')
    if extra_sans and not extra_sans == "":
        sans.extend(extra_sans.split())
    # Create a path safe name by removing path characters from the unit name.
    certificate_name = hookenv.local_unit().replace('/', '_')
    # Request a server cert with this information.
    tls.request_server_cert(common_name, sans, certificate_name)


@when('config.changed.extra_sans', 'certificates.available')
def update_certificate(tls):
    # Using the config.changed.extra_sans flag to catch changes.
    # IP changes will take ~5 minutes or so to propagate, but
    # it will update.
    request_server_certificates(tls)


@when('certificates.server.cert.available',
      'nginx.available', 'tls_client.server.certificate.written')
def kick_nginx(tls):
    # we are just going to sighup it, but still want to avoid kicking it
    # without need
    if data_changed('cert', tls.get_server_cert()):
        # certificate changed, so sighup nginx
        hookenv.log("Certificate information changed, sending SIGHUP to nginx")
        host.service_restart('nginx')
    tls_client.reset_certificate_write_flag('server')


@when('config.changed.port')
def close_old_port():
    config = hookenv.config()
    old_port = config.previous('port')
    if not old_port:
        return
    try:
        hookenv.close_port(old_port)
    except CalledProcessError:
        hookenv.log('Port %d already closed, skipping.' % old_port)


@when('nginx.available', 'apiserver.available',
      'certificates.server.cert.available')
def install_load_balancer(apiserver, tls):
    ''' Create the default vhost template for load balancing '''
    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    server_cert_path = layer_options.get('server_certificate_path')
    cert_exists = server_cert_path and os.path.isfile(server_cert_path)
    server_key_path = layer_options.get('server_key_path')
    key_exists = server_key_path and os.path.isfile(server_key_path)
    # Do both the key and certificate exist?
    if cert_exists and key_exists:
        # At this point the cert and key exist, and they are owned by root.
        chown = ['chown', 'www-data:www-data', server_cert_path]

        # Change the owner to www-data so the nginx process can read the cert.
        subprocess.call(chown)
        chown = ['chown', 'www-data:www-data', server_key_path]

        # Change the owner to www-data so the nginx process can read the key.
        subprocess.call(chown)

        port = hookenv.config('port')
        hookenv.open_port(port)
        services = apiserver.services()
        nginx.configure_site(
            'apilb',
            'apilb.conf',
            server_name='_',
            services=services,
            port=port,
            server_certificate=server_cert_path,
            server_key=server_key_path,
        )
        hookenv.status_set('active', 'Loadbalancer ready.')


@when('nginx.available')
def set_nginx_version():
    ''' Surface the currently deployed version of nginx to Juju '''
    cmd = 'nginx -v'
    p = Popen(cmd, shell=True,
              stdin=PIPE,
              stdout=PIPE,
              stderr=STDOUT,
              close_fds=True)
    raw = p.stdout.read()
    # The version comes back as:
    # nginx version: nginx/1.10.0 (Ubuntu)
    version = raw.split(b'/')[-1].split(b' ')[0]
    hookenv.application_version_set(version.rstrip())


@when('website.available')
def provide_application_details(website):
    ''' re-use the nginx layer website relation to relay the hostname/port
    to any consuming kubernetes-workers, or other units that require the
    kubernetes API '''
    website.configure(port=hookenv.config('port'))


@when('loadbalancer.available')
def provide_loadbalancing(loadbalancer):
    '''Send the public address and port to the public-address interface, so
    the subordinates can get the public address of this loadbalancer.'''
    loadbalancer.set_address_port(hookenv.unit_get('public-address'),
                                  hookenv.config('port'))


@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
    set_state('nrpe-external-master.initial-config')
    update_nrpe_config(nagios)


@when('nginx.available')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
          'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
    services = ('nginx',)

    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
    nrpe_setup.write()


@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
    remove_state('nrpe-external-master.initial-config')

    # List of systemd services for which the checks will be removed
    services = ('nginx',)

    # The current nrpe-external-master interface doesn't handle a lot of
    # logic, use the charm-helpers code for now.
    hostname = nrpe.get_nagios_hostname()
    nrpe_setup = nrpe.NRPE(hostname=hostname)

    for service in services:
        nrpe_setup.remove_check(shortname=service)
41
vendor/k8s.io/kubernetes/cluster/juju/layers/kubeapi-load-balancer/templates/apilb.conf
generated
vendored
Normal file
@@ -0,0 +1,41 @@
{% for app in services -%}
upstream target_service {
  {% for host in app['hosts'] -%}
  server {{ host['hostname'] }}:{{ host['port'] }};
  {% endfor %}
}
{% endfor %}


server {
    listen {{ port }} ssl http2;
    server_name {{ server_name }};

    access_log /var/log/nginx.access.log;
    error_log /var/log/nginx.error.log;

    ssl on;
    ssl_session_cache builtin:1000 shared:SSL:10m;
    ssl_certificate {{ server_certificate }};
    ssl_certificate_key {{ server_key }};
    ssl_ciphers HIGH:!aNULL:!eNULL:!EXPORT:!CAMELLIA:!DES:!MD5:!PSK:!RC4;
    ssl_prefer_server_ciphers on;


    location / {
      proxy_buffering off;
      proxy_set_header Host $host;
      proxy_set_header X-Real-IP $remote_addr;
      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
      proxy_set_header X-Forwarded-Proto $scheme;
      proxy_set_header X-Forwarded-Proto-Version $http2;
      proxy_set_header Upgrade $http_upgrade;
      proxy_set_header Connection $http_connection;
      proxy_set_header X-Stream-Protocol-Version $http_x_stream_protocol_version;

      add_header X-Stream-Protocol-Version $upstream_http_x_stream_protocol_version;

      proxy_pass https://target_service;
      proxy_read_timeout 90;
    }
}
135
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-e2e/README.md
generated
vendored
Normal file
@@ -0,0 +1,135 @@
# Kubernetes end to end

End-to-end (e2e) tests for Kubernetes provide a mechanism to test end-to-end
behavior of the system, and are the last signal to ensure end user operations
match developer specifications. Although unit and integration tests provide a
good signal, in a distributed system like Kubernetes it is not uncommon that a
minor change may pass all unit and integration tests, but cause unforeseen
changes at the system level.

The primary objectives of the e2e tests are to ensure a consistent and reliable
behavior of the kubernetes code base, and to catch hard-to-test bugs before
users do, when unit and integration tests are insufficient.


## Usage

To deploy the end-to-end test suite, it is best to deploy the
[kubernetes-core bundle](https://github.com/juju-solutions/bundle-kubernetes-core)
and then relate the `kubernetes-e2e` charm.

```shell
juju deploy kubernetes-core
juju deploy cs:~containers/kubernetes-e2e
juju add-relation kubernetes-e2e:kube-control kubernetes-master:kube-control
juju add-relation kubernetes-e2e:kubernetes-master kubernetes-master:kube-api-endpoint
juju add-relation kubernetes-e2e easyrsa
```

Once the relations have settled and the `kubernetes-e2e` charm reports
`Ready to test.`, you may kick off an end-to-end validation test.

### Running the e2e test

The e2e test is encapsulated as an action to ensure consistent runs of the
end-to-end test. The defaults are sensible for most deployments.

```shell
juju run-action kubernetes-e2e/0 test
```

### Tuning the e2e test

The e2e test is configurable. By default it will focus on or skip the declared
conformance tests in a cloud-agnostic way. Default behaviors are configurable.
This allows the operator to test only a subset of the conformance tests, or to
test more behaviors not enabled by default. You can see all tunable options on
the charm by inspecting the schema output of the actions:

```shell
$ juju actions kubernetes-e2e --format=yaml --schema
test:
  description: Run end-to-end validation test suite
  properties:
    focus:
      default: \[Conformance\]
      description: Regex focus for executing the test
      type: string
    skip:
      default: \[Flaky\]
      description: Regex of tests to skip
      type: string
    timeout:
      default: 30000
      description: Timeout in nanoseconds
      type: integer
  title: test
  type: object
```

As an example, you can run a more limited set of tests for rapid validation of
a deployed cluster. The following example will skip the `Flaky`, `Slow`, and
`Feature` labeled tests:

```shell
juju run-action kubernetes-e2e/0 test skip='\[(Flaky|Slow|Feature:.*)\]'
```

> Note: the regex is escaped because of how bash handles brackets.

To see the different types of tests the Kubernetes end-to-end charm has access
to, we encourage you to read the upstream documentation on the different types
of tests, and to understand exactly which subsets of the tests you are running.

[Kinds of tests](https://github.com/kubernetes/community/blob/master/contributors/devel/e2e-tests.md#kinds-of-tests)

### More information on end-to-end testing

Along with the above descriptions, end-to-end testing is a much larger subject
than this readme can encapsulate. There is far more information in the
[end-to-end testing guide](https://github.com/kubernetes/community/blob/master/contributors/devel/e2e-tests.md).

### Evaluating end-to-end results

It is not enough to simply run the test. Result output is stored in two
places. The raw output of the e2e run is available via the `juju show-action-output`
command, as well as in a flat file on disk on the `kubernetes-e2e` unit that
executed the test.

> Note: The results will only be available once the action has
completed the test run. End-to-end testing can be quite time intensive, often
taking **more than 1 hour**, depending on configuration.

##### Flat file

```shell
$ juju run-action kubernetes-e2e/0 test
Action queued with id: 4ceed33a-d96d-465a-8f31-20d63442e51b

$ juju scp kubernetes-e2e/0:4ceed33a-d96d-465a-8f31-20d63442e51b.log .
```

##### Action result output

```shell
$ juju run-action kubernetes-e2e/0 test
Action queued with id: 4ceed33a-d96d-465a-8f31-20d63442e51b

$ juju show-action-output 4ceed33a-d96d-465a-8f31-20d63442e51b
```

## Known issues

The e2e test suite assumes egress network access. It will pull container
images from `gcr.io`. You will need to have this registry unblocked in your
firewall to run the e2e tests successfully. Alternatively, you may use the
exposed proxy settings [properly configured](https://github.com/juju-solutions/bundle-canonical-kubernetes#proxy-configuration)
on the kubernetes-worker units.

## Help resources:

- [Bug Tracker](https://github.com/juju-solutions/bundle-canonical-kubernetes/issues)
- [Github Repository](https://github.com/kubernetes/kubernetes/)
- [Mailing List](mailto:juju@lists.ubuntu.com)
19
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-e2e/actions.yaml
generated
vendored
Normal file
@@ -0,0 +1,19 @@
test:
  description: "Execute an end to end test."
  params:
    focus:
      default: "\\[Conformance\\]"
      description: Run tests matching the focus regex pattern.
      type: string
    parallelism:
      default: 25
      description: The number of test nodes to run in parallel.
      type: integer
    skip:
      default: "\\[Flaky\\]|\\[Serial\\]"
      description: Skip tests matching the skip regex pattern.
      type: string
    timeout:
      default: 30000
      description: Timeout in nanoseconds
      type: integer
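Any of these parameters can be overridden when queuing the action, for example:

    juju run-action kubernetes-e2e/0 test parallelism=10 skip='\[Flaky\]'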
53
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-e2e/actions/test
generated
vendored
Executable file
@@ -0,0 +1,53 @@
#!/bin/bash

set -ex

export PATH="$PATH:/snap/bin"

# Grab the action parameter values
FOCUS=$(action-get focus)
SKIP=$(action-get skip)
PARALLELISM=$(action-get parallelism)

if [ ! -f /home/ubuntu/.kube/config ]
then
  action-fail "Missing Kubernetes configuration."
  action-set suggestion="Relate to the certificate authority, and kubernetes-master"
  exit 0
fi

# get the host from the config file
SERVER=$(cat /home/ubuntu/.kube/config | grep server | sed 's/ server: //')

ACTION_HOME=/home/ubuntu
ACTION_LOG=$ACTION_HOME/${JUJU_ACTION_UUID}.log
ACTION_LOG_TGZ=$ACTION_LOG.tar.gz
ACTION_JUNIT=$ACTION_HOME/${JUJU_ACTION_UUID}-junit
ACTION_JUNIT_TGZ=$ACTION_JUNIT.tar.gz

# This initializes an e2e build log with the START TIMESTAMP.
echo "JUJU_E2E_START=$(date -u +%s)" | tee $ACTION_LOG
echo "JUJU_E2E_VERSION=$(kubectl version | grep Server | cut -d " " -f 5 | cut -d ":" -f 2 | sed s/\"// | sed s/\",//)" | tee -a $ACTION_LOG
GINKGO_ARGS="-nodes=$PARALLELISM" kubernetes-test.e2e \
  -kubeconfig /home/ubuntu/.kube/config \
  -host $SERVER \
  -ginkgo.focus $FOCUS \
  -ginkgo.skip "$SKIP" \
  -report-dir $ACTION_JUNIT 2>&1 | tee -a $ACTION_LOG

# This appends the END TIMESTAMP to the e2e build log
echo "JUJU_E2E_END=$(date -u +%s)" | tee -a $ACTION_LOG

# Set cwd to /home/ubuntu and tar the artifacts using a minimal directory
# path. Extracting "home/ubuntu/1412341234/foobar.log" is cumbersome in CI.
cd $ACTION_HOME/${JUJU_ACTION_UUID}-junit
tar -czf $ACTION_JUNIT_TGZ *
cd ..
tar -czf $ACTION_LOG_TGZ ${JUJU_ACTION_UUID}.log

action-set log="$ACTION_LOG_TGZ"
action-set junit="$ACTION_JUNIT_TGZ"

if tail ${JUJU_ACTION_UUID}.log | grep -q "Test Suite Failed"; then
  action-fail "Failure detected in the logs"
fi
6
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-e2e/config.yaml
generated
vendored
Normal file
@ -0,0 +1,6 @@
options:
  channel:
    type: string
    default: "1.8/stable"
    description: |
      Snap channel to install Kubernetes snaps from
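Since `channel` is the charm's only option, retargeting the e2e tooling at
another Kubernetes release is a single command; a sketch (the channel value is
illustrative):

```shell
juju config kubernetes-e2e channel=1.9/stable
```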
362
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-e2e/icon.svg
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
12
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-e2e/layer.yaml
generated
vendored
Normal file
@ -0,0 +1,12 @@
repo: https://github.com/juju-solutions/layer-kubernetes-e2e
includes:
  - layer:basic
  - layer:tls-client
  - layer:snap
  - interface:http
  - interface:kube-control
options:
  tls-client:
    ca_certificate_path: '/srv/kubernetes/ca.crt'
    client_certificate_path: '/srv/kubernetes/client.crt'
    client_key_path: '/srv/kubernetes/client.key'
31
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-e2e/metadata.yaml
generated
vendored
Normal file
@ -0,0 +1,31 @@
name: kubernetes-e2e
summary: Run end-2-end validation of a cluster's conformance
maintainers:
  - Tim Van Steenburgh <tim.van.steenburgh@canonical.com>
  - George Kraft <george.kraft@canonical.com>
  - Rye Terrell <rye.terrell@canonical.com>
  - Konstantinos Tsakalozos <kos.tsakalozos@canonical.com>
  - Charles Butler <Chuck@dasroot.net>
  - Matthew Bruzek <mbruzek@ubuntu.com>
description: |
  Deploy the Kubernetes e2e framework and validate the conformance of a
  deployed Kubernetes cluster
tags:
  - validation
  - conformance
series:
  - xenial
requires:
  kubernetes-master:
    interface: http
  kube-control:
    interface: kube-control
resources:
  kubectl:
    type: file
    filename: kubectl.snap
    description: kubectl snap
  kubernetes-test:
    type: file
    filename: kubernetes-test.snap
    description: kubernetes-test snap
220
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-e2e/reactive/kubernetes_e2e.py
generated
vendored
Normal file
@ -0,0 +1,220 @@
#!/usr/bin/env python

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from charms import layer
from charms.layer import snap

from charms.reactive import hook
from charms.reactive import is_state
from charms.reactive import set_state
from charms.reactive import when
from charms.reactive import when_not
from charms.reactive.helpers import data_changed

from charmhelpers.core import hookenv, unitdata

from shlex import split

from subprocess import check_call
from subprocess import check_output

db = unitdata.kv()
USER = 'system:e2e'


@hook('upgrade-charm')
def reset_delivery_states():
    ''' Remove the state set when resources are unpacked. '''
    install_snaps()


@when('kubernetes-e2e.installed')
def report_status():
    ''' Report the status of the charm. '''
    messaging()


def messaging():
    ''' Probe our relations to determine the proper messaging to the
    end user '''

    missing_services = []
    if not is_state('kubernetes-master.available'):
        missing_services.append('kubernetes-master:http')
    if not is_state('certificates.available'):
        missing_services.append('certificates')
    if not is_state('kubeconfig.ready'):
        missing_services.append('kubernetes-master:kube-control')

    if missing_services:
        if len(missing_services) > 1:
            subject = 'relations'
        else:
            subject = 'relation'

        services = ','.join(missing_services)
        message = 'Missing {0}: {1}'.format(subject, services)
        hookenv.status_set('blocked', message)
        return

    hookenv.status_set('active', 'Ready to test.')


@when('config.changed.channel')
def channel_changed():
    install_snaps()


def install_snaps():
    ''' Deliver the e2e and kubectl components from the binary resource stream
    packages declared in the charm '''
    channel = hookenv.config('channel')
    hookenv.status_set('maintenance', 'Installing kubectl snap')
    snap.install('kubectl', channel=channel, classic=True)
    hookenv.status_set('maintenance', 'Installing kubernetes-test snap')
    snap.install('kubernetes-test', channel=channel, classic=True)
    set_state('kubernetes-e2e.installed')


@when('tls_client.ca.saved', 'tls_client.client.certificate.saved',
      'tls_client.client.key.saved', 'kubernetes-master.available',
      'kubernetes-e2e.installed', 'e2e.auth.bootstrapped')
@when_not('kubeconfig.ready')
def prepare_kubeconfig_certificates(master):
    ''' Prepare the data to feed to create the kubeconfig file. '''

    layer_options = layer.options('tls-client')
    # Get all the paths to the tls information required for kubeconfig.
    ca = layer_options.get('ca_certificate_path')
    creds = db.get('credentials')
    data_changed('kube-control.creds', creds)

    servers = get_kube_api_servers(master)

    # pedantry
    kubeconfig_path = '/home/ubuntu/.kube/config'

    # Create kubernetes configuration in the default location for ubuntu.
    create_kubeconfig('/root/.kube/config', servers[0], ca,
                      token=creds['client_token'], user='root')
    create_kubeconfig(kubeconfig_path, servers[0], ca,
                      token=creds['client_token'], user='ubuntu')
    # Set permissions on the ubuntu user's kubeconfig to ensure a consistent UX
    cmd = ['chown', 'ubuntu:ubuntu', kubeconfig_path]
    check_call(cmd)
    messaging()
    set_state('kubeconfig.ready')


@when('kube-control.connected')
def request_credentials(kube_control):
    """ Request authorization creds."""

    # Ask for a user, although we will be using the 'client_token'
    kube_control.set_auth_request(USER)


@when('kube-control.auth.available')
def catch_change_in_creds(kube_control):
    """Request a service restart in case credential updates were detected."""
    creds = kube_control.get_auth_credentials(USER)
    if creds \
            and data_changed('kube-control.creds', creds) \
            and creds['user'] == USER:
        # We need to cache the credentials here because if the
        # master changes (master leader dies and replaced by a new one)
        # the new master will have no recollection of our certs.
        db.set('credentials', creds)
        set_state('e2e.auth.bootstrapped')


@when('kubernetes-e2e.installed', 'kubeconfig.ready')
def set_app_version():
    ''' Declare the application version to juju '''
    cmd = ['kubectl', 'version', '--client']
    from subprocess import CalledProcessError
    try:
        version = check_output(cmd).decode('utf-8')
    except CalledProcessError:
        message = "Missing kubeconfig causes errors. Skipping version set."
        hookenv.log(message)
        return
    git_version = version.split('GitVersion:"v')[-1]
    version_from = git_version.split('",')[0]
    hookenv.application_version_set(version_from.rstrip())


def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
                      user='ubuntu', context='juju-context',
                      cluster='juju-cluster', password=None, token=None):
    '''Create a configuration for Kubernetes based on path using the supplied
    arguments for values of the Kubernetes server, CA, key, certificate, user
    context and cluster.'''
    if not key and not certificate and not password and not token:
        raise ValueError('Missing authentication mechanism.')

    # token and password are mutually exclusive. Error early if both are
    # present. The developer has requested an impossible situation.
    # see: kubectl config set-credentials --help
    if token and password:
        raise ValueError('Token and Password are mutually exclusive.')
    # Create the config file with the address of the master server.
    cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
          '--server={2} --certificate-authority={3} --embed-certs=true'
    check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
    # Delete old users
    cmd = 'kubectl config --kubeconfig={0} unset users'
    check_call(split(cmd.format(kubeconfig)))
    # Create the credentials using the client flags.
    cmd = 'kubectl config --kubeconfig={0} ' \
          'set-credentials {1} '.format(kubeconfig, user)

    if key and certificate:
        cmd = '{0} --client-key={1} --client-certificate={2} '\
              '--embed-certs=true'.format(cmd, key, certificate)
    if password:
        cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
    # This is mutually exclusive from password. They will not work together.
    if token:
        cmd = "{0} --token={1}".format(cmd, token)
    check_call(split(cmd))
    # Create a default context with the cluster.
    cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
          '--cluster={2} --user={3}'
    check_call(split(cmd.format(kubeconfig, context, cluster, user)))
    # Make the config use this new context.
    cmd = 'kubectl config --kubeconfig={0} use-context {1}'
    check_call(split(cmd.format(kubeconfig, context)))


def get_kube_api_servers(master):
    '''Return the kubernetes api server address and port for this
    relationship.'''
    hosts = []
    # Iterate over every service from the relation object.
    for service in master.services():
        for unit in service['hosts']:
            hosts.append('https://{0}:{1}'.format(unit['hostname'],
                                                  unit['port']))
    return hosts


def determine_arch():
    ''' dpkg wrapper to surface the architecture we are tied to'''
    cmd = ['dpkg', '--print-architecture']
    output = check_output(cmd).decode('utf-8')

    return output.rstrip()
12
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-e2e/tox.ini
generated
vendored
Normal file
@ -0,0 +1,12 @@
[tox]
skipsdist=True
envlist = py34, py35
skip_missing_interpreters = True

[testenv]
commands = py.test -v
deps =
    -r{toxinidir}/requirements.txt

[flake8]
exclude=docs
100
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/README.md
generated
vendored
Normal file
@ -0,0 +1,100 @@
# Kubernetes-master

[Kubernetes](http://kubernetes.io/) is an open source system for managing
application containers across a cluster of hosts. The Kubernetes project was
started by Google in 2014, combining the experience of running production
workloads with best practices from the community.

The Kubernetes project defines some new terms that may be unfamiliar to users
or operators. For more information please refer to the concept guide in the
[getting started guide](https://kubernetes.io/docs/home/).

This charm is an encapsulation of the Kubernetes master processes and the
operations to run on any cloud for the entire lifecycle of the cluster.

This charm is built from other charm layers using the Juju reactive framework.
The other layers each focus on a specific subset of operations, making this
layer specific to operations of the Kubernetes master processes.

# Deployment

This charm is not fully functional when deployed by itself. It requires other
charms to model a complete Kubernetes cluster. A Kubernetes cluster needs a
distributed key value store such as [Etcd](https://coreos.com/etcd/) and the
kubernetes-worker charm which delivers the Kubernetes node services. A cluster
requires a Software Defined Network (SDN) and Transport Layer Security (TLS) so
the components in a cluster communicate securely.

Please take a look at the [Canonical Distribution of Kubernetes](https://jujucharms.com/canonical-kubernetes/)
or the [Kubernetes core](https://jujucharms.com/kubernetes-core/) bundles for
examples of complete models of Kubernetes clusters.

# Resources

The kubernetes-master charm takes advantage of the [Juju Resources](https://jujucharms.com/docs/2.0/developer-resources)
feature to deliver the Kubernetes software.

In deployments on public clouds the Charm Store provides the resource to the
charm automatically with no user intervention. Some environments with strict
firewall rules may not be able to contact the Charm Store. In these network
restricted environments the resource can be uploaded to the model by the Juju
operator.

# Configuration

This charm supports some configuration options to set up a Kubernetes cluster
that works in your environment (a usage sketch follows the options below):

#### dns_domain

The domain name to use for the Kubernetes cluster for DNS.

#### enable-dashboard-addons

Enables the installation of the Kubernetes dashboard, Heapster, Grafana, and
InfluxDB.

#### enable-rbac

Enable RBAC and Node authorisation.
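A sketch of setting these options with the Juju CLI (the values shown are
illustrative):

```shell
juju config kubernetes-master dns_domain=k8s.example.internal \
    enable-dashboard-addons=false
```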
# DNS for the cluster

The DNS add-on allows pods to have DNS names in addition to IP addresses.
The Kubernetes cluster DNS server (based on the SkyDNS library) supports
forward lookups (A records), service lookups (SRV records) and reverse IP
address lookups (PTR records). More information about the DNS can be obtained
from the [Kubernetes DNS admin guide](http://kubernetes.io/docs/admin/dns/).

# Actions

The kubernetes-master charm models a few one-time operations called
[Juju actions](https://jujucharms.com/docs/stable/actions) that can be run by
Juju users.

#### create-rbd-pv

This action creates a RADOS Block Device (RBD) in Ceph and defines a Persistent
Volume in Kubernetes so the containers can use durable storage. This action
requires a relation to the ceph-mon charm before it can create the volume.

#### restart

This action restarts the master processes `kube-apiserver`,
`kube-controller-manager`, and `kube-scheduler` when the user needs a restart.
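A sketch of invoking these actions (the unit index and parameter values are
illustrative):

```shell
# Restart the control plane services on unit 0.
juju run-action kubernetes-master/0 restart

# Create a 50 MB RBD-backed PersistentVolume named "test".
juju run-action kubernetes-master/0 create-rbd-pv name=test size=50
```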
# More information

- [Kubernetes github project](https://github.com/kubernetes/kubernetes)
- [Kubernetes issue tracker](https://github.com/kubernetes/kubernetes/issues)
- [Kubernetes documentation](http://kubernetes.io/docs/)
- [Kubernetes releases](https://github.com/kubernetes/kubernetes/releases)

# Contact

The kubernetes-master charm is free and open source, created by the containers
team at Canonical.

Canonical also offers enterprise support and customization services. Please
refer to the [Kubernetes product page](https://www.ubuntu.com/cloud/kubernetes)
for more details.
50
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/actions.yaml
generated
vendored
Normal file
@ -0,0 +1,50 @@
restart:
  description: Restart the Kubernetes master services on demand.
create-rbd-pv:
  description: Create a RADOS Block Device (RBD) volume in Ceph and create a PersistentVolume.
  params:
    name:
      type: string
      description: Name the persistent volume.
      minLength: 1
    size:
      type: integer
      description: Size in MB of the RBD volume.
      minimum: 1
    mode:
      type: string
      default: ReadWriteOnce
      description: Access mode for the persistent volume.
    filesystem:
      type: string
      default: xfs
      description: File system type to format the volume.
    skip-size-check:
      type: boolean
      default: false
      description: Allow creation of overprovisioned RBD.
  required:
    - name
    - size
namespace-list:
  description: List existing k8s namespaces
namespace-create:
  description: Create new namespace
  params:
    name:
      type: string
      description: Namespace name, e.g. staging
      minLength: 2
  required:
    - name
namespace-delete:
  description: Delete namespace
  params:
    name:
      type: string
      description: Namespace name, e.g. staging
      minLength: 2
  required:
    - name
upgrade:
  description: Upgrade the kubernetes snaps
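A quick sketch of the namespace actions defined above (the namespace name is
illustrative):

```shell
juju run-action kubernetes-master/0 namespace-create name=staging
juju run-action kubernetes-master/0 namespace-list
juju run-action kubernetes-master/0 namespace-delete name=staging
```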
300
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/actions/create-rbd-pv
generated
vendored
Executable file
@ -0,0 +1,300 @@
#!/usr/bin/env python3

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from charmhelpers.core.templating import render
from charms.reactive import is_state
from charmhelpers.core.hookenv import action_get
from charmhelpers.core.hookenv import action_set
from charmhelpers.core.hookenv import action_fail
from subprocess import check_call
from subprocess import check_output
from subprocess import CalledProcessError
from tempfile import TemporaryDirectory
import json
import re
import os
import sys


os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')


def main():
    ''' Control logic to enlist Ceph RBD volumes as PersistentVolumes in
    Kubernetes. This will invoke the validation steps, and only execute if
    this script thinks the environment is 'sane' enough to provision volumes.
    '''

    # validate relationship pre-reqs before additional steps can be taken
    if not validate_relation():
        print('Failed ceph relationship check')
        action_fail('Failed ceph relationship check')
        return

    if not is_ceph_healthy():
        print('Ceph was not healthy.')
        action_fail('Ceph was not healthy.')
        return

    context = {}

    context['RBD_NAME'] = action_get_or_default('name').strip()
    context['RBD_SIZE'] = action_get_or_default('size')
    context['RBD_FS'] = action_get_or_default('filesystem').strip()
    context['PV_MODE'] = action_get_or_default('mode').strip()

    # Ensure we're not exceeding available space in the pool
    if not validate_space(context['RBD_SIZE']):
        return

    # Ensure our parameters match
    param_validation = validate_parameters(context['RBD_NAME'],
                                           context['RBD_FS'],
                                           context['PV_MODE'])
    if not param_validation == 0:
        return

    if not validate_unique_volume_name(context['RBD_NAME']):
        action_fail('Volume name collision detected. Volume creation aborted.')
        return

    context['monitors'] = get_monitors()

    # Invoke creation and format the mount device
    create_rbd_volume(context['RBD_NAME'],
                      context['RBD_SIZE'],
                      context['RBD_FS'])

    # Create a temporary workspace to render our persistentVolume template,
    # and enlist the RBD based PV we've just created
    with TemporaryDirectory() as active_working_path:
        temp_template = '{}/pv.yaml'.format(active_working_path)
        render('rbd-persistent-volume.yaml', temp_template, context)

        cmd = ['kubectl', 'create', '-f', temp_template]
        debug_command(cmd)
        check_call(cmd)


def action_get_or_default(key):
    ''' Convenience method to manage defaults since actions don't appear to
    properly support defaults '''

    value = action_get(key)
    if value:
        return value
    elif key == 'filesystem':
        return 'xfs'
    elif key == 'size':
        return 0
    elif key == 'mode':
        return "ReadWriteOnce"
    elif key == 'skip-size-check':
        return False
    else:
        return ''


def create_rbd_volume(name, size, filesystem):
    ''' Create the RBD volume in Ceph. Then mount it locally to format it for
    the requested filesystem.

    :param name - The name of the RBD volume
    :param size - The size in MB of the volume
    :param filesystem - The type of filesystem to format the block device
    '''

    # Create the rbd volume
    # $ rbd create foo --size 50 --image-feature layering
    command = ['rbd', 'create', '--size', '{}'.format(size), '--image-feature',
               'layering', name]
    debug_command(command)
    check_call(command)

    # Lift the validation sequence to determine if we actually created the
    # rbd volume
    if validate_unique_volume_name(name):
        # we failed to create the RBD volume. whoops
        action_fail('RBD Volume not listed after creation.')
        print('Ceph RBD volume {} not found in rbd list'.format(name))
        # hack, needs love if we're killing the process thread this deep in
        # the call stack.
        sys.exit(0)

    mount = ['rbd', 'map', name]
    debug_command(mount)
    device_path = check_output(mount).strip()

    try:
        format_command = ['mkfs.{}'.format(filesystem), device_path]
        debug_command(format_command)
        check_call(format_command)
        unmount = ['rbd', 'unmap', name]
        debug_command(unmount)
        check_call(unmount)
    except CalledProcessError:
        print('Failed to format filesystem and unmount. RBD created but not'
              ' enlisted.')
        action_fail('Failed to format filesystem and unmount.'
                    ' RBD created but not enlisted.')


def is_ceph_healthy():
    ''' Probe the remote ceph cluster for health status '''
    command = ['ceph', 'health']
    debug_command(command)
    health_output = check_output(command)
    if b'HEALTH_OK' in health_output:
        return True
    else:
        return False


def get_monitors():
    ''' Parse the monitors out of /etc/ceph/ceph.conf '''
    found_hosts = []
    # This is kind of hacky. We should be piping this in from juju relations
    with open('/etc/ceph/ceph.conf', 'r') as ceph_conf:
        for line in ceph_conf.readlines():
            if 'mon host' in line:
                # strip out the key definition
                hosts = line.lstrip('mon host = ').split(' ')
                for host in hosts:
                    found_hosts.append(host)
    return found_hosts


def get_available_space():
    ''' Determine the space available in the RBD pool. Throw an exception if
    the RBD pool ('rbd') isn't found. '''
    command = 'ceph df -f json'.split()
    debug_command(command)
    out = check_output(command).decode('utf-8')
    data = json.loads(out)
    for pool in data['pools']:
        if pool['name'] == 'rbd':
            return int(pool['stats']['max_avail'] / (1024 * 1024))
    raise UnknownAvailableSpaceException('Unable to determine available space.')  # noqa


def validate_unique_volume_name(name):
    ''' Poll the CEPH-MON services to determine if we have a unique rbd volume
    name to use. If there is a naming collision, block the request for volume
    provisioning.

    :param name - The name of the RBD volume
    '''

    command = ['rbd', 'list']
    debug_command(command)
    raw_out = check_output(command)

    # Split the output on newlines
    # output spec:
    # $ rbd list
    # foo
    # foobar
    volume_list = raw_out.decode('utf-8').splitlines()

    for volume in volume_list:
        if volume.strip() == name:
            return False

    return True


def validate_relation():
    ''' Determine if we are related to ceph. If we are not, we should
    note this in the action output and fail this action run. We are relying
    on specific files in specific paths to be placed in order for this function
    to work. This method verifies those files are placed. '''

    # TODO: Validate that the ceph-common package is installed
    if not is_state('ceph-storage.available'):
        message = 'Failed to detect connected ceph-mon'
        print(message)
        action_set({'pre-req.ceph-relation': message})
        return False

    if not os.path.isfile('/etc/ceph/ceph.conf'):
        message = 'No Ceph configuration found in /etc/ceph/ceph.conf'
        print(message)
        action_set({'pre-req.ceph-configuration': message})
        return False

    # TODO: Validate ceph key

    return True


def validate_space(size):
    if action_get_or_default('skip-size-check'):
        return True
    available_space = get_available_space()
    if available_space < size:
        msg = 'Unable to allocate RBD of size {}MB, only {}MB are available.'
        action_fail(msg.format(size, available_space))
        return False
    return True


def validate_parameters(name, fs, mode):
    ''' Validate the user inputs to ensure they conform to what the
    action expects. This method will check the naming characters used
    for the rbd volume, ensure they have selected a fstype we are expecting
    and the mode against our whitelist '''
    name_regex = '^[a-zA-Z0-9][a-zA-Z0-9|-]'

    fs_whitelist = ['xfs', 'ext4']

    # see http://kubernetes.io/docs/user-guide/persistent-volumes/#access-modes
    # for supported operations on RBD volumes.
    mode_whitelist = ['ReadWriteOnce', 'ReadOnlyMany']

    fails = 0

    if not re.match(name_regex, name):
        message = 'Validation failed for RBD volume-name'
        action_fail(message)
        fails = fails + 1
        action_set({'validation.name': message})

    if fs not in fs_whitelist:
        message = 'Validation failed for file system'
        action_fail(message)
        fails = fails + 1
        action_set({'validation.filesystem': message})

    if mode not in mode_whitelist:
        message = "Validation failed for mode"
        action_fail(message)
        fails = fails + 1
        action_set({'validation.mode': message})

    return fails


def debug_command(cmd):
    ''' Print a debug statement of the command invoked '''
    print("Invoking {}".format(cmd))


class UnknownAvailableSpaceException(Exception):
    pass


if __name__ == '__main__':
    main()
59
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/actions/namespace-create
generated
vendored
Executable file
@ -0,0 +1,59 @@
#!/usr/bin/env python3
import os
from yaml import safe_load as load
from charmhelpers.core.hookenv import (
    action_get,
    action_set,
    action_fail,
    action_name
)
from charmhelpers.core.templating import render
from subprocess import check_output


os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')


def kubectl(args):
    cmd = ['kubectl'] + args
    return check_output(cmd)


def namespace_list():
    y = load(kubectl(['get', 'namespaces', '-o', 'yaml']))
    ns = [i['metadata']['name'] for i in y['items']]
    action_set({'namespaces': ', '.join(ns)+'.'})
    return ns


def namespace_create():
    name = action_get('name')
    if name in namespace_list():
        action_fail('Namespace "{}" already exists.'.format(name))
        return

    render('create-namespace.yaml.j2',
           '/etc/kubernetes/addons/create-namespace.yaml',
           context={'name': name})
    kubectl(['create', '-f', '/etc/kubernetes/addons/create-namespace.yaml'])
    action_set({'msg': 'Namespace "{}" created.'.format(name)})


def namespace_delete():
    name = action_get('name')
    if name in ['default', 'kube-system']:
        action_fail('Not allowed to delete "{}".'.format(name))
        return
    if name not in namespace_list():
        action_fail('Namespace "{}" does not exist.'.format(name))
        return
    kubectl(['delete', 'ns/'+name])
    action_set({'msg': 'Namespace "{}" deleted.'.format(name)})


action = action_name().replace('namespace-', '')
if action == 'create':
    namespace_create()
elif action == 'list':
    namespace_list()
elif action == 'delete':
    namespace_delete()
1
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/actions/namespace-delete
generated
vendored
Symbolic link
@ -0,0 +1 @@
namespace-create
1
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/actions/namespace-list
generated
vendored
Symbolic link
@ -0,0 +1 @@
namespace-create
14
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/actions/restart
generated
vendored
Executable file
@ -0,0 +1,14 @@
#!/bin/bash

set +ex

# Restart the apiserver, controller-manager, and scheduler

systemctl restart snap.kube-apiserver.daemon
action-set apiserver.status='restarted'

systemctl restart snap.kube-controller-manager.daemon
action-set controller-manager.status='restarted'

systemctl restart snap.kube-scheduler.daemon
action-set kube-scheduler.status='restarted'
5
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/actions/upgrade
generated
vendored
Executable file
@ -0,0 +1,5 @@
#!/bin/sh
set -eux

charms.reactive set_state kubernetes-master.upgrade-specified
exec hooks/config-changed
78
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/config.yaml
generated
vendored
Normal file
@ -0,0 +1,78 @@
options:
  enable-dashboard-addons:
    type: boolean
    default: True
    description: Deploy the Kubernetes Dashboard and Heapster addons
  dns_domain:
    type: string
    default: cluster.local
    description: The local domain for cluster dns
  extra_sans:
    type: string
    default: ""
    description: |
      Space-separated list of extra SAN entries to add to the x509 certificate
      created for the master nodes.
  service-cidr:
    type: string
    default: 10.152.183.0/24
    description: CIDR to use for Kubernetes services. Cannot be changed after deployment.
  allow-privileged:
    type: string
    default: "auto"
    description: |
      Allow kube-apiserver to run in privileged mode. Supported values are
      "true", "false", and "auto". If "true", kube-apiserver will run in
      privileged mode by default. If "false", kube-apiserver will never run in
      privileged mode. If "auto", kube-apiserver will not run in privileged
      mode by default, but will switch to privileged mode if gpu hardware is
      detected on a worker node.
  channel:
    type: string
    default: "1.8/stable"
    description: |
      Snap channel to install Kubernetes master services from
  client_password:
    type: string
    default: ""
    description: |
      Password to be used for admin user (leave empty for random password).
  api-extra-args:
    type: string
    default: ""
    description: |
      Space separated list of flags and key=value pairs that will be passed as arguments to
      kube-apiserver. For example a value like this:
        runtime-config=batch/v2alpha1=true profiling=true
      will result in kube-apiserver being run with the following options:
        --runtime-config=batch/v2alpha1=true --profiling=true
  controller-manager-extra-args:
    type: string
    default: ""
    description: |
      Space separated list of flags and key=value pairs that will be passed as arguments to
      kube-controller-manager. For example a value like this:
        runtime-config=batch/v2alpha1=true profiling=true
      will result in kube-controller-manager being run with the following options:
        --runtime-config=batch/v2alpha1=true --profiling=true
  scheduler-extra-args:
    type: string
    default: ""
    description: |
      Space separated list of flags and key=value pairs that will be passed as arguments to
      kube-scheduler. For example a value like this:
        runtime-config=batch/v2alpha1=true profiling=true
      will result in kube-scheduler being run with the following options:
        --runtime-config=batch/v2alpha1=true --profiling=true
  authorization-mode:
    type: string
    default: "AlwaysAllow"
    description: |
      Comma separated authorization modes. Allowed values are
      "RBAC", "Node", "Webhook", "ABAC", "AlwaysDeny" and "AlwaysAllow".
  require-manual-upgrade:
    type: boolean
    default: true
    description: |
      When true, master nodes will not be upgraded until the user triggers
      it manually by running the upgrade action.
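As a concrete sketch of the extra-args plumbing described above, set from the
Juju CLI:

```shell
juju config kubernetes-master \
    api-extra-args="runtime-config=batch/v2alpha1=true profiling=true"
```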
13
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/copyright
generated
vendored
Normal file
@ -0,0 +1,13 @@
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
15
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/debug-scripts/kubectl
generated
vendored
Executable file
@ -0,0 +1,15 @@
#!/bin/sh
set -ux

export PATH=$PATH:/snap/bin

alias kubectl="kubectl --kubeconfig=/home/ubuntu/config"

kubectl cluster-info > $DEBUG_SCRIPT_DIR/cluster-info
kubectl cluster-info dump > $DEBUG_SCRIPT_DIR/cluster-info-dump
for obj in pods svc ingress secrets pv pvc rc; do
  kubectl describe $obj --all-namespaces > $DEBUG_SCRIPT_DIR/describe-$obj
done
for obj in nodes; do
  kubectl describe $obj > $DEBUG_SCRIPT_DIR/describe-$obj
done
9
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/debug-scripts/kubernetes-master-services
generated
vendored
Executable file
@ -0,0 +1,9 @@
#!/bin/sh
set -ux

for service in kube-apiserver kube-controller-manager kube-scheduler; do
  systemctl status snap.$service.daemon > $DEBUG_SCRIPT_DIR/$service-systemctl-status
  journalctl -u snap.$service.daemon > $DEBUG_SCRIPT_DIR/$service-journal
done

# FIXME: grab snap config or something
17
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/exec.d/vmware-patch/charm-pre-install
generated
vendored
Executable file
@ -0,0 +1,17 @@
#!/bin/bash
MY_HOSTNAME=$(hostname)

: ${JUJU_UNIT_NAME:=`uuidgen`}


if [ "${MY_HOSTNAME}" == "ubuntuguest" ]; then
  juju-log "Detected broken vsphere integration. Applying hostname override"

  FRIENDLY_HOSTNAME=$(echo $JUJU_UNIT_NAME | tr / -)
  juju-log "Setting hostname to $FRIENDLY_HOSTNAME"
  if [ ! -f /etc/hostname.orig ]; then
    mv /etc/hostname /etc/hostname.orig
  fi
  echo "${FRIENDLY_HOSTNAME}" > /etc/hostname
  hostname $FRIENDLY_HOSTNAME
fi
362
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/icon.svg
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
32
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/layer.yaml
generated
vendored
Normal file
@ -0,0 +1,32 @@
repo: https://github.com/kubernetes/kubernetes.git
includes:
  - 'layer:basic'
  - 'layer:snap'
  - 'layer:tls-client'
  - 'layer:leadership'
  - 'layer:debug'
  - 'layer:metrics'
  - 'layer:nagios'
  - 'layer:cdk-service-kicker'
  - 'interface:ceph-admin'
  - 'interface:etcd'
  - 'interface:http'
  - 'interface:kubernetes-cni'
  - 'interface:kube-dns'
  - 'interface:kube-control'
  - 'interface:public-address'
options:
  basic:
    packages:
      - socat
  tls-client:
    ca_certificate_path: '/root/cdk/ca.crt'
    server_certificate_path: '/root/cdk/server.crt'
    server_key_path: '/root/cdk/server.key'
    client_certificate_path: '/root/cdk/client.crt'
    client_key_path: '/root/cdk/client.key'
  cdk-service-kicker:
    services:
      - snap.kube-apiserver.daemon
      - snap.kube-controller-manager.daemon
      - snap.kube-scheduler.daemon
71
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/lib/charms/kubernetes/common.py
generated
vendored
Normal file
@ -0,0 +1,71 @@
#!/usr/bin/env python

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re
import subprocess

from time import sleep


def get_version(bin_name):
    """Get the version of an installed Kubernetes binary.

    :param str bin_name: Name of binary
    :return: 3-tuple version (maj, min, patch)

    Example::

        >>> get_version('kubelet')
        (1, 6, 0)

    """
    cmd = '{} --version'.format(bin_name).split()
    version_string = subprocess.check_output(cmd).decode('utf-8')
    return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3])


def retry(times, delay_secs):
    """ Decorator for retrying a method call.

    Args:
        times: How many times should we retry before giving up
        delay_secs: Delay in secs

    Returns: A callable that would return the last call outcome
    """

    def retry_decorator(func):
        """ Decorator to wrap the function provided.

        Args:
            func: Provided function should return either True or False

        Returns: A callable that would return the last call outcome

        """
        def _wrapped(*args, **kwargs):
            res = func(*args, **kwargs)
            attempt = 0
            while not res and attempt < times:
                sleep(delay_secs)
                res = func(*args, **kwargs)
                if res:
                    break
                attempt += 1
            return res
        return _wrapped

    return retry_decorator
63
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/metadata.yaml
generated
vendored
Normal file
@ -0,0 +1,63 @@
name: kubernetes-master
summary: The Kubernetes control plane.
maintainers:
  - Tim Van Steenburgh <tim.van.steenburgh@canonical.com>
  - George Kraft <george.kraft@canonical.com>
  - Rye Terrell <rye.terrell@canonical.com>
  - Konstantinos Tsakalozos <kos.tsakalozos@canonical.com>
  - Charles Butler <Chuck@dasroot.net>
  - Matthew Bruzek <mbruzek@ubuntu.com>
description: |
  Kubernetes is an open-source platform for deploying, scaling, and operations
  of application containers across a cluster of hosts. Kubernetes is portable
  in that it works with public, private, and hybrid clouds. It is extensible
  through a pluggable infrastructure, and self-healing in that it will
  automatically restart and place containers on healthy nodes if a node ever
  goes away.
tags:
  - infrastructure
  - kubernetes
  - master
subordinate: false
series:
  - xenial
provides:
  kube-api-endpoint:
    interface: http
  cluster-dns:
    # kube-dns is deprecated. Its functionality has been rolled into the
    # kube-control interface. The cluster-dns relation will be removed in
    # a future release.
    interface: kube-dns
  kube-control:
    interface: kube-control
  cni:
    interface: kubernetes-cni
    scope: container
requires:
  etcd:
    interface: etcd
  loadbalancer:
    interface: public-address
  ceph-storage:
    interface: ceph-admin
resources:
  kubectl:
    type: file
    filename: kubectl.snap
    description: kubectl snap
  kube-apiserver:
    type: file
    filename: kube-apiserver.snap
    description: kube-apiserver snap
  kube-controller-manager:
    type: file
    filename: kube-controller-manager.snap
    description: kube-controller-manager snap
  kube-scheduler:
    type: file
    filename: kube-scheduler.snap
    description: kube-scheduler snap
  cdk-addons:
    type: file
    filename: cdk-addons.snap
    description: CDK addons snap
34
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/metrics.yaml
generated
vendored
Normal file
@ -0,0 +1,34 @@
metrics:
  juju-units: {}
  pods:
    type: gauge
    description: number of pods
    command: /snap/bin/kubectl get po --all-namespaces | tail -n+2 | wc -l
  services:
    type: gauge
    description: number of services
    command: /snap/bin/kubectl get svc --all-namespaces | tail -n+2 | wc -l
  replicasets:
    type: gauge
    description: number of replicasets
    command: /snap/bin/kubectl get rs --all-namespaces | tail -n+2 | wc -l
  replicationcontrollers:
    type: gauge
    description: number of replicationcontrollers
    command: /snap/bin/kubectl get rc --all-namespaces | tail -n+2 | wc -l
  nodes:
    type: gauge
    description: number of kubernetes nodes
    command: /snap/bin/kubectl get nodes | tail -n+2 | wc -l
  persistentvolume:
    type: gauge
    description: number of pv
    command: /snap/bin/kubectl get pv | tail -n+2 | wc -l
  persistentvolumeclaims:
    type: gauge
    description: number of claims
    command: /snap/bin/kubectl get pvc --all-namespaces | tail -n+2 | wc -l
  serviceaccounts:
    type: gauge
    description: number of sa
    command: /snap/bin/kubectl get sa --all-namespaces | tail -n+2 | wc -l
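Once collected, these gauges can be sampled from the client side; a sketch,
assuming a Juju 2.x CLI:

```shell
# Show the most recent metric values reported by the charm's units.
juju metrics kubernetes-master
```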
1235
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py
generated
vendored
Normal file
File diff suppressed because it is too large
7
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/templates/ceph-secret.yaml
generated
vendored
Normal file
@ -0,0 +1,7 @@
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret
type: kubernetes.io/rbd
data:
  key: {{ secret }}
18
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/templates/ceph.conf
generated
vendored
Normal file
@ -0,0 +1,18 @@
[global]
auth cluster required = {{ auth_supported }}
auth service required = {{ auth_supported }}
auth client required = {{ auth_supported }}
keyring = /etc/ceph/$cluster.$name.keyring
mon host = {{ mon_hosts }}
fsid = {{ fsid }}

log to syslog = {{ use_syslog }}
err to syslog = {{ use_syslog }}
clog to syslog = {{ use_syslog }}
mon cluster log to syslog = {{ use_syslog }}
debug mon = {{ loglevel }}/5
debug osd = {{ loglevel }}/5

[client]
log file = /var/log/ceph.log
6
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/templates/create-namespace.yaml.j2
generated
vendored
Normal file
@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
  name: {{ name }}
  labels:
    name: {{ name }}
25
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/templates/rbd-persistent-volume.yaml
generated
vendored
Normal file
@ -0,0 +1,25 @@
# JUJU Internal Template used to enlist RBD volumes from the
# `create-rbd-pv` action. This is a temporary file on disk to enlist resources.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: {{ RBD_NAME }}
spec:
  capacity:
    storage: {{ RBD_SIZE }}M
  accessModes:
    - {{ PV_MODE }}
  storageClassName: "rbd"
  rbd:
    monitors:
{% for host in monitors %}
      - {{ host }}
{% endfor %}
    pool: rbd
    image: {{ RBD_NAME }}
    user: admin
    secretRef:
      name: ceph-secret
    fsType: {{ RBD_FS }}
    readOnly: false
  # persistentVolumeReclaimPolicy: Recycle
12
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/tox.ini
generated
vendored
Normal file
@ -0,0 +1,12 @@
[tox]
skipsdist=True
envlist = py34, py35
skip_missing_interpreters = True

[testenv]
commands = py.test -v
deps =
    -r{toxinidir}/requirements.txt

[flake8]
exclude=docs
25
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/HACKING.md
generated
vendored
Normal file
@ -0,0 +1,25 @@
# Kubernetes Worker

### Building from the layer

You can clone the kubernetes-worker layer with git and build locally if you
have the charm package/snap installed.

```shell
# Install the snap
sudo snap install charm --channel=edge

# Set the build environment
export JUJU_REPOSITORY=$HOME

# Clone the layer and build it to our JUJU_REPOSITORY
git clone https://github.com/juju-solutions/kubernetes
cd kubernetes/cluster/juju/layers/kubernetes-worker
charm build -r
```

### Contributing

TBD
100
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/README.md
generated
vendored
Normal file
@ -0,0 +1,100 @@
# Kubernetes Worker

## Usage

This charm deploys a container runtime, and additionally stands up the
Kubernetes worker applications: kubelet, and kube-proxy.

In order for this charm to be useful, it should be deployed with its companion
charm [kubernetes-master](https://jujucharms.com/u/containers/kubernetes-master)
and linked with an SDN plugin.

This charm has also been bundled up for your convenience so you can skip the
above steps, and deploy it with a single command:

```shell
juju deploy canonical-kubernetes
```

For more information about [Canonical Kubernetes](https://jujucharms.com/canonical-kubernetes)
consult the bundle `README.md` file.


## Scale out

To add additional compute capacity to your Kubernetes workers, you may
`juju add-unit` scale the cluster of applications. They will automatically
join any related kubernetes-master, and enlist themselves as ready once the
deployment is complete.
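A sketch of adding two more workers:

```shell
juju add-unit kubernetes-worker -n 2
```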
## Operational actions

The kubernetes-worker charm supports the following Operational Actions:

#### Pause

Pausing the workload enables administrators to both [drain](http://kubernetes.io/docs/user-guide/kubectl/kubectl_drain/) and [cordon](http://kubernetes.io/docs/user-guide/kubectl/kubectl_cordon/)
a unit for maintenance.


#### Resume

Resuming the workload will [uncordon](http://kubernetes.io/docs/user-guide/kubectl/kubectl_uncordon/) a paused unit. Workloads will automatically migrate unless otherwise directed via their application declaration.
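A sketch of a maintenance window built from these two actions (the unit index
is illustrative):

```shell
juju run-action kubernetes-worker/0 pause
# ...perform maintenance on the unit...
juju run-action kubernetes-worker/0 resume
```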
## Private registry

With the "registry" action that is part of the kubernetes-worker charm, you can
easily create a private docker registry, with authentication, available over
TLS. Please note that the registry deployed with the action is not HA, and uses
storage tied to the kubernetes node where the pod is running. So if the
registry pod is migrated from one node to another for whatever reason, you will
need to re-publish the images.

### Example usage

Create the relevant authentication files. Let's say you want user `userA` to
authenticate with the password `passwordA`. Then you'll do:

    echo -n "userA:passwordA" > htpasswd-plain
    htpasswd -c -b -B htpasswd userA passwordA

(the `htpasswd` program comes with the `apache2-utils` package)

Supposing your registry will be reachable at `myregistry.company.com`, and that
you already have your TLS key in the `registry.key` file, and your TLS
certificate (with `myregistry.company.com` as Common Name) in the
`registry.crt` file, you would then run:

    juju run-action kubernetes-worker/0 registry domain=myregistry.company.com htpasswd="$(base64 -w0 htpasswd)" htpasswd-plain="$(base64 -w0 htpasswd-plain)" tlscert="$(base64 -w0 registry.crt)" tlskey="$(base64 -w0 registry.key)" ingress=true

If you then decide that you want to delete the registry, just run:

    juju run-action kubernetes-worker/0 registry delete=true ingress=true
## Known Limitations

Kubernetes workers currently only support 'phaux' HA scenarios. Even when configured with an HA cluster string, they will only ever contact the first unit in the cluster map. To enable a proper HA story, kubernetes-worker units are encouraged to proxy through a [kubeapi-load-balancer](https://jujucharms.com/kubeapi-load-balancer)
application. This enables an HA deployment without the need to
re-render configuration and disrupt the worker services.

External access to pods must be performed through a [Kubernetes
Ingress Resource](http://kubernetes.io/docs/user-guide/ingress/).

When using NodePort type networking, there is no automation in exposing the
ports selected by kubernetes or chosen by the user. They will need to be
opened manually, which can be done across an entire worker pool.

If your selected NodePort service port is `30510`, you can open it across all
members of a worker pool named `kubernetes-worker` like so:

```
juju run --application kubernetes-worker open-port 30510/tcp
```

Don't forget to expose the kubernetes-worker application if it's not already
exposed, as this can cause confusion once the port has been opened and the
service is not reachable.

Note: When debugging connection issues with NodePort services, it's important
to first check the kube-proxy service on the worker units. If kube-proxy is not
running, the associated port-mapping will not be configured in the iptables
rulechains.

If you need to close the NodePort once a workload has been terminated, you can
follow the same steps inversely.

```
juju run --application kubernetes-worker close-port 30510
```
56
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/actions.yaml
generated
vendored
Normal file
@ -0,0 +1,56 @@
pause:
  description: |
    Cordon the unit, draining all active workloads.
  params:
    delete-local-data:
      type: boolean
      description: Force deletion of local storage to enable a drain
      default: False
    force:
      type: boolean
      description: |
        Continue even if there are pods not managed by a RC, RS, Job, DS or SS
      default: False

resume:
  description: |
    UnCordon the unit, enabling workload scheduling.
microbot:
  description: Launch microbot containers
  params:
    replicas:
      type: integer
      default: 3
      description: Number of microbots to launch in Kubernetes.
    delete:
      type: boolean
      default: False
      description: Remove a microbots deployment, service, and ingress if True.
upgrade:
  description: Upgrade the kubernetes snaps
registry:
  description: Create a private Docker registry
  params:
    htpasswd:
      type: string
      description: base64 encoded htpasswd file used for authentication.
    htpasswd-plain:
      type: string
      description: base64 encoded plaintext version of the htpasswd file, needed by docker daemons to authenticate to the registry.
    tlscert:
      type: string
      description: base64 encoded TLS certificate for the registry. Common Name must match the domain name of the registry.
    tlskey:
      type: string
      description: base64 encoded TLS key for the registry.
    domain:
      type: string
      description: The domain name for the registry. Must match the Common Name of the certificate.
    ingress:
      type: boolean
      default: false
      description: Create an Ingress resource for the registry (or delete resource object if "delete" is True)
    delete:
      type: boolean
      default: false
      description: Remove a registry replication controller, service, and ingress if True.
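As a usage sketch for the actions declared above (hypothetical unit index;
the registry and pause/resume workflows are covered in the charm README):

    juju run-action kubernetes-worker/0 microbot replicas=5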
73
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/actions/microbot
generated
vendored
Executable file
@ -0,0 +1,73 @@
#!/usr/bin/env python3

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys

from charmhelpers.core.hookenv import action_get
from charmhelpers.core.hookenv import action_set
from charmhelpers.core.hookenv import unit_public_ip
from charms.templating.jinja2 import render
from subprocess import call

os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')

context = {}
context['replicas'] = action_get('replicas')
context['delete'] = action_get('delete')
context['public_address'] = unit_public_ip()

if not context['replicas']:
    context['replicas'] = 3

# Declare a kubectl template when invoking kubectl
kubectl = ['kubectl', '--kubeconfig=/root/.kube/config']

# Remove deployment if requested
if context['delete']:
    service_del = kubectl + ['delete', 'svc', 'microbot']
    service_response = call(service_del)
    deploy_del = kubectl + ['delete', 'deployment', 'microbot']
    deploy_response = call(deploy_del)
    ingress_del = kubectl + ['delete', 'ing', 'microbot-ingress']
    ingress_response = call(ingress_del)

    if ingress_response != 0:
        action_set({'microbot-ing':
                    'Failed removal of microbot ingress resource.'})
    if deploy_response != 0:
        action_set({'microbot-deployment':
                    'Failed removal of microbot deployment resource.'})
    if service_response != 0:
        action_set({'microbot-service':
                    'Failed removal of microbot service resource.'})
    sys.exit(0)

# Creation request

render('microbot-example.yaml', '/root/cdk/addons/microbot.yaml',
       context)

create_command = kubectl + ['create', '-f',
                            '/root/cdk/addons/microbot.yaml']

create_response = call(create_command)

if create_response == 0:
    action_set({'address':
                'microbot.{}.xip.io'.format(context['public_address'])})
else:
    action_set({'microbot-create': 'Failed microbot creation.'})
28
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/actions/pause
generated
vendored
Executable file
@ -0,0 +1,28 @@
#!/bin/bash

set -ex

export PATH=$PATH:/snap/bin

DELETE_LOCAL_DATA=$(action-get delete-local-data)
FORCE=$(action-get force)

# placeholder for additional flags to the command
export EXTRA_FLAGS=""

# Determine if we have extra flags
if [[ "${DELETE_LOCAL_DATA}" == "True" || "${DELETE_LOCAL_DATA}" == "true" ]]; then
  EXTRA_FLAGS="${EXTRA_FLAGS} --delete-local-data=true"
fi

if [[ "${FORCE}" == "True" || "${FORCE}" == "true" ]]; then
  EXTRA_FLAGS="${EXTRA_FLAGS} --force"
fi


# Cordon and drain the unit
kubectl --kubeconfig=/root/.kube/config cordon $(hostname)
kubectl --kubeconfig=/root/.kube/config drain $(hostname) ${EXTRA_FLAGS}

# Set status to indicate the unit is paused and under maintenance.
status-set 'waiting' 'Kubernetes unit paused'
136
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/actions/registry
generated
vendored
Executable file
@ -0,0 +1,136 @@
#!/usr/bin/python3
#
# For usage examples, see README.md
#
# TODO
#
# - make the action idempotent (i.e. if you run it multiple times, the first
# run will create/delete the registry, and the rest will be a no-op and won't
# error out)
#
# - take only a plain authentication file, and create the encrypted version in
# the action
#
# - validate the parameters (make sure tlscert is a certificate, that tlskey is a
# proper key, etc)
#
# - when https://bugs.launchpad.net/juju/+bug/1661015 is fixed, handle the
# base64 encoding of the parameters in the action itself

import os
import sys

from base64 import b64encode

from charmhelpers.core.hookenv import action_get
from charmhelpers.core.hookenv import action_set
from charms.templating.jinja2 import render
from subprocess import call

os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')

deletion = action_get('delete')

context = {}

# These config options must be defined in the case of a creation
param_error = False
for param in ('tlscert', 'tlskey', 'domain', 'htpasswd', 'htpasswd-plain'):
    value = action_get(param)
    if not value and not deletion:
        key = "registry-create-parameter-{}".format(param)
        error = "failure, parameter {} is required".format(param)
        action_set({key: error})
        param_error = True

    context[param] = value

# Create the dockercfg template variable
dockercfg = '{"%s": {"auth": "%s", "email": "root@localhost"}}' % \
    (context['domain'], context['htpasswd-plain'])
context['dockercfg'] = b64encode(dockercfg.encode()).decode('ASCII')

if param_error:
    sys.exit(0)

# This one is either true or false, no need to check if it has a "good" value.
context['ingress'] = action_get('ingress')

# Declare a kubectl template when invoking kubectl
kubectl = ['kubectl', '--kubeconfig=/root/.kube/config']

# Remove deployment if requested
if deletion:
    resources = ['svc/kube-registry', 'rc/kube-registry-v0', 'secrets/registry-tls-data',
                 'secrets/registry-auth-data', 'secrets/registry-access']

    if action_get('ingress'):
        resources.append('ing/registry-ing')

    delete_command = kubectl + ['delete', '--ignore-not-found=true'] + resources
    delete_response = call(delete_command)
    if delete_response == 0:
        action_set({'registry-delete': 'success'})
    else:
        action_set({'registry-delete': 'failure'})

    sys.exit(0)

# Creation request
render('registry.yaml', '/root/cdk/addons/registry.yaml',
       context)

create_command = kubectl + ['create', '-f',
                            '/root/cdk/addons/registry.yaml']

create_response = call(create_command)

if create_response == 0:
    action_set({'registry-create': 'success'})

    # Create a ConfigMap if it doesn't exist yet, else patch it.
    # A ConfigMap is needed to change the default value for nginx' client_max_body_size.
    # The default is 1MB, and this is the maximum size of images that can be
    # pushed on the registry. 1MB images aren't useful, so we bump this value to 1024MB.
    cm_name = 'nginx-load-balancer-conf'
    check_cm_command = kubectl + ['get', 'cm', cm_name]
    check_cm_response = call(check_cm_command)

    if check_cm_response == 0:
        # There is an existing ConfigMap, patch it
        patch = '{"data":{"body-size":"1024m"}}'
        patch_cm_command = kubectl + ['patch', 'cm', cm_name, '-p', patch]
        patch_cm_response = call(patch_cm_command)

        if patch_cm_response == 0:
            action_set({'configmap-patch': 'success'})
        else:
            action_set({'configmap-patch': 'failure'})

    else:
        # No existing ConfigMap, create it
        render('registry-configmap.yaml', '/root/cdk/addons/registry-configmap.yaml',
               context)
        create_cm_command = kubectl + ['create', '-f', '/root/cdk/addons/registry-configmap.yaml']
        create_cm_response = call(create_cm_command)

        if create_cm_response == 0:
            action_set({'configmap-create': 'success'})
        else:
            action_set({'configmap-create': 'failure'})

    # Patch the "default" serviceaccount with an imagePullSecret.
    # This will allow the docker daemons to authenticate to our private
    # registry automatically
    patch = '{"imagePullSecrets":[{"name":"registry-access"}]}'
    patch_sa_command = kubectl + ['patch', 'sa', 'default', '-p', patch]
    patch_sa_response = call(patch_sa_command)

    if patch_sa_response == 0:
        action_set({'serviceaccount-patch': 'success'})
    else:
        action_set({'serviceaccount-patch': 'failure'})


else:
    action_set({'registry-create': 'failure'})
8
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/actions/resume
generated
vendored
Executable file
@ -0,0 +1,8 @@
#!/bin/bash

set -ex

export PATH=$PATH:/snap/bin

kubectl --kubeconfig=/root/.kube/config uncordon $(hostname)
status-set 'active' 'Kubernetes unit resumed'
5
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/actions/upgrade
generated
vendored
Executable file
@ -0,0 +1,5 @@
#!/bin/sh
set -eux

charms.reactive set_state kubernetes-worker.snaps.upgrade-specified
exec hooks/config-changed
51
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/config.yaml
generated
vendored
Normal file
@ -0,0 +1,51 @@
options:
  ingress:
    type: boolean
    default: true
    description: |
      Deploy the default http backend and ingress controller to handle
      ingress requests.
  labels:
    type: string
    default: ""
    description: |
      Labels can be used to organize and to select subsets of nodes in the
      cluster. Declare node labels in key=value format, separated by spaces.
  allow-privileged:
    type: string
    default: "auto"
    description: |
      Allow privileged containers to run on worker nodes. Supported values are
      "true", "false", and "auto". If "true", kubelet will run in privileged
      mode by default. If "false", kubelet will never run in privileged mode.
      If "auto", kubelet will not run in privileged mode by default, but will
      switch to privileged mode if gpu hardware is detected.
  channel:
    type: string
    default: "1.8/stable"
    description: |
      Snap channel to install Kubernetes worker services from
  require-manual-upgrade:
    type: boolean
    default: true
    description: |
      When true, worker services will not be upgraded until the user triggers
      it manually by running the upgrade action.
  kubelet-extra-args:
    type: string
    default: ""
    description: |
      Space separated list of flags and key=value pairs that will be passed as arguments to
      kubelet. For example a value like this:
        runtime-config=batch/v2alpha1=true profiling=true
      will result in kubelet being run with the following options:
        --runtime-config=batch/v2alpha1=true --profiling=true
  proxy-extra-args:
    type: string
    default: ""
    description: |
      Space separated list of flags and key=value pairs that will be passed as arguments to
      kube-proxy. For example a value like this:
        runtime-config=batch/v2alpha1=true profiling=true
      will result in kube-proxy being run with the following options:
        --runtime-config=batch/v2alpha1=true --profiling=true
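As a usage sketch for the two extra-args options above (assuming the default
application name; the flag values are illustrative only):

    juju config kubernetes-worker kubelet-extra-args="v=4" proxy-extra-args="v=4"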
13
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/copyright
generated
vendored
Normal file
@ -0,0 +1,13 @@
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
8
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/debug-scripts/inotify
generated
vendored
Executable file
@ -0,0 +1,8 @@
#!/bin/sh
set -ux

# We had to bump inotify limits once in the past, hence why this oddly specific
# script lives here in kubernetes-worker.

sysctl fs.inotify > $DEBUG_SCRIPT_DIR/sysctl-limits
ls -l /proc/*/fd/* | grep inotify > $DEBUG_SCRIPT_DIR/inotify-instances
15
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/debug-scripts/kubectl
generated
vendored
Executable file
@ -0,0 +1,15 @@
#!/bin/sh
set -ux

export PATH=$PATH:/snap/bin

alias kubectl="kubectl --kubeconfig=/root/cdk/kubeconfig"

kubectl cluster-info > $DEBUG_SCRIPT_DIR/cluster-info
kubectl cluster-info dump > $DEBUG_SCRIPT_DIR/cluster-info-dump
for obj in pods svc ingress secrets pv pvc rc; do
  kubectl describe $obj --all-namespaces > $DEBUG_SCRIPT_DIR/describe-$obj
done
for obj in nodes; do
  kubectl describe $obj > $DEBUG_SCRIPT_DIR/describe-$obj
done
9
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/debug-scripts/kubernetes-worker-services
generated
vendored
Executable file
@ -0,0 +1,9 @@
#!/bin/sh
set -ux

for service in kubelet kube-proxy; do
  systemctl status snap.$service.daemon > $DEBUG_SCRIPT_DIR/$service-systemctl-status
  journalctl -u snap.$service.daemon > $DEBUG_SCRIPT_DIR/$service-journal
done

# FIXME: get the snap config or something
2
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/exec.d/docker-compose/charm-pre-install
generated
vendored
Normal file
@ -0,0 +1,2 @@
# This stubs out charm-pre-install coming from layer-docker as a workaround for
# offline installs until https://github.com/juju/charm-tools/issues/301 is fixed.
17
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/exec.d/vmware-patch/charm-pre-install
generated
vendored
Executable file
@ -0,0 +1,17 @@
#!/bin/bash
MY_HOSTNAME=$(hostname)

: ${JUJU_UNIT_NAME:=`uuidgen`}


if [ "${MY_HOSTNAME}" == "ubuntuguest" ]; then
  juju-log "Detected broken vsphere integration. Applying hostname override"

  FRIENDLY_HOSTNAME=$(echo $JUJU_UNIT_NAME | tr / -)
  juju-log "Setting hostname to $FRIENDLY_HOSTNAME"
  if [ ! -f /etc/hostname.orig ]; then
    mv /etc/hostname /etc/hostname.orig
  fi
  echo "${FRIENDLY_HOSTNAME}" > /etc/hostname
  hostname $FRIENDLY_HOSTNAME
fi
362
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/icon.svg
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
36
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/layer.yaml
generated
vendored
Normal file
@ -0,0 +1,36 @@
repo: https://github.com/kubernetes/kubernetes.git
includes:
  - 'layer:basic'
  - 'layer:debug'
  - 'layer:snap'
  - 'layer:docker'
  - 'layer:metrics'
  - 'layer:nagios'
  - 'layer:tls-client'
  - 'layer:nvidia-cuda'
  - 'layer:cdk-service-kicker'
  - 'interface:http'
  - 'interface:kubernetes-cni'
  - 'interface:kube-dns'
  - 'interface:kube-control'
config:
  deletes:
    - install_from_upstream
options:
  basic:
    packages:
      - 'cifs-utils'
      - 'ceph-common'
      - 'nfs-common'
      - 'socat'
      - 'virt-what'
  tls-client:
    ca_certificate_path: '/root/cdk/ca.crt'
    server_certificate_path: '/root/cdk/server.crt'
    server_key_path: '/root/cdk/server.key'
    client_certificate_path: '/root/cdk/client.crt'
    client_key_path: '/root/cdk/client.key'
  cdk-service-kicker:
    services:
      - 'snap.kubelet.daemon'
      - 'snap.kube-proxy.daemon'
35
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/lib/charms/kubernetes/common.py
generated
vendored
Normal file
@ -0,0 +1,35 @@
#!/usr/bin/env python

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re
import subprocess


def get_version(bin_name):
    """Get the version of an installed Kubernetes binary.

    :param str bin_name: Name of binary
    :return: 3-tuple version (maj, min, patch)

    Example::

        >>> get_version('kubelet')
        (1, 6, 0)

    """
    cmd = '{} --version'.format(bin_name).split()
    version_string = subprocess.check_output(cmd).decode('utf-8')
    return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3])
55
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/metadata.yaml
generated
vendored
Normal file
@ -0,0 +1,55 @@
name: kubernetes-worker
summary: The workload bearing units of a kubernetes cluster
maintainers:
  - Tim Van Steenburgh <tim.van.steenburgh@canonical.com>
  - George Kraft <george.kraft@canonical.com>
  - Rye Terrell <rye.terrell@canonical.com>
  - Konstantinos Tsakalozos <kos.tsakalozos@canonical.com>
  - Charles Butler <Chuck@dasroot.net>
  - Matthew Bruzek <mbruzek@ubuntu.com>
description: |
  Kubernetes is an open-source platform for deploying, scaling, and operations
  of application containers across a cluster of hosts. Kubernetes is portable
  in that it works with public, private, and hybrid clouds. Extensible through
  a pluggable infrastructure. Self healing in that it will automatically
  restart and place containers on healthy nodes if a node ever goes away.
tags:
  - misc
series:
  - xenial
subordinate: false
requires:
  kube-api-endpoint:
    interface: http
  kube-dns:
    # kube-dns is deprecated. Its functionality has been rolled into the
    # kube-control interface. The kube-dns relation will be removed in
    # a future release.
    interface: kube-dns
  kube-control:
    interface: kube-control
provides:
  cni:
    interface: kubernetes-cni
    scope: container
resources:
  cni-amd64:
    type: file
    filename: cni.tgz
    description: CNI plugins for amd64
  cni-s390x:
    type: file
    filename: cni.tgz
    description: CNI plugins for s390x
  kubectl:
    type: file
    filename: kubectl.snap
    description: kubectl snap
  kubelet:
    type: file
    filename: kubelet.snap
    description: kubelet snap
  kube-proxy:
    type: file
    filename: kube-proxy.snap
    description: kube-proxy snap
2
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/metrics.yaml
generated
vendored
Normal file
@ -0,0 +1,2 @@
metrics:
  juju-units: {}
945
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py
generated
vendored
Normal file
@ -0,0 +1,945 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import random
|
||||
import shutil
|
||||
import subprocess
|
||||
import time
|
||||
|
||||
from shlex import split
|
||||
from subprocess import check_call, check_output
|
||||
from subprocess import CalledProcessError
|
||||
from socket import gethostname
|
||||
|
||||
from charms import layer
|
||||
from charms.layer import snap
|
||||
from charms.reactive import hook
|
||||
from charms.reactive import set_state, remove_state, is_state
|
||||
from charms.reactive import when, when_any, when_not
|
||||
|
||||
from charms.kubernetes.common import get_version
|
||||
|
||||
from charms.reactive.helpers import data_changed, any_file_changed
|
||||
from charms.templating.jinja2 import render
|
||||
|
||||
from charmhelpers.core import hookenv, unitdata
|
||||
from charmhelpers.core.host import service_stop, service_restart
|
||||
from charmhelpers.contrib.charmsupport import nrpe
|
||||
|
||||
# Override the default nagios shortname regex to allow periods, which we
|
||||
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
|
||||
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
|
||||
nrpe.Check.shortname_re = '[\.A-Za-z0-9-_]+$'
|
||||
|
||||
kubeconfig_path = '/root/cdk/kubeconfig'
|
||||
kubeproxyconfig_path = '/root/cdk/kubeproxyconfig'
|
||||
kubeclientconfig_path = '/root/.kube/config'
|
||||
|
||||
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
|
||||
db = unitdata.kv()
|
||||
|
||||
|
||||
@hook('upgrade-charm')
|
||||
def upgrade_charm():
|
||||
# Trigger removal of PPA docker installation if it was previously set.
|
||||
set_state('config.changed.install_from_upstream')
|
||||
hookenv.atexit(remove_state, 'config.changed.install_from_upstream')
|
||||
|
||||
cleanup_pre_snap_services()
|
||||
check_resources_for_upgrade_needed()
|
||||
|
||||
# Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
|
||||
# since they can differ between k8s versions
|
||||
remove_state('kubernetes-worker.gpu.enabled')
|
||||
|
||||
remove_state('kubernetes-worker.cni-plugins.installed')
|
||||
remove_state('kubernetes-worker.config.created')
|
||||
remove_state('kubernetes-worker.ingress.available')
|
||||
set_state('kubernetes-worker.restart-needed')
|
||||
|
||||
|
||||
def check_resources_for_upgrade_needed():
|
||||
hookenv.status_set('maintenance', 'Checking resources')
|
||||
resources = ['kubectl', 'kubelet', 'kube-proxy']
|
||||
paths = [hookenv.resource_get(resource) for resource in resources]
|
||||
if any_file_changed(paths):
|
||||
set_upgrade_needed()
|
||||
|
||||
|
||||
def set_upgrade_needed():
|
||||
set_state('kubernetes-worker.snaps.upgrade-needed')
|
||||
config = hookenv.config()
|
||||
previous_channel = config.previous('channel')
|
||||
require_manual = config.get('require-manual-upgrade')
|
||||
if previous_channel is None or not require_manual:
|
||||
set_state('kubernetes-worker.snaps.upgrade-specified')
|
||||
|
||||
|
||||
def cleanup_pre_snap_services():
|
||||
# remove old states
|
||||
remove_state('kubernetes-worker.components.installed')
|
||||
|
||||
# disable old services
|
||||
services = ['kubelet', 'kube-proxy']
|
||||
for service in services:
|
||||
hookenv.log('Stopping {0} service.'.format(service))
|
||||
service_stop(service)
|
||||
|
||||
# cleanup old files
|
||||
files = [
|
||||
"/lib/systemd/system/kubelet.service",
|
||||
"/lib/systemd/system/kube-proxy.service",
|
||||
"/etc/default/kube-default",
|
||||
"/etc/default/kubelet",
|
||||
"/etc/default/kube-proxy",
|
||||
"/srv/kubernetes",
|
||||
"/usr/local/bin/kubectl",
|
||||
"/usr/local/bin/kubelet",
|
||||
"/usr/local/bin/kube-proxy",
|
||||
"/etc/kubernetes"
|
||||
]
|
||||
for file in files:
|
||||
if os.path.isdir(file):
|
||||
hookenv.log("Removing directory: " + file)
|
||||
shutil.rmtree(file)
|
||||
elif os.path.isfile(file):
|
||||
hookenv.log("Removing file: " + file)
|
||||
os.remove(file)
|
||||
|
||||
|
||||
@when('config.changed.channel')
|
||||
def channel_changed():
|
||||
set_upgrade_needed()
|
||||
|
||||
|
||||
@when('kubernetes-worker.snaps.upgrade-needed')
|
||||
@when_not('kubernetes-worker.snaps.upgrade-specified')
|
||||
def upgrade_needed_status():
|
||||
msg = 'Needs manual upgrade, run the upgrade action'
|
||||
hookenv.status_set('blocked', msg)
|
||||
|
||||
|
||||
@when('kubernetes-worker.snaps.upgrade-specified')
|
||||
def install_snaps():
|
||||
check_resources_for_upgrade_needed()
|
||||
channel = hookenv.config('channel')
|
||||
hookenv.status_set('maintenance', 'Installing kubectl snap')
|
||||
snap.install('kubectl', channel=channel, classic=True)
|
||||
hookenv.status_set('maintenance', 'Installing kubelet snap')
|
||||
snap.install('kubelet', channel=channel, classic=True)
|
||||
hookenv.status_set('maintenance', 'Installing kube-proxy snap')
|
||||
snap.install('kube-proxy', channel=channel, classic=True)
|
||||
set_state('kubernetes-worker.snaps.installed')
|
||||
set_state('kubernetes-worker.restart-needed')
|
||||
remove_state('kubernetes-worker.snaps.upgrade-needed')
|
||||
remove_state('kubernetes-worker.snaps.upgrade-specified')
|
||||
|
||||
|
||||
@hook('stop')
|
||||
def shutdown():
|
||||
''' When this unit is destroyed:
|
||||
- delete the current node
|
||||
- stop the worker services
|
||||
'''
|
||||
try:
|
||||
if os.path.isfile(kubeconfig_path):
|
||||
kubectl('delete', 'node', gethostname())
|
||||
except CalledProcessError:
|
||||
hookenv.log('Failed to unregister node.')
|
||||
service_stop('snap.kubelet.daemon')
|
||||
service_stop('snap.kube-proxy.daemon')
|
||||
|
||||
|
||||
@when('docker.available')
|
||||
@when_not('kubernetes-worker.cni-plugins.installed')
|
||||
def install_cni_plugins():
|
||||
''' Unpack the cni-plugins resource '''
|
||||
charm_dir = os.getenv('CHARM_DIR')
|
||||
|
||||
# Get the resource via resource_get
|
||||
try:
|
||||
resource_name = 'cni-{}'.format(arch())
|
||||
archive = hookenv.resource_get(resource_name)
|
||||
except Exception:
|
||||
message = 'Error fetching the cni resource.'
|
||||
hookenv.log(message)
|
||||
hookenv.status_set('blocked', message)
|
||||
return
|
||||
|
||||
if not archive:
|
||||
hookenv.log('Missing cni resource.')
|
||||
hookenv.status_set('blocked', 'Missing cni resource.')
|
||||
return
|
||||
|
||||
# Handle null resource publication, we check if filesize < 1mb
|
||||
filesize = os.stat(archive).st_size
|
||||
if filesize < 1000000:
|
||||
hookenv.status_set('blocked', 'Incomplete cni resource.')
|
||||
return
|
||||
|
||||
hookenv.status_set('maintenance', 'Unpacking cni resource.')
|
||||
|
||||
unpack_path = '{}/files/cni'.format(charm_dir)
|
||||
os.makedirs(unpack_path, exist_ok=True)
|
||||
cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
|
||||
hookenv.log(cmd)
|
||||
check_call(cmd)
|
||||
|
||||
apps = [
|
||||
{'name': 'loopback', 'path': '/opt/cni/bin'}
|
||||
]
|
||||
|
||||
for app in apps:
|
||||
unpacked = '{}/{}'.format(unpack_path, app['name'])
|
||||
app_path = os.path.join(app['path'], app['name'])
|
||||
install = ['install', '-v', '-D', unpacked, app_path]
|
||||
hookenv.log(install)
|
||||
check_call(install)
|
||||
|
||||
# Used by the "registry" action. The action is run on a single worker, but
|
||||
# the registry pod can end up on any worker, so we need this directory on
|
||||
# all the workers.
|
||||
os.makedirs('/srv/registry', exist_ok=True)
|
||||
|
||||
set_state('kubernetes-worker.cni-plugins.installed')
|
||||
|
||||
|
||||
@when('kubernetes-worker.snaps.installed')
|
||||
def set_app_version():
|
||||
''' Declare the application version to juju '''
|
||||
cmd = ['kubelet', '--version']
|
||||
version = check_output(cmd)
|
||||
hookenv.application_version_set(version.split(b' v')[-1].rstrip())
|
||||
|
||||
|
||||
@when('kubernetes-worker.snaps.installed')
|
||||
@when_not('kube-control.dns.available')
|
||||
def notify_user_transient_status():
|
||||
''' Notify to the user we are in a transient state and the application
|
||||
is still converging. Potentially remotely, or we may be in a detached loop
|
||||
wait state '''
|
||||
|
||||
# During deployment the worker has to start kubelet without cluster dns
|
||||
# configured. If this is the first unit online in a service pool waiting
|
||||
# to self host the dns pod, and configure itself to query the dns service
|
||||
# declared in the kube-system namespace
|
||||
|
||||
hookenv.status_set('waiting', 'Waiting for cluster DNS.')
|
||||
|
||||
|
||||
@when('kubernetes-worker.snaps.installed',
|
||||
'kube-control.dns.available')
|
||||
@when_not('kubernetes-worker.snaps.upgrade-needed')
|
||||
def charm_status(kube_control):
|
||||
'''Update the status message with the current status of kubelet.'''
|
||||
update_kubelet_status()
|
||||
|
||||
|
||||
def update_kubelet_status():
|
||||
''' There are different states that the kubelet can be in, where we are
|
||||
waiting for dns, waiting for cluster turnup, or ready to serve
|
||||
applications.'''
|
||||
services = [
|
||||
'kubelet',
|
||||
'kube-proxy'
|
||||
]
|
||||
failing_services = []
|
||||
for service in services:
|
||||
daemon = 'snap.{}.daemon'.format(service)
|
||||
if not _systemctl_is_active(daemon):
|
||||
failing_services.append(service)
|
||||
|
||||
if len(failing_services) == 0:
|
||||
hookenv.status_set('active', 'Kubernetes worker running.')
|
||||
else:
|
||||
msg = 'Waiting for {} to start.'.format(','.join(failing_services))
|
||||
hookenv.status_set('waiting', msg)
|
||||
|
||||
|
||||
@when('certificates.available')
|
||||
def send_data(tls):
|
||||
'''Send the data that is required to create a server certificate for
|
||||
this server.'''
|
||||
# Use the public ip of this unit as the Common Name for the certificate.
|
||||
common_name = hookenv.unit_public_ip()
|
||||
|
||||
# Create SANs that the tls layer will add to the server cert.
|
||||
sans = [
|
||||
hookenv.unit_public_ip(),
|
||||
hookenv.unit_private_ip(),
|
||||
gethostname()
|
||||
]
|
||||
|
||||
# Create a path safe name by removing path characters from the unit name.
|
||||
certificate_name = hookenv.local_unit().replace('/', '_')
|
||||
|
||||
# Request a server cert with this information.
|
||||
tls.request_server_cert(common_name, sans, certificate_name)
|
||||
|
||||
|
||||
@when('kube-api-endpoint.available', 'kube-control.dns.available',
|
||||
'cni.available')
|
||||
def watch_for_changes(kube_api, kube_control, cni):
|
||||
''' Watch for configuration changes and signal if we need to restart the
|
||||
worker services '''
|
||||
servers = get_kube_api_servers(kube_api)
|
||||
dns = kube_control.get_dns()
|
||||
cluster_cidr = cni.get_config()['cidr']
|
||||
|
||||
if (data_changed('kube-api-servers', servers) or
|
||||
data_changed('kube-dns', dns) or
|
||||
data_changed('cluster-cidr', cluster_cidr)):
|
||||
|
||||
set_state('kubernetes-worker.restart-needed')
|
||||
|
||||
|
||||
@when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available',
|
||||
'tls_client.ca.saved', 'tls_client.client.certificate.saved',
|
||||
'tls_client.client.key.saved', 'tls_client.server.certificate.saved',
|
||||
'tls_client.server.key.saved',
|
||||
'kube-control.dns.available', 'kube-control.auth.available',
|
||||
'cni.available', 'kubernetes-worker.restart-needed',
|
||||
'worker.auth.bootstrapped')
|
||||
def start_worker(kube_api, kube_control, auth_control, cni):
|
||||
''' Start kubelet using the provided API and DNS info.'''
|
||||
servers = get_kube_api_servers(kube_api)
|
||||
# Note that the DNS server doesn't necessarily exist at this point. We know
|
||||
# what its IP will eventually be, though, so we can go ahead and configure
|
||||
# kubelet with that info. This ensures that early pods are configured with
|
||||
# the correct DNS even though the server isn't ready yet.
|
||||
|
||||
dns = kube_control.get_dns()
|
||||
cluster_cidr = cni.get_config()['cidr']
|
||||
|
||||
if cluster_cidr is None:
|
||||
hookenv.log('Waiting for cluster cidr.')
|
||||
return
|
||||
|
||||
creds = db.get('credentials')
|
||||
data_changed('kube-control.creds', creds)
|
||||
|
||||
# set --allow-privileged flag for kubelet
|
||||
set_privileged()
|
||||
|
||||
create_config(random.choice(servers), creds)
|
||||
configure_kubelet(dns)
|
||||
configure_kube_proxy(servers, cluster_cidr)
|
||||
set_state('kubernetes-worker.config.created')
|
||||
restart_unit_services()
|
||||
update_kubelet_status()
|
||||
apply_node_labels()
|
||||
remove_state('kubernetes-worker.restart-needed')
|
||||
|
||||
|
||||
@when('cni.connected')
|
||||
@when_not('cni.configured')
|
||||
def configure_cni(cni):
|
||||
''' Set worker configuration on the CNI relation. This lets the CNI
|
||||
subordinate know that we're the worker so it can respond accordingly. '''
|
||||
cni.set_config(is_master=False, kubeconfig_path=kubeconfig_path)
|
||||
|
||||
|
||||
@when('config.changed.ingress')
|
||||
def toggle_ingress_state():
|
||||
''' Ingress is a toggled state. Remove ingress.available if set when
|
||||
toggled '''
|
||||
remove_state('kubernetes-worker.ingress.available')
|
||||
|
||||
|
||||
@when('docker.sdn.configured')
|
||||
def sdn_changed():
|
||||
'''The Software Defined Network changed on the container so restart the
|
||||
kubernetes services.'''
|
||||
restart_unit_services()
|
||||
update_kubelet_status()
|
||||
remove_state('docker.sdn.configured')
|
||||
|
||||
|
||||
@when('kubernetes-worker.config.created')
|
||||
@when_not('kubernetes-worker.ingress.available')
|
||||
def render_and_launch_ingress():
|
||||
''' If configuration has ingress RC enabled, launch the ingress load
|
||||
balancer and default http backend. Otherwise attempt deletion. '''
|
||||
config = hookenv.config()
|
||||
# If ingress is enabled, launch the ingress controller
|
||||
if config.get('ingress'):
|
||||
launch_default_ingress_controller()
|
||||
else:
|
||||
hookenv.log('Deleting the http backend and ingress.')
|
||||
kubectl_manifest('delete',
|
||||
'/root/cdk/addons/default-http-backend.yaml')
|
||||
kubectl_manifest('delete',
|
||||
'/root/cdk/addons/ingress-replication-controller.yaml') # noqa
|
||||
hookenv.close_port(80)
|
||||
hookenv.close_port(443)
|
||||
|
||||
|
||||
@when('kubernetes-worker.ingress.available')
|
||||
def scale_ingress_controller():
|
||||
''' Scale the number of ingress controller replicas to match the number of
|
||||
nodes. '''
|
||||
try:
|
||||
output = kubectl('get', 'nodes', '-o', 'name')
|
||||
count = len(output.splitlines())
|
||||
kubectl('scale', '--replicas=%d' % count, 'rc/nginx-ingress-controller') # noqa
|
||||
except CalledProcessError:
|
||||
hookenv.log('Failed to scale ingress controllers. Will attempt again next update.') # noqa
|
||||
|
||||
|
||||
@when('config.changed.labels', 'kubernetes-worker.config.created')
|
||||
def apply_node_labels():
|
||||
''' Parse the labels configuration option and apply the labels to the node.
|
||||
'''
|
||||
# scrub and try to format an array from the configuration option
|
||||
config = hookenv.config()
|
||||
user_labels = _parse_labels(config.get('labels'))
|
||||
|
||||
# For diffing sake, iterate the previous label set
|
||||
if config.previous('labels'):
|
||||
previous_labels = _parse_labels(config.previous('labels'))
|
||||
hookenv.log('previous labels: {}'.format(previous_labels))
|
||||
else:
|
||||
# this handles first time run if there is no previous labels config
|
||||
previous_labels = _parse_labels("")
|
||||
|
||||
# Calculate label removal
|
||||
for label in previous_labels:
|
||||
if label not in user_labels:
|
||||
hookenv.log('Deleting node label {}'.format(label))
|
||||
_apply_node_label(label, delete=True)
|
||||
# if the label is in user labels we do nothing here, it will get set
|
||||
# during the atomic update below.
|
||||
|
||||
# Atomically set a label
|
||||
for label in user_labels:
|
||||
_apply_node_label(label, overwrite=True)
|
||||
|
||||
|
||||
@when_any('config.changed.kubelet-extra-args',
|
||||
'config.changed.proxy-extra-args')
|
||||
def extra_args_changed():
|
||||
set_state('kubernetes-worker.restart-needed')
|
||||
|
||||
|
||||
def arch():
|
||||
'''Return the package architecture as a string. Raise an exception if the
|
||||
architecture is not supported by kubernetes.'''
|
||||
# Get the package architecture for this system.
|
||||
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
|
||||
# Convert the binary result into a string.
|
||||
architecture = architecture.decode('utf-8')
|
||||
return architecture
|
||||
|
||||
|
||||
def create_config(server, creds):
|
||||
'''Create a kubernetes configuration for the worker unit.'''
|
||||
# Get the options from the tls-client layer.
|
||||
layer_options = layer.options('tls-client')
|
||||
# Get all the paths to the tls information required for kubeconfig.
|
||||
ca = layer_options.get('ca_certificate_path')
|
||||
|
||||
# Create kubernetes configuration in the default location for ubuntu.
|
||||
create_kubeconfig('/home/ubuntu/.kube/config', server, ca,
|
||||
token=creds['client_token'], user='ubuntu')
|
||||
# Make the config dir readable by the ubuntu users so juju scp works.
|
||||
cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube']
|
||||
check_call(cmd)
|
||||
# Create kubernetes configuration in the default location for root.
|
||||
create_kubeconfig(kubeclientconfig_path, server, ca,
|
||||
token=creds['client_token'], user='root')
|
||||
# Create kubernetes configuration for kubelet, and kube-proxy services.
|
||||
create_kubeconfig(kubeconfig_path, server, ca,
|
||||
token=creds['kubelet_token'], user='kubelet')
|
||||
create_kubeconfig(kubeproxyconfig_path, server, ca,
|
||||
token=creds['proxy_token'], user='kube-proxy')
|
||||
|
||||
|
||||
def parse_extra_args(config_key):
|
||||
elements = hookenv.config().get(config_key, '').split()
|
||||
args = {}
|
||||
|
||||
for element in elements:
|
||||
if '=' in element:
|
||||
key, _, value = element.partition('=')
|
||||
args[key] = value
|
||||
else:
|
||||
args[element] = 'true'
|
||||
|
||||
return args
|
||||
|
||||
|
||||
def configure_kubernetes_service(service, base_args, extra_args_key):
|
||||
db = unitdata.kv()
|
||||
|
||||
prev_args_key = 'kubernetes-worker.prev_args.' + service
|
||||
prev_args = db.get(prev_args_key) or {}
|
||||
|
||||
extra_args = parse_extra_args(extra_args_key)
|
||||
|
||||
args = {}
|
||||
for arg in prev_args:
|
||||
# remove previous args by setting to null
|
||||
args[arg] = 'null'
|
||||
for k, v in base_args.items():
|
||||
args[k] = v
|
||||
for k, v in extra_args.items():
|
||||
args[k] = v
|
||||
|
||||
cmd = ['snap', 'set', service] + ['%s=%s' % item for item in args.items()]
|
||||
check_call(cmd)
|
||||
|
||||
db.set(prev_args_key, args)
|
||||
|
||||
|
||||
def configure_kubelet(dns):
|
||||
layer_options = layer.options('tls-client')
|
||||
ca_cert_path = layer_options.get('ca_certificate_path')
|
||||
server_cert_path = layer_options.get('server_certificate_path')
|
||||
server_key_path = layer_options.get('server_key_path')
|
||||
|
||||
kubelet_opts = {}
|
||||
kubelet_opts['require-kubeconfig'] = 'true'
|
||||
kubelet_opts['kubeconfig'] = kubeconfig_path
|
||||
kubelet_opts['network-plugin'] = 'cni'
|
||||
kubelet_opts['v'] = '0'
|
||||
kubelet_opts['address'] = '0.0.0.0'
|
||||
kubelet_opts['port'] = '10250'
|
||||
kubelet_opts['cluster-dns'] = dns['sdn-ip']
|
||||
kubelet_opts['cluster-domain'] = dns['domain']
|
||||
kubelet_opts['anonymous-auth'] = 'false'
|
||||
kubelet_opts['client-ca-file'] = ca_cert_path
|
||||
kubelet_opts['tls-cert-file'] = server_cert_path
|
||||
kubelet_opts['tls-private-key-file'] = server_key_path
|
||||
kubelet_opts['logtostderr'] = 'true'
|
||||
kubelet_opts['fail-swap-on'] = 'false'
|
||||
|
||||
privileged = is_state('kubernetes-worker.privileged')
|
||||
kubelet_opts['allow-privileged'] = 'true' if privileged else 'false'
|
||||
|
||||
if is_state('kubernetes-worker.gpu.enabled'):
|
||||
if get_version('kubelet') < (1, 6):
|
||||
hookenv.log('Adding --experimental-nvidia-gpus=1 to kubelet')
|
||||
kubelet_opts['experimental-nvidia-gpus'] = '1'
|
||||
else:
|
||||
hookenv.log('Adding --feature-gates=Accelerators=true to kubelet')
|
||||
kubelet_opts['feature-gates'] = 'Accelerators=true'
|
||||
|
||||
configure_kubernetes_service('kubelet', kubelet_opts, 'kubelet-extra-args')
|
||||
|
||||
|
||||
def configure_kube_proxy(api_servers, cluster_cidr):
|
||||
kube_proxy_opts = {}
|
||||
kube_proxy_opts['cluster-cidr'] = cluster_cidr
|
||||
kube_proxy_opts['kubeconfig'] = kubeproxyconfig_path
|
||||
kube_proxy_opts['logtostderr'] = 'true'
|
||||
kube_proxy_opts['v'] = '0'
|
||||
kube_proxy_opts['master'] = random.choice(api_servers)
|
||||
|
||||
if b'lxc' in check_output('virt-what', shell=True):
|
||||
kube_proxy_opts['conntrack-max-per-core'] = '0'
|
||||
|
||||
configure_kubernetes_service('kube-proxy', kube_proxy_opts,
|
||||
'proxy-extra-args')
|
||||
|
||||
|
||||
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
|
||||
user='ubuntu', context='juju-context',
|
||||
cluster='juju-cluster', password=None, token=None):
|
||||
'''Create a configuration for Kubernetes based on path using the supplied
|
||||
arguments for values of the Kubernetes server, CA, key, certificate, user
|
||||
context and cluster.'''
|
||||
if not key and not certificate and not password and not token:
|
||||
raise ValueError('Missing authentication mechanism.')
|
||||
|
||||
# token and password are mutually exclusive. Error early if both are
|
||||
# present. The developer has requested an impossible situation.
|
||||
# see: kubectl config set-credentials --help
|
||||
if token and password:
|
||||
raise ValueError('Token and Password are mutually exclusive.')
|
||||
# Create the config file with the address of the master server.
|
||||
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
|
||||
'--server={2} --certificate-authority={3} --embed-certs=true'
|
||||
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
|
||||
# Delete old users
|
||||
cmd = 'kubectl config --kubeconfig={0} unset users'
|
||||
check_call(split(cmd.format(kubeconfig)))
|
||||
# Create the credentials using the client flags.
|
||||
cmd = 'kubectl config --kubeconfig={0} ' \
|
||||
'set-credentials {1} '.format(kubeconfig, user)
|
||||
|
||||
if key and certificate:
|
||||
cmd = '{0} --client-key={1} --client-certificate={2} '\
|
||||
'--embed-certs=true'.format(cmd, key, certificate)
|
||||
if password:
|
||||
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
|
||||
# This is mutually exclusive from password. They will not work together.
|
||||
if token:
|
||||
cmd = "{0} --token={1}".format(cmd, token)
|
||||
check_call(split(cmd))
|
||||
# Create a default context with the cluster.
|
||||
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
|
||||
'--cluster={2} --user={3}'
|
||||
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
|
||||
# Make the config use this new context.
|
||||
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
|
||||
check_call(split(cmd.format(kubeconfig, context)))
|
||||
|
||||
|
||||
def launch_default_ingress_controller():
|
||||
''' Launch the Kubernetes ingress controller & default backend (404) '''
|
||||
context = {}
|
||||
context['arch'] = arch()
|
||||
addon_path = '/root/cdk/addons/{}'
|
||||
|
||||
# Render the default http backend (404) replicationcontroller manifest
|
||||
manifest = addon_path.format('default-http-backend.yaml')
|
||||
render('default-http-backend.yaml', manifest, context)
|
||||
hookenv.log('Creating the default http backend.')
|
||||
try:
|
||||
kubectl('apply', '-f', manifest)
|
||||
except CalledProcessError as e:
|
||||
hookenv.log(e)
|
||||
hookenv.log('Failed to create default-http-backend. Will attempt again next update.') # noqa
|
||||
hookenv.close_port(80)
|
||||
hookenv.close_port(443)
|
||||
return
|
||||
|
||||
# Render the ingress replication controller manifest
|
||||
context['ingress_image'] = \
|
||||
"gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13"
|
||||
if arch() == 's390x':
|
||||
context['ingress_image'] = \
|
||||
"docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13"
|
||||
manifest = addon_path.format('ingress-replication-controller.yaml')
|
||||
render('ingress-replication-controller.yaml', manifest, context)
|
||||
hookenv.log('Creating the ingress replication controller.')
|
||||
try:
|
||||
kubectl('apply', '-f', manifest)
|
||||
except CalledProcessError as e:
|
||||
hookenv.log(e)
|
||||
hookenv.log('Failed to create ingress controller. Will attempt again next update.') # noqa
|
||||
hookenv.close_port(80)
|
||||
hookenv.close_port(443)
|
||||
return
|
||||
|
||||
set_state('kubernetes-worker.ingress.available')
|
||||
hookenv.open_port(80)
|
||||
hookenv.open_port(443)
|
||||
|
||||
|
||||
def restart_unit_services():
|
||||
'''Restart worker services.'''
|
||||
hookenv.log('Restarting kubelet and kube-proxy.')
|
||||
services = ['kube-proxy', 'kubelet']
|
||||
for service in services:
|
||||
service_restart('snap.%s.daemon' % service)
|
||||
|
||||
|
||||
def get_kube_api_servers(kube_api):
|
||||
'''Return the kubernetes api server address and port for this
|
||||
relationship.'''
|
||||
hosts = []
|
||||
# Iterate over every service from the relation object.
|
||||
for service in kube_api.services():
|
||||
for unit in service['hosts']:
|
||||
hosts.append('https://{0}:{1}'.format(unit['hostname'],
|
||||
unit['port']))
|
||||
return hosts
|
||||
|
||||
|
||||
def kubectl(*args):
|
||||
''' Run a kubectl cli command with a config file. Returns stdout and throws
|
||||
an error if the command fails. '''
|
||||
command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path] + list(args)
|
||||
hookenv.log('Executing {}'.format(command))
|
||||
return check_output(command)
|
||||
|
||||
|
||||
def kubectl_success(*args):
|
||||
''' Runs kubectl with the given args. Returns True if succesful, False if
|
||||
not. '''
|
||||
try:
|
||||
kubectl(*args)
|
||||
return True
|
||||
except CalledProcessError:
|
||||
return False
|
||||
|
||||
|
||||
def kubectl_manifest(operation, manifest):
|
||||
''' Wrap the kubectl creation command when using filepath resources
|
||||
:param operation - one of get, create, delete, replace
|
||||
:param manifest - filepath to the manifest
|
||||
'''
|
||||
# Deletions are a special case
|
||||
if operation == 'delete':
|
||||
# Ensure we immediately remove requested resources with --now
|
||||
return kubectl_success(operation, '-f', manifest, '--now')
|
||||
else:
|
||||
# Guard against an error re-creating the same manifest multiple times
|
||||
if operation == 'create':
|
||||
# If we already have the definition, its probably safe to assume
|
||||
# creation was true.
|
||||
if kubectl_success('get', '-f', manifest):
|
||||
hookenv.log('Skipping definition for {}'.format(manifest))
|
||||
return True
|
||||
# Execute the requested command that did not match any of the special
|
||||
# cases above
|
||||
return kubectl_success(operation, '-f', manifest)
|
||||
|
||||
|
||||
@when('nrpe-external-master.available')
|
||||
@when_not('nrpe-external-master.initial-config')
|
||||
def initial_nrpe_config(nagios=None):
|
||||
set_state('nrpe-external-master.initial-config')
|
||||
update_nrpe_config(nagios)
|
||||
|
||||
|
||||
@when('kubernetes-worker.config.created')
|
||||
@when('nrpe-external-master.available')
|
||||
@when_any('config.changed.nagios_context',
|
||||
'config.changed.nagios_servicegroups')
|
||||
def update_nrpe_config(unused=None):
|
||||
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
|
||||
hostname = nrpe.get_nagios_hostname()
|
||||
current_unit = nrpe.get_nagios_unit_name()
|
||||
nrpe_setup = nrpe.NRPE(hostname=hostname)
|
||||
nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
|
||||
nrpe_setup.write()
|
||||
|
||||
|
||||
@when_not('nrpe-external-master.available')
|
||||
@when('nrpe-external-master.initial-config')
|
||||
def remove_nrpe_config(nagios=None):
|
||||
remove_state('nrpe-external-master.initial-config')
|
||||
|
||||
# List of systemd services for which the checks will be removed
|
||||
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
|
||||
|
||||
# The current nrpe-external-master interface doesn't handle a lot of logic,
|
||||
# use the charm-helpers code for now.
|
||||
hostname = nrpe.get_nagios_hostname()
|
||||
nrpe_setup = nrpe.NRPE(hostname=hostname)
|
||||
|
||||
for service in services:
|
||||
nrpe_setup.remove_check(shortname=service)
|
||||
|
||||
|
||||
def set_privileged():
|
||||
"""Update the allow-privileged flag for kubelet.
|
||||
|
||||
"""
|
||||
privileged = hookenv.config('allow-privileged')
|
||||
if privileged == 'auto':
|
||||
gpu_enabled = is_state('kubernetes-worker.gpu.enabled')
|
||||
privileged = 'true' if gpu_enabled else 'false'
|
||||
|
||||
if privileged == 'true':
|
||||
set_state('kubernetes-worker.privileged')
|
||||
else:
|
||||
remove_state('kubernetes-worker.privileged')


@when('config.changed.allow-privileged')
@when('kubernetes-worker.config.created')
def on_config_allow_privileged_change():
    """React to a changed 'allow-privileged' config value."""
    set_state('kubernetes-worker.restart-needed')
    remove_state('config.changed.allow-privileged')


@when('cuda.installed')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.gpu.enabled')
def enable_gpu():
    """Enable GPU usage on this node."""
    config = hookenv.config()
    if config['allow-privileged'] == "false":
        hookenv.status_set(
            'active',
            'GPUs available. Set allow-privileged="auto" to enable.'
        )
        return

    hookenv.log('Enabling gpu mode')
    try:
        # Not sure why this is necessary, but if you don't run this, k8s will
        # think that the node has 0 gpus (as shown by the output of
        # `kubectl get nodes -o yaml`).
        check_call(['nvidia-smi'])
    except CalledProcessError as cpe:
        hookenv.log('Unable to communicate with the NVIDIA driver.')
        hookenv.log(cpe)
        return

    # Apply node labels
    _apply_node_label('gpu=true', overwrite=True)
    _apply_node_label('cuda=true', overwrite=True)

    set_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')


@when('kubernetes-worker.gpu.enabled')
@when_not('kubernetes-worker.privileged')
@when_not('kubernetes-worker.restart-needed')
def disable_gpu():
    """Disable GPU usage on this node.

    This handler fires when we're running in gpu mode, and then the operator
    sets allow-privileged="false". Since we can no longer run privileged
    containers, we need to disable gpu mode.
    """
    hookenv.log('Disabling gpu mode')

    # Remove node labels
    _apply_node_label('gpu', delete=True)
    _apply_node_label('cuda', delete=True)

    remove_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')


@when('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_enabled(kube_control):
    """Notify kubernetes-master that we're gpu-enabled."""
    kube_control.set_gpu(True)


@when_not('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_not_enabled(kube_control):
    """Notify kubernetes-master that we're not gpu-enabled."""
    kube_control.set_gpu(False)


@when('kube-control.connected')
def request_kubelet_and_proxy_credentials(kube_control):
    """ Request kubelet node authorization with a well-formed kubelet user.
    This also implies that we are requesting kube-proxy auth. """

    # The kube-control interface is created to support RBAC.
    # At this point we might as well do the right thing and return the
    # hostname even if it will only be used when we enable RBAC.
    nodeuser = 'system:node:{}'.format(gethostname())
    kube_control.set_auth_request(nodeuser)


@when('kube-control.connected')
def catch_change_in_creds(kube_control):
    """Request a service restart in case credential updates were detected."""
    nodeuser = 'system:node:{}'.format(gethostname())
    creds = kube_control.get_auth_credentials(nodeuser)
    if creds \
            and data_changed('kube-control.creds', creds) \
            and creds['user'] == nodeuser:
        # We need to cache the credentials here because if the
        # master changes (master leader dies and is replaced by a new one)
        # the new master will have no recollection of our certs.
        db.set('credentials', creds)
        set_state('worker.auth.bootstrapped')
        set_state('kubernetes-worker.restart-needed')
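For readers unfamiliar with charms.reactive's data_changed helper: it records a hash of the value under the given key and reports whether the value differs from the previous call. A rough standalone sketch of that idea (illustrative, not the library's actual implementation):

    import hashlib
    import json

    _seen = {}

    def data_changed_sketch(key, value):
        # Hash a stable serialization of the value; compare with the last run.
        digest = hashlib.sha1(
            json.dumps(value, sort_keys=True).encode('utf8')).hexdigest()
        changed = _seen.get(key) != digest
        _seen[key] = digest
        return changed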


@when_not('kube-control.connected')
def missing_kube_control():
    """Inform the operator they need to add the kube-control relation.

    If deploying via bundle this won't happen, but if the operator is
    upgrading a charm in a deployment that pre-dates the kube-control
    relation, it'll be missing.
    """
    hookenv.status_set(
        'blocked',
        'Relate {}:kube-control kubernetes-master:kube-control'.format(
            hookenv.service_name()))


@when('docker.ready')
def fix_iptables_for_docker_1_13():
    """ Fix the iptables FORWARD policy for Docker >= 1.13.
    https://github.com/kubernetes/kubernetes/issues/40182
    https://github.com/kubernetes/kubernetes/issues/39823
    """
    cmd = ['iptables', '-w', '300', '-P', 'FORWARD', 'ACCEPT']
    check_call(cmd)
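A small verification sketch (not part of the charm): confirm the FORWARD chain policy after the fix using `iptables -S`, whose first output line for a chain is its policy:

    from subprocess import check_output

    def forward_policy_is_accept():
        # 'iptables -S FORWARD' lists the chain; the first line is the
        # policy, e.g. '-P FORWARD ACCEPT'.
        rules = check_output(['iptables', '-S', 'FORWARD']).decode('utf8')
        return rules.splitlines()[0].strip() == '-P FORWARD ACCEPT'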


def _systemctl_is_active(application):
    ''' Poll systemctl to determine if the application is running. '''
    cmd = ['systemctl', 'is-active', application]
    try:
        raw = check_output(cmd)
        return b'active' in raw
    except Exception:
        return False
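Note that check_output raises on a non-zero exit status, and `systemctl is-active` exits non-zero for an inactive unit, so the substring test is only reached when the unit actually reported 'active'. Usage is straightforward; for example (hypothetical call, matching the service names used by the NRPE handlers above):

    # Hypothetical check against one of the worker's snap services.
    if _systemctl_is_active('snap.kubelet.daemon'):
        hookenv.log('kubelet is running')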


class ApplyNodeLabelFailed(Exception):
    pass


def _apply_node_label(label, delete=False, overwrite=False):
    ''' Invoke kubectl to apply node label changes. '''

    hostname = gethostname()
    # TODO: Make this part of the kubectl calls instead of a special string
    cmd_base = 'kubectl --kubeconfig={0} label node {1} {2}'

    if delete is True:
        label_key = label.split('=')[0]
        cmd = cmd_base.format(kubeconfig_path, hostname, label_key)
        cmd = cmd + '-'
    else:
        cmd = cmd_base.format(kubeconfig_path, hostname, label)
        if overwrite:
            cmd = '{} --overwrite'.format(cmd)
    cmd = cmd.split()

    deadline = time.time() + 60
    while time.time() < deadline:
        code = subprocess.call(cmd)
        if code == 0:
            break
        hookenv.log('Failed to apply label %s, exit code %d. Will retry.' % (
            label, code))
        time.sleep(1)
    else:
        msg = 'Failed to apply label %s' % label
        raise ApplyNodeLabelFailed(msg)
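The while/else above is easy to misread: the else branch runs only when the loop condition goes false without a break, i.e. when the deadline expires with no successful attempt. A minimal sketch of the same retry-with-deadline pattern in isolation:

    import time

    def retry_until(deadline_seconds, attempt):
        # attempt() returns True on success; the else clause fires only if
        # the deadline expired without a break.
        deadline = time.time() + deadline_seconds
        while time.time() < deadline:
            if attempt():
                break
            time.sleep(1)
        else:
            raise RuntimeError('deadline expired')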


def _parse_labels(labels):
    ''' Parse labels from a key=value string separated by spaces. '''
    label_array = labels.split(' ')
    sanitized_labels = []
    for item in label_array:
        if '=' in item:
            sanitized_labels.append(item)
        else:
            hookenv.log('Skipping malformed option: {}'.format(item))
    return sanitized_labels
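For example, given the config string below, the helper keeps only well-formed key=value pairs (illustrative call; malformed entries are logged and dropped):

    labels = _parse_labels('gpu=true cuda=true bogus')
    # labels == ['gpu=true', 'cuda=true']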
6
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/registry-configmap.yaml
generated
vendored
Normal file
@ -0,0 +1,6 @@
apiVersion: v1
data:
  body-size: 1024m
kind: ConfigMap
metadata:
  name: nginx-load-balancer-conf
44
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/templates/default-http-backend.yaml
generated
vendored
Normal file
@ -0,0 +1,44 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: default-http-backend
spec:
  replicas: 1
  selector:
    app: default-http-backend
  template:
    metadata:
      labels:
        app: default-http-backend
    spec:
      terminationGracePeriodSeconds: 60
      containers:
      - name: default-http-backend
        # Any image is permissible as long as:
        # 1. It serves a 404 page at /
        # 2. It serves 200 on a /healthz endpoint
        image: gcr.io/google_containers/defaultbackend:1.0
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 5
        ports:
        - containerPort: 8080
---
apiVersion: v1
kind: Service
metadata:
  name: default-http-backend
  # namespace: kube-system
  labels:
    k8s-app: default-http-backend
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: default-http-backend
178
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/templates/ingress-replication-controller.yaml
generated
vendored
Normal file
@ -0,0 +1,178 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nginx-ingress-serviceaccount
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: nginx-ingress-clusterrole
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - endpoints
      - nodes
      - pods
      - secrets
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - "extensions"
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
  - apiGroups:
      - "extensions"
    resources:
      - ingresses/status
    verbs:
      - update
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: nginx-ingress-role
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - pods
      - secrets
      - namespaces
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - configmaps
    resourceNames:
      # Defaults to "<election-id>-<ingress-class>"
      # Here: "<ingress-controller-leader>-<nginx>"
      # This has to be adapted if you change either parameter
      # when launching the nginx-ingress-controller.
      - "ingress-controller-leader-nginx"
    verbs:
      - get
      - update
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - create
  - apiGroups:
      - ""
    resources:
      - endpoints
    verbs:
      - get
      - create
      - update
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: nginx-ingress-role-nisa-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: nginx-ingress-role
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-serviceaccount
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: nginx-ingress-clusterrole-nisa-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: nginx-ingress-clusterrole
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-serviceaccount
    namespace: default
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-load-balancer-conf
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: nginx-ingress-controller
  labels:
    k8s-app: nginx-ingress-lb
spec:
  replicas: 1
  selector:
    k8s-app: nginx-ingress-lb
  template:
    metadata:
      labels:
        k8s-app: nginx-ingress-lb
        name: nginx-ingress-lb
    spec:
      terminationGracePeriodSeconds: 60
      # hostPort doesn't work with CNI, so we have to use hostNetwork instead
      # see https://github.com/kubernetes/kubernetes/issues/23920
      hostNetwork: true
      serviceAccountName: nginx-ingress-serviceaccount
      containers:
      - image: {{ ingress_image }}
        name: nginx-ingress-lb
        imagePullPolicy: Always
        livenessProbe:
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 5
        # use the downward API
        env:
          - name: POD_NAME
            valueFrom:
              fieldRef:
                fieldPath: metadata.name
          - name: POD_NAMESPACE
            valueFrom:
              fieldRef:
                fieldPath: metadata.namespace
        ports:
        - containerPort: 80
        - containerPort: 443
        args:
        - /nginx-ingress-controller
        - --default-backend-service=$(POD_NAMESPACE)/default-http-backend
        - --configmap=$(POD_NAMESPACE)/nginx-load-balancer-conf
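The {{ ingress_image }} placeholder is a Jinja2 variable the charm fills in at render time (wheelhouse.txt below pins charms.templating.jinja2). A minimal standalone sketch of that render step using plain jinja2, with assumed paths and an assumed image name:

    from jinja2 import Template

    # Hypothetical paths and image; the charm supplies the real values.
    with open('templates/ingress-replication-controller.yaml') as f:
        rendered = Template(f.read()).render(
            ingress_image='gcr.io/google_containers/'
                          'nginx-ingress-controller:0.9.0-beta.13')

    with open('/root/cdk/ingress-replication-controller.yaml', 'w') as f:
        f.write(rendered)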
63
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/templates/microbot-example.yaml
generated
vendored
Normal file
@ -0,0 +1,63 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: microbot
  name: microbot
spec:
  replicas: {{ replicas }}
  selector:
    matchLabels:
      app: microbot
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: microbot
    spec:
      containers:
      - image: dontrebootme/microbot:v1
        imagePullPolicy: ""
        name: microbot
        ports:
        - containerPort: 80
        livenessProbe:
          httpGet:
            path: /
            port: 80
          initialDelaySeconds: 5
          timeoutSeconds: 30
        resources: {}
      restartPolicy: Always
      serviceAccountName: ""
status: {}
---
apiVersion: v1
kind: Service
metadata:
  name: microbot
  labels:
    app: microbot
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: microbot
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: microbot-ingress
spec:
  rules:
  - host: microbot.{{ public_address }}.xip.io
    http:
      paths:
      - path: /
        backend:
          serviceName: microbot
          servicePort: 80
118
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/templates/registry.yaml
generated
vendored
Normal file
@ -0,0 +1,118 @@
apiVersion: v1
kind: Secret
metadata:
  name: registry-tls-data
type: Opaque
data:
  tls.crt: {{ tlscert }}
  tls.key: {{ tlskey }}
---
apiVersion: v1
kind: Secret
metadata:
  name: registry-auth-data
type: Opaque
data:
  htpasswd: {{ htpasswd }}
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: kube-registry-v0
  labels:
    k8s-app: kube-registry
    version: v0
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    k8s-app: kube-registry
    version: v0
  template:
    metadata:
      labels:
        k8s-app: kube-registry
        version: v0
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - name: registry
        image: registry:2
        resources:
          # keep request = limit to keep this container in guaranteed class
          limits:
            cpu: 100m
            memory: 100Mi
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: REGISTRY_HTTP_ADDR
          value: :5000
        - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
          value: /var/lib/registry
        - name: REGISTRY_AUTH_HTPASSWD_REALM
          value: basic_realm
        - name: REGISTRY_AUTH_HTPASSWD_PATH
          value: /auth/htpasswd
        volumeMounts:
        - name: image-store
          mountPath: /var/lib/registry
        - name: auth-dir
          mountPath: /auth
        ports:
        - containerPort: 5000
          name: registry
          protocol: TCP
      volumes:
      - name: image-store
        hostPath:
          path: /srv/registry
      - name: auth-dir
        secret:
          secretName: registry-auth-data
---
apiVersion: v1
kind: Service
metadata:
  name: kube-registry
  labels:
    k8s-app: kube-registry
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeRegistry"
spec:
  selector:
    k8s-app: kube-registry
  type: LoadBalancer
  ports:
  - name: registry
    port: 5000
    protocol: TCP
---
apiVersion: v1
kind: Secret
metadata:
  name: registry-access
data:
  .dockercfg: {{ dockercfg }}
type: kubernetes.io/dockercfg
{%- if ingress %}
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: registry-ing
spec:
  tls:
  - hosts:
    - {{ domain }}
    secretName: registry-tls-data
  rules:
  - host: {{ domain }}
    http:
      paths:
      - backend:
          serviceName: kube-registry
          servicePort: 5000
        path: /
{% endif %}
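The Secret data values here ({{ tlscert }}, {{ tlskey }}, {{ htpasswd }}, {{ dockercfg }}) must be base64-encoded before the template is rendered, since Kubernetes expects Secret data fields in that form. A hedged sketch of preparing those values (the file paths are hypothetical):

    import base64

    def b64_file(path):
        # Kubernetes Secret 'data' fields carry base64-encoded bytes.
        with open(path, 'rb') as f:
            return base64.b64encode(f.read()).decode('utf8')

    context = {
        'tlscert': b64_file('/srv/registry/tls.crt'),  # hypothetical paths
        'tlskey': b64_file('/srv/registry/tls.key'),
        'htpasswd': b64_file('/srv/registry/htpasswd'),
    }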
1
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/wheelhouse.txt
generated
vendored
Normal file
@ -0,0 +1 @@
charms.templating.jinja2>=0.0.1,<2.0.0
3
vendor/k8s.io/kubernetes/cluster/juju/prereqs/OWNERS
generated
vendored
Executable file
@ -0,0 +1,3 @@
reviewers:
- eparis
- david-mcmahon
48
vendor/k8s.io/kubernetes/cluster/juju/prereqs/ubuntu-juju.sh
generated
vendored
Normal file
@ -0,0 +1,48 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


set -o errexit
set -o nounset
set -o pipefail


function check_for_ppa() {
  local repo="$1"
  grep -qsw "${repo}" /etc/apt/sources.list /etc/apt/sources.list.d/*
}

function package_status() {
  local pkgname=$1
  local pkgstatus
  pkgstatus=$(dpkg-query -W --showformat='${Status}\n' "${pkgname}")
  if [[ "${pkgstatus}" != "install ok installed" ]]; then
    echo "Missing package ${pkgname}"
    sudo apt-get --force-yes --yes install "${pkgname}"
  fi
}

function gather_installation_reqs() {
  if ! check_for_ppa "juju"; then
    echo "... Detected missing dependencies.. running"
    echo "... add-apt-repository ppa:juju/stable"
    sudo add-apt-repository -y ppa:juju/stable
    sudo apt-get update
  fi

  package_status 'juju'
  package_status 'charm-tools'
}
29
vendor/k8s.io/kubernetes/cluster/juju/return-node-ips.py
generated
vendored
Executable file
@ -0,0 +1,29 @@
#!/usr/bin/env python

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import sys

# This script helps parse out the private IP addresses from the
# `juju run` command's JSON object, see cluster/juju/util.sh

if len(sys.argv) > 1:
    # It takes the JSON output as the first argument.
    nodes = json.loads(sys.argv[1])
    # There can be multiple nodes; print the Stdout of each.
    for num in nodes:
        print(num['Stdout'].rstrip())
else:
    sys.exit(1)
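A quick usage illustration, reusing the sample JSON shape documented in util.sh's detect-nodes below (the values and the relative script path are examples):

    import json
    import subprocess

    sample = json.dumps([
        {"MachineId": "2", "Stdout": "192.168.122.188\n",
         "UnitId": "kubernetes/0"},
        {"MachineId": "3", "Stdout": "192.168.122.166\n",
         "UnitId": "kubernetes/1"},
    ])
    # Prints one IP per line: 192.168.122.188, then 192.168.122.166.
    print(subprocess.check_output(
        ['./return-node-ips.py', sample]).decode('utf8'))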
150
vendor/k8s.io/kubernetes/cluster/juju/util.sh
generated
vendored
Executable file
@ -0,0 +1,150 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


set -o errexit
set -o nounset
set -o pipefail
#set -o xtrace

UTIL_SCRIPT=$(readlink -m "${BASH_SOURCE}")
JUJU_PATH=$(dirname "${UTIL_SCRIPT}")
KUBE_ROOT=$(readlink -m "${JUJU_PATH}/../../")
# Use the config file specified in $KUBE_CONFIG_FILE, or config-default.sh.
source "${JUJU_PATH}/${KUBE_CONFIG_FILE-config-default.sh}"
# This attempts installation of Juju - this really needs to support multiple
# providers/distros - but I'm super familiar with ubuntu so assume that for now.
source "${JUJU_PATH}/prereqs/ubuntu-juju.sh"
export JUJU_REPOSITORY="${JUJU_PATH}/charms"
KUBE_BUNDLE_PATH="${JUJU_PATH}/bundles/local.yaml"
# The directory for the kubectl binary; this is one of the paths in kubectl.sh.
KUBECTL_DIR="${KUBE_ROOT}/platforms/linux/amd64"


function build-local() {
  # This used to build the kubernetes project. Now it rebuilds the charm(s)
  # living in `cluster/juju/layers`
  charm build "${JUJU_PATH}/layers/kubernetes" -o "${JUJU_REPOSITORY}" -r --no-local-layers
}

function detect-master() {
  local kubestatus

  # Capturing a newline, and my awk-fu was weak - pipe through tr -d
  kubestatus=$(juju status --format=oneline kubernetes | grep "${KUBE_MASTER_NAME}" | awk '{print $3}' | tr -d "\n")
  export KUBE_MASTER_IP=${kubestatus}
  export KUBE_SERVER=https://${KUBE_MASTER_IP}:6443
}

function detect-nodes() {
  # Run the Juju command that gets the minion private IP addresses.
  local ipoutput
  ipoutput=$(juju run --application kubernetes "unit-get private-address" --format=json)
  # [
  #   {"MachineId":"2","Stdout":"192.168.122.188\n","UnitId":"kubernetes/0"},
  #   {"MachineId":"3","Stdout":"192.168.122.166\n","UnitId":"kubernetes/1"}
  # ]

  # Strip out the IP addresses
  export KUBE_NODE_IP_ADDRESSES=($(${JUJU_PATH}/return-node-ips.py "${ipoutput}"))
  # echo "Kubernetes minions: " ${KUBE_NODE_IP_ADDRESSES[@]} 1>&2
  export NUM_NODES=${#KUBE_NODE_IP_ADDRESSES[@]}
}

function kube-up() {
  build-local

  # Replace the charm directory in the bundle.
  sed "s|__CHARM_DIR__|${JUJU_REPOSITORY}|" < "${KUBE_BUNDLE_PATH}.base" > "${KUBE_BUNDLE_PATH}"

  # The juju deploy command will deploy the bundle and can be run
  # multiple times to continue deploying the parts that fail.
  juju deploy "${KUBE_BUNDLE_PATH}"

  source "${KUBE_ROOT}/cluster/common.sh"

  # Sleep due to juju bug http://pad.lv/1432759
  sleep-status
  detect-master
  detect-nodes

  # Copy kubectl, the cert and key to this machine from master.
  (
    umask 077
    mkdir -p "${KUBECTL_DIR}"
    juju scp "${KUBE_MASTER_NAME}:kubectl_package.tar.gz" "${KUBECTL_DIR}"
    tar xfz "${KUBECTL_DIR}/kubectl_package.tar.gz" -C "${KUBECTL_DIR}"
  )
  # Export the location of the kubectl configuration file.
  export KUBECONFIG="${KUBECTL_DIR}/kubeconfig"
}

function kube-down() {
  local force="${1-}"
  local jujuenv
  jujuenv=$(juju switch)
  juju destroy-model "${jujuenv}" ${force} || true
  # Clean up the generated charm files.
  rm -rf "${KUBE_ROOT}/cluster/juju/charms"
  # Clean up the kubectl binary and config file.
  rm -rf "${KUBECTL_DIR}"
}

function prepare-e2e() {
  echo "prepare-e2e() The Juju provider does not need any preparations for e2e." 1>&2
}

function sleep-status() {
  local i
  local maxtime
  local jujustatus
  i=0
  maxtime=900
  jujustatus=''
  echo "Waiting up to 15 minutes to allow the cluster to come online... wait for it..." 1>&2

  while [[ ${i} -lt ${maxtime} && -z ${jujustatus} ]]; do
    sleep 15
    i=$((i + 15))
    jujustatus=$(${JUJU_PATH}/identify-leaders.py)
    export KUBE_MASTER_NAME=${jujustatus}
  done
}

# Execute prior to running tests to build a release if required for environment.
function test-build-release {
  echo "test-build-release() " 1>&2
}

# Execute prior to running tests to initialize required structure. This is
# called from hack/e2e.go only when running -up.
function test-setup {
  "${KUBE_ROOT}/cluster/kube-up.sh"
}

# Execute after running tests to perform any required clean-up. This is called
# from hack/e2e.go
function test-teardown() {
  kube-down "-y"
}

# Verify the prerequisites are satisfied before running.
function verify-prereqs() {
  gather_installation_reqs
}