Fresh dep ensure

Mike Cronce
2018-11-26 13:23:56 -05:00
parent 93cb8a04d7
commit 407478ab9a
9016 changed files with 551394 additions and 279685 deletions

View File

@ -4,3 +4,5 @@ approvers:
reviewers:
- coffeepac
- piosz
labels:
- sig/instrumentation

View File

@ -19,15 +19,16 @@ a Deployment, but allows for maintaining state on storage volumes.
### Security
Elasticsearch has capabilities to enable authorization using the
[X-Pack plugin][xPack]. See configuration parameter `xpack.security.enabled`
in Elasticsearch and Kibana configurations. It can also be set via the
`XPACK_SECURITY_ENABLED` env variable. After enabling the feature,
follow [official documentation][setupCreds] to set up credentials in
Elasticsearch and Kibana. Don't forget to propagate those credentials also to
Fluentd in its [configuration][fluentdCreds], using for example
[environment variables][fluentdEnvVar]. You can utilize [ConfigMaps][configMap]
and [Secrets][secret] to store credentials in the Kubernetes apiserver.
Elasticsearch has capabilities to enable authorization using the
[X-Pack plugin][xPack]. For the sake of simplicity, this example uses the
fully open source prebuilt images from Elastic that do not contain the X-Pack
plugin. If you need these features, please consider building the images from
either the "basic" or "platinum" version. After enabling these features, follow
the [official documentation][setupCreds] to set up credentials in Elasticsearch
and Kibana. Don't forget to also propagate those credentials to Fluentd in its
[configuration][fluentdCreds], for example using
[environment variables][fluentdEnvVar]. You can utilize [ConfigMaps][configMap]
and [Secrets][secret] to store credentials in the Kubernetes apiserver.
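
For illustration only, here is a minimal sketch of that last point: storing
credentials in a Secret and exposing them to Fluentd as environment variables.
The Secret name `es-credentials`, its key names, and the
`FLUENT_ELASTICSEARCH_*` variable names are assumptions made for this sketch
and are not defined anywhere in this repository:

```yaml
# Hypothetical Secret holding Elasticsearch credentials; the name
# `es-credentials` and the keys below are illustrative only.
apiVersion: v1
kind: Secret
metadata:
  name: es-credentials
  namespace: kube-system
type: Opaque
stringData:
  username: fluentd
  password: change-me
---
# Minimal Pod showing how a Fluentd container could consume that Secret as
# environment variables. The real addon runs Fluentd as a DaemonSet; this is
# only a sketch of the env wiring.
apiVersion: v1
kind: Pod
metadata:
  name: fluentd-es-example
  namespace: kube-system
spec:
  containers:
  - name: fluentd-es
    image: k8s.gcr.io/fluentd-elasticsearch:v2.2.0
    env:
    - name: FLUENT_ELASTICSEARCH_USER
      valueFrom:
        secretKeyRef:
          name: es-credentials
          key: username
    - name: FLUENT_ELASTICSEARCH_PASSWORD
      valueFrom:
        secretKeyRef:
          name: es-credentials
          key: password
```

The Elasticsearch output section of the Fluentd configuration could then read
these variables with embedded Ruby, e.g.
`user "#{ENV['FLUENT_ELASTICSEARCH_USER']}"`.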
### Initialization

View File

@ -18,11 +18,11 @@ go_library(
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
"//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM docker.elastic.co/elasticsearch/elasticsearch:5.6.4
FROM docker.elastic.co/elasticsearch/elasticsearch-oss:6.3.2
VOLUME ["/data"]
EXPOSE 9200 9300

View File

@ -16,7 +16,7 @@
PREFIX = staging-k8s.gcr.io
IMAGE = elasticsearch
TAG = v5.6.4
TAG = v6.3.0
build:
docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) .

View File

@ -12,6 +12,3 @@ path.data: /data
network.host: 0.0.0.0
discovery.zen.minimum_master_nodes: ${MINIMUM_MASTER_NODES}
xpack.security.enabled: false
xpack.monitoring.enabled: false

View File

@ -20,14 +20,15 @@ import (
"flag"
"fmt"
"os"
"strconv"
"strings"
"time"
"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/klog"
api "k8s.io/kubernetes/pkg/apis/core"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)
@ -60,22 +61,22 @@ func flattenSubsets(subsets []api.EndpointSubset) []string {
func main() {
flag.Parse()
glog.Info("Kubernetes Elasticsearch logging discovery")
klog.Info("Kubernetes Elasticsearch logging discovery")
cc, err := buildConfigFromEnvs(os.Getenv("APISERVER_HOST"), os.Getenv("KUBE_CONFIG_FILE"))
if err != nil {
glog.Fatalf("Failed to make client: %v", err)
klog.Fatalf("Failed to make client: %v", err)
}
client, err := clientset.NewForConfig(cc)
if err != nil {
glog.Fatalf("Failed to make client: %v", err)
klog.Fatalf("Failed to make client: %v", err)
}
namespace := metav1.NamespaceSystem
envNamespace := os.Getenv("NAMESPACE")
if envNamespace != "" {
if _, err := client.Core().Namespaces().Get(envNamespace, metav1.GetOptions{}); err != nil {
glog.Fatalf("%s namespace doesn't exist: %v", envNamespace, err)
klog.Fatalf("%s namespace doesn't exist: %v", envNamespace, err)
}
namespace = envNamespace
}
@ -97,32 +98,31 @@ func main() {
// If we did not find an elasticsearch logging service then log a warning
// and return without adding any unicast hosts.
if elasticsearch == nil {
glog.Warningf("Failed to find the elasticsearch-logging service: %v", err)
klog.Warningf("Failed to find the elasticsearch-logging service: %v", err)
return
}
var endpoints *api.Endpoints
addrs := []string{}
// Wait for some endpoints.
count := 0
count, _ := strconv.Atoi(os.Getenv("MINIMUM_MASTER_NODES"))
for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
endpoints, err = client.Core().Endpoints(namespace).Get(serviceName, metav1.GetOptions{})
if err != nil {
continue
}
addrs = flattenSubsets(endpoints.Subsets)
glog.Infof("Found %s", addrs)
if len(addrs) > 0 && len(addrs) == count {
klog.Infof("Found %s", addrs)
if len(addrs) > 0 && len(addrs) >= count {
break
}
count = len(addrs)
}
// If there was an error finding endpoints then log a warning and quit.
if err != nil {
glog.Warningf("Error finding endpoints: %v", err)
klog.Warningf("Error finding endpoints: %v", err)
return
}
glog.Infof("Endpoints = %s", addrs)
klog.Infof("Endpoints = %s", addrs)
fmt.Printf("discovery.zen.ping.unicast.hosts: [%s]\n", strings.Join(addrs, ", "))
}

View File

@ -26,4 +26,4 @@ export MINIMUM_MASTER_NODES=${MINIMUM_MASTER_NODES:-2}
chown -R elasticsearch:elasticsearch /data
./bin/elasticsearch_logging_discovery >> ./config/elasticsearch.yml
exec su elasticsearch -c ./bin/es-docker
exec su elasticsearch -c /usr/local/bin/docker-entrypoint.sh

View File

@ -54,7 +54,7 @@ metadata:
namespace: kube-system
labels:
k8s-app: elasticsearch-logging
version: v5.6.4
version: v6.3.0
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
@ -63,17 +63,17 @@ spec:
selector:
matchLabels:
k8s-app: elasticsearch-logging
version: v5.6.4
version: v6.3.0
template:
metadata:
labels:
k8s-app: elasticsearch-logging
version: v5.6.4
version: v6.3.0
kubernetes.io/cluster-service: "true"
spec:
serviceAccountName: elasticsearch-logging
containers:
- image: k8s.gcr.io/elasticsearch:v5.6.4
- image: k8s.gcr.io/elasticsearch:v6.3.0
name: elasticsearch-logging
resources:
# need more cpu upon initialization, therefore burstable class

View File

@ -1,7 +1,7 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: fluentd-es-config-v0.1.4
name: fluentd-es-config-v0.1.6
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile
@ -115,7 +115,6 @@ data:
@type tail
path /var/log/containers/*.log
pos_file /var/log/es-containers.log.pos
time_format %Y-%m-%dT%H:%M:%S.%NZ
tag raw.kubernetes.*
read_from_head true
<parse>
@ -273,21 +272,6 @@ data:
tag kube-scheduler
</source>
# Example:
# I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
<source>
@id rescheduler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/rescheduler.log
pos_file /var/log/es-rescheduler.log.pos
tag rescheduler
</source>
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
@ -323,10 +307,11 @@ data:
<source>
@id journald-docker
@type systemd
filters [{ "_SYSTEMD_UNIT": "docker.service" }]
matches [{ "_SYSTEMD_UNIT": "docker.service" }]
<storage>
@type local
persistent true
path /var/log/journald-docker.pos
</storage>
read_from_head true
tag docker
@ -335,10 +320,11 @@ data:
<source>
@id journald-container-runtime
@type systemd
filters [{ "_SYSTEMD_UNIT": "{{ container_runtime }}.service" }]
matches [{ "_SYSTEMD_UNIT": "{{ fluentd_container_runtime_service }}.service" }]
<storage>
@type local
persistent true
path /var/log/journald-container-runtime.pos
</storage>
read_from_head true
tag container-runtime
@ -347,10 +333,11 @@ data:
<source>
@id journald-kubelet
@type systemd
filters [{ "_SYSTEMD_UNIT": "kubelet.service" }]
matches [{ "_SYSTEMD_UNIT": "kubelet.service" }]
<storage>
@type local
persistent true
path /var/log/journald-kubelet.pos
</storage>
read_from_head true
tag kubelet
@ -359,22 +346,24 @@ data:
<source>
@id journald-node-problem-detector
@type systemd
filters [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
matches [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
<storage>
@type local
persistent true
path /var/log/journald-node-problem-detector.pos
</storage>
read_from_head true
tag node-problem-detector
</source>
<source>
@id kernel
@type systemd
filters [{ "_TRANSPORT": "kernel" }]
matches [{ "_TRANSPORT": "kernel" }]
<storage>
@type local
persistent true
path /var/log/kernel.pos
</storage>
<entry>
fields_strip_underscores true
@ -431,10 +420,19 @@ data:
@type kubernetes_metadata
</filter>
# Concatenate multi-line logs
<filter **>
@type concat
key message
multiline_end_regexp /\n$/
separator ""
</filter>
<match **>
@id elasticsearch
@type elasticsearch
@log_level info
type_name fluentd
include_tag_key true
host elasticsearch-logging
port 9200

View File

@ -48,24 +48,24 @@ roleRef:
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: fluentd-es-v2.0.4
name: fluentd-es-v2.2.1
namespace: kube-system
labels:
k8s-app: fluentd-es
version: v2.0.4
version: v2.2.1
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: fluentd-es
version: v2.0.4
version: v2.2.1
template:
metadata:
labels:
k8s-app: fluentd-es
kubernetes.io/cluster-service: "true"
version: v2.0.4
version: v2.2.1
# This annotation ensures that fluentd does not get evicted if the node
# supports critical pod annotation based priority scheme.
# Note that this does not guarantee admission on the nodes (#40573).
@ -77,7 +77,7 @@ spec:
serviceAccountName: fluentd-es
containers:
- name: fluentd-es
image: k8s.gcr.io/fluentd-elasticsearch:v2.0.4
image: k8s.gcr.io/fluentd-elasticsearch:v2.2.0
env:
- name: FLUENTD_ARGS
value: --no-supervisor -q
@ -107,4 +107,4 @@ spec:
path: /var/lib/docker/containers
- name: config-volume
configMap:
name: fluentd-es-config-v0.1.4
name: fluentd-es-config-v0.1.6

View File

@ -55,4 +55,4 @@ EXPOSE 80
ENV LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1
# Start Fluentd to pick up our config that watches Docker container logs.
CMD /run.sh $FLUENTD_ARGS
CMD ["/run.sh"]

View File

@ -1,11 +1,12 @@
source 'https://rubygems.org'
gem 'fluentd', '<=1.1.0'
gem 'activesupport', '~>5.1.4'
gem 'fluent-plugin-kubernetes_metadata_filter', '~>1.0.0'
gem 'fluent-plugin-elasticsearch', '~>2.4.1'
gem 'fluent-plugin-systemd', '~>0.3.1'
gem 'fluent-plugin-detect-exceptions', '~>0.0.9'
gem 'fluent-plugin-prometheus', '~>0.3.0'
gem 'fluentd', '<=1.2.4'
gem 'activesupport', '~>5.2.1'
gem 'fluent-plugin-concat', '~>2.3.0'
gem 'fluent-plugin-detect-exceptions', '~>0.0.11'
gem 'fluent-plugin-elasticsearch', '~>2.11.5'
gem 'fluent-plugin-kubernetes_metadata_filter', '~>2.0.0'
gem 'fluent-plugin-multi-format-parser', '~>1.0.0'
gem 'oj', '~>3.3.1.0'
gem 'fluent-plugin-prometheus', '~>1.0.1'
gem 'fluent-plugin-systemd', '~>1.0.1'
gem 'oj', '~>3.6.5'

View File

@ -16,7 +16,7 @@
PREFIX = staging-k8s.gcr.io
IMAGE = fluentd-elasticsearch
TAG = v2.0.4
TAG = v2.3.1
build:
docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) .

View File

@ -20,4 +20,4 @@
# For systems without journald
mkdir -p /var/log/journal
exec /usr/local/bin/fluentd $@
exec /usr/local/bin/fluentd $FLUENTD_ARGS

View File

@ -21,7 +21,7 @@ spec:
spec:
containers:
- name: kibana-logging
image: docker.elastic.co/kibana/kibana:5.6.4
image: docker.elastic.co/kibana/kibana-oss:6.3.2
resources:
# need more cpu upon initialization, therefore burstable class
limits:
@ -33,10 +33,6 @@ spec:
value: http://elasticsearch-logging:9200
- name: SERVER_BASEPATH
value: /api/v1/namespaces/kube-system/services/kibana-logging/proxy
- name: XPACK_MONITORING_ENABLED
value: "false"
- name: XPACK_SECURITY_ENABLED
value: "false"
ports:
- containerPort: 5601
name: ui