Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 10:33:35 +00:00

Commit: vendor files

vendor/k8s.io/kubernetes/examples/storage/cassandra/README.md (1 line, generated, vendored, new file)
@@ -0,0 +1 @@
This file has moved to [https://github.com/kubernetes/examples/blob/master/cassandra/README.md](https://github.com/kubernetes/examples/blob/master/cassandra/README.md)

vendor/k8s.io/kubernetes/examples/storage/cassandra/cassandra-controller.yaml (57 lines, generated, vendored, new file)
@@ -0,0 +1,57 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: cassandra
  # The labels will be applied automatically
  # from the labels in the pod template, if not set
  # labels:
  #   app: cassandra
spec:
  replicas: 2
  # The selector will be applied automatically
  # from the labels in the pod template, if not set.
  # selector:
  #   app: cassandra
  template:
    metadata:
      labels:
        app: cassandra
    spec:
      containers:
      - command:
        - /run.sh
        resources:
          limits:
            cpu: 0.5
        env:
        - name: MAX_HEAP_SIZE
          value: 512M
        - name: HEAP_NEWSIZE
          value: 100M
        - name: CASSANDRA_SEED_PROVIDER
          value: "io.k8s.cassandra.KubernetesSeedProvider"
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        image: gcr.io/google-samples/cassandra:v12
        name: cassandra
        ports:
        - containerPort: 7000
          name: intra-node
        - containerPort: 7001
          name: tls-intra-node
        - containerPort: 7199
          name: jmx
        - containerPort: 9042
          name: cql
        volumeMounts:
        - mountPath: /cassandra_data
          name: data
      volumes:
      - name: data
        emptyDir: {}
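
A quick usage sketch (commands are illustrative, not part of this commit; assumes kubectl points at a test cluster and the manifest is saved as cassandra-controller.yaml):

    kubectl create -f cassandra-controller.yaml
    kubectl get rc cassandra
    # grow the ring once the first pods are Running
    kubectl scale rc cassandra --replicas=4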

vendor/k8s.io/kubernetes/examples/storage/cassandra/cassandra-daemonset.yaml (56 lines, generated, vendored, new file)
@@ -0,0 +1,56 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  labels:
    name: cassandra
  name: cassandra
spec:
  template:
    metadata:
      labels:
        app: cassandra
    spec:
      # Filter to specific nodes:
      # nodeSelector:
      #   app: cassandra
      containers:
      - command:
        - /run.sh
        env:
        - name: MAX_HEAP_SIZE
          value: 512M
        - name: HEAP_NEWSIZE
          value: 100M
        - name: CASSANDRA_SEED_PROVIDER
          value: "io.k8s.cassandra.KubernetesSeedProvider"
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        image: gcr.io/google-samples/cassandra:v12
        name: cassandra
        ports:
        - containerPort: 7000
          name: intra-node
        - containerPort: 7001
          name: tls-intra-node
        - containerPort: 7199
          name: jmx
        - containerPort: 9042
          name: cql
          # If you need it, it is going away in C* 4.0
        #- containerPort: 9160
        #  name: thrift
        resources:
          requests:
            cpu: 0.5
        volumeMounts:
        - mountPath: /cassandra_data
          name: data
      volumes:
      - name: data
        emptyDir: {}
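
As a sketch, the DaemonSet rolls out the same way (the node label only matters if the commented nodeSelector block above is uncommented; <node-name> is a placeholder):

    kubectl create -f cassandra-daemonset.yaml
    kubectl label node <node-name> app=cassandra
    kubectl get pods -l app=cassandra -o wide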

vendor/k8s.io/kubernetes/examples/storage/cassandra/cassandra-service.yaml (12 lines, generated, vendored, new file)
@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    app: cassandra
  name: cassandra
spec:
  clusterIP: None
  ports:
  - port: 9042
  selector:
    app: cassandra
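
Because clusterIP is None, this is a headless Service: DNS resolves the name to the individual pod IPs rather than a virtual IP, which is what lets the StatefulSet below address peers as cassandra-0.cassandra and so on. A minimal check (illustrative; assumes a pod in the same namespace with nslookup available):

    kubectl create -f cassandra-service.yaml
    kubectl exec <some-pod> -- nslookup cassandra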

vendor/k8s.io/kubernetes/examples/storage/cassandra/cassandra-statefulset.yaml (98 lines, generated, vendored, new file)
@@ -0,0 +1,98 @@
apiVersion: "apps/v1beta1"
kind: StatefulSet
metadata:
  name: cassandra
spec:
  serviceName: cassandra
  replicas: 3
  template:
    metadata:
      labels:
        app: cassandra
    spec:
      terminationGracePeriodSeconds: 1800
      containers:
      - name: cassandra
        image: gcr.io/google-samples/cassandra:v12
        imagePullPolicy: Always
        ports:
        - containerPort: 7000
          name: intra-node
        - containerPort: 7001
          name: tls-intra-node
        - containerPort: 7199
          name: jmx
        - containerPort: 9042
          name: cql
        resources:
          limits:
            cpu: "500m"
            memory: 1Gi
          requests:
            cpu: "500m"
            memory: 1Gi
        securityContext:
          capabilities:
            add:
            - IPC_LOCK
        lifecycle:
          preStop:
            exec:
              command: ["/bin/sh", "-c", "nodetool drain"]
        env:
        - name: MAX_HEAP_SIZE
          value: 512M
        - name: HEAP_NEWSIZE
          value: 100M
        - name: CASSANDRA_SEEDS
          value: "cassandra-0.cassandra.default.svc.cluster.local"
        - name: CASSANDRA_CLUSTER_NAME
          value: "K8Demo"
        - name: CASSANDRA_DC
          value: "DC1-K8Demo"
        - name: CASSANDRA_RACK
          value: "Rack1-K8Demo"
        - name: CASSANDRA_AUTO_BOOTSTRAP
          value: "false"
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        readinessProbe:
          exec:
            command:
            - /bin/bash
            - -c
            - /ready-probe.sh
          initialDelaySeconds: 15
          timeoutSeconds: 5
        # These volume mounts are persistent. They are like inline claims,
        # but not exactly because the names need to match exactly one of
        # the stateful pod volumes.
        volumeMounts:
        - name: cassandra-data
          mountPath: /cassandra_data
  # These are converted to volume claims by the controller
  # and mounted at the paths mentioned above.
  # do not use these in production until ssd GCEPersistentDisk or other ssd pd
  volumeClaimTemplates:
  - metadata:
      name: cassandra-data
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 1Gi
      storageClassName: fast
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: fast
provisioner: kubernetes.io/gce-pd
parameters:
  type: pd-ssd
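
A sketch of bringing up the ring and verifying it (illustrative; assumes the headless Service above exists and the cluster can provision the "fast" StorageClass):

    kubectl create -f cassandra-statefulset.yaml
    # pods come up in order: cassandra-0, then cassandra-1, then cassandra-2
    kubectl get pods -l app=cassandra -w
    # once ready, check ring membership using the nodetool shipped in the image
    kubectl exec cassandra-0 -- nodetool status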

vendor/k8s.io/kubernetes/examples/storage/cassandra/image/Dockerfile (131 lines, generated, vendored, new file)
@@ -0,0 +1,131 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM gcr.io/google_containers/ubuntu-slim:0.9

ARG BUILD_DATE
ARG VCS_REF
ARG CASSANDRA_VERSION
ARG DEV_CONTAINER

LABEL \
    org.label-schema.build-date=$BUILD_DATE \
    org.label-schema.docker.dockerfile="/Dockerfile" \
    org.label-schema.license="Apache License 2.0" \
    org.label-schema.name="k8s-for-greeks/docker-cassandra-k8s" \
    org.label-schema.url="https://github.com/k8s-for-greeks/" \
    org.label-schema.vcs-ref=$VCS_REF \
    org.label-schema.vcs-type="Git" \
    org.label-schema.vcs-url="https://github.com/k8s-for-greeks/docker-cassandra-k8s"

ENV CASSANDRA_HOME=/usr/local/apache-cassandra-${CASSANDRA_VERSION} \
    CASSANDRA_CONF=/etc/cassandra \
    CASSANDRA_DATA=/cassandra_data \
    CASSANDRA_LOGS=/var/log/cassandra \
    JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64 \
    PATH=${PATH}:/usr/lib/jvm/java-8-openjdk-amd64/bin:/usr/local/apache-cassandra-${CASSANDRA_VERSION}/bin \
    DI_VERSION=1.2.0 \
    DI_SHA=81231da1cd074fdc81af62789fead8641ef3f24b6b07366a1c34e5b059faf363

ADD files /

RUN set -e && echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections \
    && apt-get update && apt-get -qq -y --force-yes install --no-install-recommends \
        openjdk-8-jre-headless \
        libjemalloc1 \
        localepurge \
        wget && \
    mirror_url=$( wget -q -O - http://www.apache.org/dyn/closer.cgi/cassandra/ \
        | sed -n 's#.*href="\(http://.*/cassandra\/[^"]*\)".*#\1#p' \
        | head -n 1 \
    ) \
    && wget -q -O - ${mirror_url}/${CASSANDRA_VERSION}/apache-cassandra-${CASSANDRA_VERSION}-bin.tar.gz \
        | tar -xzf - -C /usr/local \
    && wget -q -O - https://github.com/Yelp/dumb-init/releases/download/v${DI_VERSION}/dumb-init_${DI_VERSION}_amd64 > /sbin/dumb-init \
    && echo "$DI_SHA  /sbin/dumb-init" | sha256sum -c - \
    && chmod +x /sbin/dumb-init \
    && chmod +x /ready-probe.sh \
    && mkdir -p /cassandra_data/data \
    && mkdir -p /etc/cassandra \
    && mv /logback.xml /cassandra.yaml /jvm.options /etc/cassandra/ \
    && mv /usr/local/apache-cassandra-${CASSANDRA_VERSION}/conf/cassandra-env.sh /etc/cassandra/ \
    && adduser --disabled-password --no-create-home --gecos '' --disabled-login cassandra \
    && chown cassandra: /ready-probe.sh \
    && if [ -n "$DEV_CONTAINER" ]; then apt-get -y --no-install-recommends install python; else rm -rf $CASSANDRA_HOME/pylib; fi \
    && apt-get -y purge wget localepurge \
    && apt-get -y autoremove \
    && apt-get clean \
    && rm -rf \
        $CASSANDRA_HOME/*.txt \
        $CASSANDRA_HOME/doc \
        $CASSANDRA_HOME/javadoc \
        $CASSANDRA_HOME/tools/*.yaml \
        $CASSANDRA_HOME/tools/bin/*.bat \
        $CASSANDRA_HOME/bin/*.bat \
        doc \
        man \
        info \
        locale \
        common-licenses \
        ~/.bashrc \
        /var/lib/apt/lists/* \
        /var/log/* \
        /var/cache/debconf/* \
        /etc/systemd \
        /lib/lsb \
        /lib/udev \
        /usr/share/doc/ \
        /usr/share/doc-base/ \
        /usr/share/man/ \
        /tmp/* \
        /usr/lib/jvm/java-8-openjdk-amd64/jre/plugin \
        /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/javaws \
        /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/jjs \
        /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/orbd \
        /usr/lib/jvm/java-8-openjdk-amd64/bin/pack200 \
        /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/policytool \
        /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/rmid \
        /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/rmiregistry \
        /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/servertool \
        /usr/lib/jvm/java-8-openjdk-amd64/bin/tnameserv \
        /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/unpack200 \
        /usr/lib/jvm/java-8-openjdk-amd64/jre/lib/javaws.jar \
        /usr/lib/jvm/java-8-openjdk-amd64/jre/lib/deploy* \
        /usr/lib/jvm/java-8-openjdk-amd64/jre/lib/desktop \
        /usr/lib/jvm/java-8-openjdk-amd64/jre/lib/*javafx* \
        /usr/lib/jvm/java-8-openjdk-amd64/jre/lib/*jfx* \
        /usr/lib/jvm/java-8-openjdk-amd64/jre/lib/amd64/libdecora_sse.so \
        /usr/lib/jvm/java-8-openjdk-amd64/jre/lib/amd64/libprism_*.so \
        /usr/lib/jvm/java-8-openjdk-amd64/jre/lib/amd64/libfxplugins.so \
        /usr/lib/jvm/java-8-openjdk-amd64/jre/lib/amd64/libglass.so \
        /usr/lib/jvm/java-8-openjdk-amd64/jre/lib/amd64/libgstreamer-lite.so \
        /usr/lib/jvm/java-8-openjdk-amd64/jre/lib/amd64/libjavafx*.so \
        /usr/lib/jvm/java-8-openjdk-amd64/jre/lib/amd64/libjfx*.so \
        /usr/lib/jvm/java-8-openjdk-amd64/jre/lib/ext/jfxrt.jar \
        /usr/lib/jvm/java-8-openjdk-amd64/jre/lib/ext/nashorn.jar \
        /usr/lib/jvm/java-8-openjdk-amd64/jre/lib/oblique-fonts \
        /usr/lib/jvm/java-8-openjdk-amd64/jre/lib/plugin.jar \
        /usr/lib/jvm/java-8-openjdk-amd64/man


VOLUME ["/$CASSANDRA_DATA"]

# 7000: intra-node communication
# 7001: TLS intra-node communication
# 7199: JMX
# 9042: CQL
# 9160: thrift service
EXPOSE 7000 7001 7199 9042 9160

CMD ["/sbin/dumb-init", "/bin/bash", "/run.sh"]
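
Building the image by hand is a one-liner sketch (the tag is illustrative; the Makefile below drives the same build, and setting the DEV_CONTAINER build arg additionally keeps python and Cassandra's pylib in the image for debugging):

    docker build --pull --build-arg "CASSANDRA_VERSION=3.10" -t cassandra:v12 .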

vendor/k8s.io/kubernetes/examples/storage/cassandra/image/Makefile (41 lines, generated, vendored, new file)
@@ -0,0 +1,41 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# build the cassandra image.
VERSION=v12
PROJECT_ID?=google_samples
PROJECT=gcr.io/${PROJECT_ID}
CASSANDRA_VERSION=3.10

all: kubernetes-cassandra.jar build

kubernetes-cassandra.jar: ../java/* ../java/src/main/java/io/k8s/cassandra/*.java
	cd ../java && mvn clean && mvn package
	mv ../java/target/kubernetes-cassandra*.jar files/kubernetes-cassandra.jar
	cd ../java && mvn clean

container:
	@echo "Building ${PROJECT}/cassandra:${VERSION}"
	docker build --pull --build-arg "CASSANDRA_VERSION=${CASSANDRA_VERSION}" -t ${PROJECT}/cassandra:${VERSION} .

container-dev:
	docker build --pull --build-arg "CASSANDRA_VERSION=${CASSANDRA_VERSION}" --build-arg "DEV_CONTAINER=true" -t ${PROJECT}/cassandra:${VERSION}-dev .

build: container container-dev

push: build
	gcloud docker -- push ${PROJECT}/cassandra:${VERSION}
	gcloud docker -- push ${PROJECT}/cassandra:${VERSION}-dev

.PHONY: all build push
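
Typical invocations, as a sketch (PROJECT_ID is overridable thanks to the ?= default above; push assumes gcloud credentials; my-project is a placeholder):

    make container                     # build gcr.io/google_samples/cassandra:v12
    make container-dev                 # dev variant with python/pylib kept
    make push PROJECT_ID=my-project    # build both images and push them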

vendor/k8s.io/kubernetes/examples/storage/cassandra/image/files/cassandra.yaml (990 lines, generated, vendored, new file)
@@ -0,0 +1,990 @@
# Cassandra storage config YAML

# NOTE:
#   See http://wiki.apache.org/cassandra/StorageConfiguration for
#   full explanations of configuration directives
# /NOTE

# The name of the cluster. This is mainly used to prevent machines in
# one logical cluster from joining another.
cluster_name: 'Test Cluster'

# This defines the number of tokens randomly assigned to this node on the ring
# The more tokens, relative to other nodes, the larger the proportion of data
# that this node will store. You probably want all nodes to have the same number
# of tokens assuming they have equal hardware capability.
#
# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
# and will use the initial_token as described below.
#
# Specifying initial_token will override this setting on the node's initial start,
# on subsequent starts, this setting will apply even if initial token is set.
#
# If you already have a cluster with 1 token per node, and wish to migrate to
# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
num_tokens: 256

# Triggers automatic allocation of num_tokens tokens for this node. The allocation
# algorithm attempts to choose tokens in a way that optimizes replicated load over
# the nodes in the datacenter for the replication strategy used by the specified
# keyspace.
#
# The load assigned to each node will be close to proportional to its number of
# vnodes.
#
# Only supported with the Murmur3Partitioner.
# allocate_tokens_for_keyspace: KEYSPACE

# initial_token allows you to specify tokens manually. While you can use it with
# vnodes (num_tokens > 1, above) -- in which case you should provide a
# comma-separated list -- it's primarily used when adding nodes to legacy clusters
# that do not have vnodes enabled.
# initial_token:

# See http://wiki.apache.org/cassandra/HintedHandoff
# May either be "true" or "false" to enable globally
hinted_handoff_enabled: true
# When hinted_handoff_enabled is true, a black list of data centers that will not
# perform hinted handoff
# hinted_handoff_disabled_datacenters:
#    - DC1
#    - DC2
# this defines the maximum amount of time a dead host will have hints
# generated. After it has been dead this long, new hints for it will not be
# created until it has been seen alive and gone down again.
max_hint_window_in_ms: 10800000 # 3 hours

# Maximum throttle in KBs per second, per delivery thread. This will be
# reduced proportionally to the number of nodes in the cluster. (If there
# are two nodes in the cluster, each delivery thread will use the maximum
# rate; if there are three, each will throttle to half of the maximum,
# since we expect two nodes to be delivering hints simultaneously.)
hinted_handoff_throttle_in_kb: 1024

# Number of threads with which to deliver hints;
# Consider increasing this number when you have multi-dc deployments, since
# cross-dc handoff tends to be slower
max_hints_delivery_threads: 2

# Directory where Cassandra should store hints.
# If not set, the default directory is $CASSANDRA_HOME/data/hints.
hints_directory: /cassandra_data/hints

# How often hints should be flushed from the internal buffers to disk.
# Will *not* trigger fsync.
hints_flush_period_in_ms: 10000

# Maximum size for a single hints file, in megabytes.
max_hints_file_size_in_mb: 128

# Compression to apply to the hint files. If omitted, hints files
# will be written uncompressed. LZ4, Snappy, and Deflate compressors
# are supported.
#hints_compression:
#   - class_name: LZ4Compressor
#     parameters:
#         -

# Maximum throttle in KBs per second, total. This will be
# reduced proportionally to the number of nodes in the cluster.
batchlog_replay_throttle_in_kb: 1024

# Authentication backend, implementing IAuthenticator; used to identify users
# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator,
# PasswordAuthenticator}.
#
# - AllowAllAuthenticator performs no checks - set it to disable authentication.
# - PasswordAuthenticator relies on username/password pairs to authenticate
#   users. It keeps usernames and hashed passwords in system_auth.credentials table.
#   Please increase system_auth keyspace replication factor if you use this authenticator.
#   If using PasswordAuthenticator, CassandraRoleManager must also be used (see below)
authenticator: AllowAllAuthenticator

# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions
# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer,
# CassandraAuthorizer}.
#
# - AllowAllAuthorizer allows any action to any user - set it to disable authorization.
# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please
#   increase system_auth keyspace replication factor if you use this authorizer.
authorizer: AllowAllAuthorizer

# Part of the Authentication & Authorization backend, implementing IRoleManager; used
# to maintain grants and memberships between roles.
# Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager,
# which stores role information in the system_auth keyspace. Most functions of the
# IRoleManager require an authenticated login, so unless the configured IAuthenticator
# actually implements authentication, most of this functionality will be unavailable.
#
# - CassandraRoleManager stores role data in the system_auth keyspace. Please
#   increase system_auth keyspace replication factor if you use this role manager.
role_manager: CassandraRoleManager

# Validity period for roles cache (fetching granted roles can be an expensive
# operation depending on the role manager, CassandraRoleManager is one example)
# Granted roles are cached for authenticated sessions in AuthenticatedUser and
# after the period specified here, become eligible for (async) reload.
# Defaults to 2000, set to 0 to disable caching entirely.
# Will be disabled automatically for AllowAllAuthenticator.
roles_validity_in_ms: 2000

# Refresh interval for roles cache (if enabled).
# After this interval, cache entries become eligible for refresh. Upon next
# access, an async reload is scheduled and the old value returned until it
# completes. If roles_validity_in_ms is non-zero, then this must be
# also.
# Defaults to the same value as roles_validity_in_ms.
# roles_update_interval_in_ms: 2000

# Validity period for permissions cache (fetching permissions can be an
# expensive operation depending on the authorizer, CassandraAuthorizer is
# one example). Defaults to 2000, set to 0 to disable.
# Will be disabled automatically for AllowAllAuthorizer.
permissions_validity_in_ms: 2000

# Refresh interval for permissions cache (if enabled).
# After this interval, cache entries become eligible for refresh. Upon next
# access, an async reload is scheduled and the old value returned until it
# completes. If permissions_validity_in_ms is non-zero, then this must be
# also.
# Defaults to the same value as permissions_validity_in_ms.
# permissions_update_interval_in_ms: 2000

# Validity period for credentials cache. This cache is tightly coupled to
# the provided PasswordAuthenticator implementation of IAuthenticator. If
# another IAuthenticator implementation is configured, this cache will not
# be automatically used and so the following settings will have no effect.
# Please note, credentials are cached in their encrypted form, so while
# activating this cache may reduce the number of queries made to the
# underlying table, it may not bring a significant reduction in the
# latency of individual authentication attempts.
# Defaults to 2000, set to 0 to disable credentials caching.
credentials_validity_in_ms: 2000

# Refresh interval for credentials cache (if enabled).
# After this interval, cache entries become eligible for refresh. Upon next
# access, an async reload is scheduled and the old value returned until it
# completes. If credentials_validity_in_ms is non-zero, then this must be
# also.
# Defaults to the same value as credentials_validity_in_ms.
# credentials_update_interval_in_ms: 2000

# The partitioner is responsible for distributing groups of rows (by
# partition key) across nodes in the cluster. You should leave this
# alone for new clusters. The partitioner can NOT be changed without
# reloading all data, so when upgrading you should set this to the
# same partitioner you were already using.
#
# Besides Murmur3Partitioner, partitioners included for backwards
# compatibility include RandomPartitioner, ByteOrderedPartitioner, and
# OrderPreservingPartitioner.
#
partitioner: org.apache.cassandra.dht.Murmur3Partitioner

# Directories where Cassandra should store data on disk. Cassandra
# will spread data evenly across them, subject to the granularity of
# the configured compaction strategy.
# If not set, the default directory is $CASSANDRA_HOME/data/data.
data_file_directories:
    - /cassandra_data/data

# commit log. when running on magnetic HDD, this should be a
# separate spindle than the data directories.
# If not set, the default directory is $CASSANDRA_HOME/data/commitlog.
commitlog_directory: /cassandra_data/commitlog

# policy for data disk failures:
# die: shut down gossip and client transports and kill the JVM for any fs errors or
#      single-sstable errors, so the node can be replaced.
# stop_paranoid: shut down gossip and client transports even for single-sstable errors,
#                kill the JVM for errors during startup.
# stop: shut down gossip and client transports, leaving the node effectively dead, but
#       can still be inspected via JMX, kill the JVM for errors during startup.
# best_effort: stop using the failed disk and respond to requests based on
#              remaining available sstables. This means you WILL see obsolete
#              data at CL.ONE!
# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
disk_failure_policy: stop

# policy for commit disk failures:
# die: shut down gossip and Thrift and kill the JVM, so the node can be replaced.
# stop: shut down gossip and Thrift, leaving the node effectively dead, but
#       can still be inspected via JMX.
# stop_commit: shutdown the commit log, letting writes collect but
#              continuing to service reads, as in pre-2.0.5 Cassandra
# ignore: ignore fatal errors and let the batches fail
commit_failure_policy: stop

# Maximum size of the key cache in memory.
#
# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
# minimum, sometimes more. The key cache is fairly tiny for the amount of
# time it saves, so it's worthwhile to use it at large numbers.
# The row cache saves even more time, but must contain the entire row,
# so it is extremely space-intensive. It's best to only use the
# row cache if you have hot rows or static rows.
#
# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
#
# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
key_cache_size_in_mb:
# Duration in seconds after which Cassandra should
# save the key cache. Caches are saved to saved_caches_directory as
# specified in this configuration file.
#
# Saved caches greatly improve cold-start speeds, and is relatively cheap in
# terms of I/O for the key cache. Row cache saving is much more expensive and
# has limited use.
#
# Default is 14400 or 4 hours.
key_cache_save_period: 14400

# Number of keys from the key cache to save
# Disabled by default, meaning all keys are going to be saved
# key_cache_keys_to_save: 100

# Row cache implementation class name.
# Available implementations:
#   org.apache.cassandra.cache.OHCProvider                Fully off-heap row cache implementation (default).
#   org.apache.cassandra.cache.SerializingCacheProvider   This is the row cache implementation available
#                                                         in previous releases of Cassandra.
# row_cache_class_name: org.apache.cassandra.cache.OHCProvider

# Maximum size of the row cache in memory.
# Please note that OHC cache implementation requires some additional off-heap memory to manage
# the map structures and some in-flight memory during operations before/after cache entries can be
# accounted against the cache capacity. This overhead is usually small compared to the whole capacity.
# Do not specify more memory than the system can afford in the worst usual situation and leave some
# headroom for OS block level cache. Do never allow your system to swap.
#
# Default value is 0, to disable row caching.
row_cache_size_in_mb: 0

# Duration in seconds after which Cassandra should save the row cache.
# Caches are saved to saved_caches_directory as specified in this configuration file.
#
# Saved caches greatly improve cold-start speeds, and is relatively cheap in
# terms of I/O for the key cache. Row cache saving is much more expensive and
# has limited use.
#
# Default is 0 to disable saving the row cache.
row_cache_save_period: 0

# Number of keys from the row cache to save.
# Specify 0 (which is the default), meaning all keys are going to be saved
# row_cache_keys_to_save: 100

# Maximum size of the counter cache in memory.
#
# Counter cache helps to reduce counter locks' contention for hot counter cells.
# In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before
# write entirely. With RF > 1 a counter cache hit will still help to reduce the duration
# of the lock hold, helping with hot counter cell updates, but will not allow skipping
# the read entirely. Only the local (clock, count) tuple of a counter cell is kept
# in memory, not the whole counter, so it's relatively cheap.
#
# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
#
# Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache.
# NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.
counter_cache_size_in_mb:

# Duration in seconds after which Cassandra should
# save the counter cache (keys only). Caches are saved to saved_caches_directory as
# specified in this configuration file.
#
# Default is 7200 or 2 hours.
counter_cache_save_period: 7200

# Number of keys from the counter cache to save
# Disabled by default, meaning all keys are going to be saved
# counter_cache_keys_to_save: 100

# saved caches
# If not set, the default directory is $CASSANDRA_HOME/data/saved_caches.
saved_caches_directory: /cassandra_data/saved_caches

# commitlog_sync may be either "periodic" or "batch."
#
# When in batch mode, Cassandra won't ack writes until the commit log
# has been fsynced to disk. It will wait
# commitlog_sync_batch_window_in_ms milliseconds between fsyncs.
# This window should be kept short because the writer threads will
# be unable to do extra work while waiting. (You may need to increase
# concurrent_writes for the same reason.)
#
# commitlog_sync: batch
# commitlog_sync_batch_window_in_ms: 2
#
# the other option is "periodic" where writes may be acked immediately
# and the CommitLog is simply synced every commitlog_sync_period_in_ms
# milliseconds.
commitlog_sync: periodic
commitlog_sync_period_in_ms: 10000

# The size of the individual commitlog file segments. A commitlog
# segment may be archived, deleted, or recycled once all the data
# in it (potentially from each columnfamily in the system) has been
# flushed to sstables.
#
# The default size is 32, which is almost always fine, but if you are
# archiving commitlog segments (see commitlog_archiving.properties),
# then you probably want a finer granularity of archiving; 8 or 16 MB
# is reasonable.
# Max mutation size is also configurable via max_mutation_size_in_kb setting in
# cassandra.yaml. The default is half the size of commitlog_segment_size_in_mb * 1024.
#
# NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must
# be set to at least twice the size of max_mutation_size_in_kb / 1024
#
commitlog_segment_size_in_mb: 32

# Compression to apply to the commit log. If omitted, the commit log
# will be written uncompressed. LZ4, Snappy, and Deflate compressors
# are supported.
#commitlog_compression:
#   - class_name: LZ4Compressor
#     parameters:
#         -

# any class that implements the SeedProvider interface and has a
# constructor that takes a Map<String, String> of parameters will do.
seed_provider:
    # Addresses of hosts that are deemed contact points.
    # Cassandra nodes use this list of hosts to find each other and learn
    # the topology of the ring. You must change this if you are running
    # multiple nodes!
    #- class_name: io.k8s.cassandra.KubernetesSeedProvider
    - class_name: SEED_PROVIDER
      parameters:
          # seeds is actually a comma-delimited list of addresses.
          # Ex: "<ip1>,<ip2>,<ip3>"
          - seeds: "127.0.0.1"

# For workloads with more data than can fit in memory, Cassandra's
# bottleneck will be reads that need to fetch data from
# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
# order to allow the operations to enqueue low enough in the stack
# that the OS and drives can reorder them. Same applies to
# "concurrent_counter_writes", since counter writes read the current
# values before incrementing and writing them back.
#
# On the other hand, since writes are almost never IO bound, the ideal
# number of "concurrent_writes" is dependent on the number of cores in
# your system; (8 * number_of_cores) is a good rule of thumb.
concurrent_reads: 32
concurrent_writes: 32
concurrent_counter_writes: 32

# For materialized view writes, as there is a read involved, so this should
# be limited by the lesser of concurrent reads or concurrent writes.
concurrent_materialized_view_writes: 32

# Maximum memory to use for pooling sstable buffers. Defaults to the smaller
# of 1/4 of heap or 512MB. This pool is allocated off-heap, so is in addition
# to the memory allocated for heap. Memory is only allocated as needed.
# file_cache_size_in_mb: 512

# Flag indicating whether to allocate on or off heap when the sstable buffer
# pool is exhausted, that is when it has exceeded the maximum memory
# file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request.

# buffer_pool_use_heap_if_exhausted: true

# The strategy for optimizing disk read
# Possible values are:
# ssd (for solid state disks, the default)
# spinning (for spinning disks)
# disk_optimization_strategy: ssd

# Total permitted memory to use for memtables. Cassandra will stop
# accepting writes when the limit is exceeded until a flush completes,
# and will trigger a flush based on memtable_cleanup_threshold
# If omitted, Cassandra will set both to 1/4 the size of the heap.
# memtable_heap_space_in_mb: 2048
# memtable_offheap_space_in_mb: 2048

# Ratio of occupied non-flushing memtable size to total permitted size
# that will trigger a flush of the largest memtable. Larger mct will
# mean larger flushes and hence less compaction, but also less concurrent
# flush activity which can make it difficult to keep your disks fed
# under heavy write load.
#
# memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1)
# memtable_cleanup_threshold: 0.11

# Specify the way Cassandra allocates and manages memtable memory.
# Options are:
#   heap_buffers:    on heap nio buffers
#   offheap_buffers: off heap (direct) nio buffers
#   offheap_objects: off heap objects
memtable_allocation_type: heap_buffers

# Total space to use for commit logs on disk.
#
# If space gets above this value, Cassandra will flush every dirty CF
# in the oldest segment and remove it. So a small total commitlog space
# will tend to cause more flush activity on less-active columnfamilies.
#
# The default value is the smaller of 8192, and 1/4 of the total space
# of the commitlog volume.
#
# commitlog_total_space_in_mb: 8192

# This sets the amount of memtable flush writer threads. These will
# be blocked by disk io, and each one will hold a memtable in memory
# while blocked.
#
# memtable_flush_writers defaults to one per data_file_directory.
#
# If your data directories are backed by SSD, you can increase this, but
# avoid having memtable_flush_writers * data_file_directories > number of cores
#memtable_flush_writers: 1

# A fixed memory pool size in MB for SSTable index summaries. If left
# empty, this will default to 5% of the heap size. If the memory usage of
# all index summaries exceeds this limit, SSTables with low read rates will
# shrink their index summaries in order to meet this limit. However, this
# is a best-effort process. In extreme conditions Cassandra may need to use
# more than this amount of memory.
index_summary_capacity_in_mb:

# How frequently index summaries should be resampled. This is done
# periodically to redistribute memory from the fixed-size pool to sstables
# proportional to their recent read rates. Setting to -1 will disable this
# process, leaving existing index summaries at their current sampling level.
index_summary_resize_interval_in_minutes: 60

# Whether to, when doing sequential writing, fsync() at intervals in
# order to force the operating system to flush the dirty
# buffers. Enable this to avoid sudden dirty buffer flushing from
# impacting read latencies. Almost always a good idea on SSDs; not
# necessarily on platters.
trickle_fsync: false
trickle_fsync_interval_in_kb: 10240

# TCP port, for commands and data
# For security reasons, you should not expose this port to the internet. Firewall it if needed.
storage_port: 7000

# SSL port, for encrypted communication. Unused unless enabled in
# encryption_options
# For security reasons, you should not expose this port to the internet. Firewall it if needed.
ssl_storage_port: 7001

# Address or interface to bind to and tell other Cassandra nodes to connect to.
# You _must_ change this if you want multiple nodes to be able to communicate!
#
# Set listen_address OR listen_interface, not both. Interfaces must correspond
# to a single address, IP aliasing is not supported.
#
# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
# will always do the Right Thing _if_ the node is properly configured
# (hostname, name resolution, etc), and the Right Thing is to use the
# address associated with the hostname (it might not be).
#
# Setting listen_address to 0.0.0.0 is always wrong.
#
# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
# you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4
# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
listen_address: localhost
# listen_interface: eth0
# listen_interface_prefer_ipv6: false
# Address to broadcast to other Cassandra nodes
# Leaving this blank will set it to the same value as listen_address
# broadcast_address: 1.2.3.4

# When using multiple physical network interfaces, set this
# to true to listen on broadcast_address in addition to
# the listen_address, allowing nodes to communicate in both
# interfaces.
# Ignore this property if the network configuration automatically
# routes between the public and private networks such as EC2.
# listen_on_broadcast_address: false

# Internode authentication backend, implementing IInternodeAuthenticator;
# used to allow/disallow connections from peer nodes.
# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator

# Whether to start the native transport server.
# Please note that the address on which the native transport is bound is the
# same as the rpc_address. The port however is different and specified below.
start_native_transport: true
# port for the CQL native transport to listen for clients on
# For security reasons, you should not expose this port to the internet. Firewall it if needed.
native_transport_port: 9042
# Enabling native transport encryption in client_encryption_options allows you to either use
# encryption for the standard port or to use a dedicated, additional port along with the unencrypted
# standard native_transport_port.
# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption
# for native_transport_port. Setting native_transport_port_ssl to a different value
# from native_transport_port will use encryption for native_transport_port_ssl while
# keeping native_transport_port unencrypted.
# native_transport_port_ssl: 9142
# The maximum threads for handling requests when the native transport is used.
# This is similar to rpc_max_threads though the default differs slightly (and
# there is no native_transport_min_threads, idle threads will always be stopped
# after 30 seconds).
# native_transport_max_threads: 128
#
# The maximum size of allowed frame. Frame (requests) larger than this will
# be rejected as invalid. The default is 256MB.
# native_transport_max_frame_size_in_mb: 256

# The maximum number of concurrent client connections.
# The default is -1, which means unlimited.
# native_transport_max_concurrent_connections: -1

# The maximum number of concurrent client connections per source ip.
# The default is -1, which means unlimited.
# native_transport_max_concurrent_connections_per_ip: -1

# Whether to start the thrift rpc server.
start_rpc: false

# The address or interface to bind the Thrift RPC service and native transport
# server to.
#
# Set rpc_address OR rpc_interface, not both. Interfaces must correspond
# to a single address, IP aliasing is not supported.
#
# Leaving rpc_address blank has the same effect as on listen_address
# (i.e. it will be based on the configured hostname of the node).
#
# Note that unlike listen_address, you can specify 0.0.0.0, but you must also
# set broadcast_rpc_address to a value other than 0.0.0.0.
#
# For security reasons, you should not expose this port to the internet. Firewall it if needed.
#
# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4
# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
rpc_address: localhost
# rpc_interface: eth1
# rpc_interface_prefer_ipv6: false

# port for Thrift to listen for clients on
rpc_port: 9160

# RPC address to broadcast to drivers and other Cassandra nodes. This cannot
# be set to 0.0.0.0. If left blank, this will be set to the value of
# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must
# be set.
# broadcast_rpc_address: 1.2.3.4

# enable or disable keepalive on rpc/native connections
rpc_keepalive: true

# Cassandra provides two out-of-the-box options for the RPC Server:
#
# sync  -> One thread per thrift connection. For a very large number of clients, memory
#          will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size
#          per thread, and that will correspond to your use of virtual memory (but physical memory
#          may be limited depending on use of stack space).
#
# hsha  -> Stands for "half synchronous, half asynchronous." All thrift clients are handled
#          asynchronously using a small number of threads that does not vary with the amount
#          of thrift clients (and thus scales well to many clients). The rpc requests are still
#          synchronous (one thread per active request). If hsha is selected then it is essential
#          that rpc_max_threads is changed from the default value of unlimited.
#
# The default is sync because on Windows hsha is about 30% slower. On Linux,
# sync/hsha performance is about the same, with hsha of course using less memory.
#
# Alternatively, you can provide your own RPC server by providing the fully-qualified class name
# of an o.a.c.t.TServerFactory that can create an instance of it.
rpc_server_type: sync

# Uncomment rpc_min|max_thread to set request pool size limits.
#
# Regardless of your choice of RPC server (see above), the number of maximum requests in the
# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
# RPC server, it also dictates the number of clients that can be connected at all).
#
# The default is unlimited and thus provides no protection against clients overwhelming the server. You are
# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
#
# rpc_min_threads: 16
# rpc_max_threads: 2048

# uncomment to set socket buffer sizes on rpc connections
# rpc_send_buff_size_in_bytes:
# rpc_recv_buff_size_in_bytes:

# Uncomment to set socket buffer size for internode communication
# Note that when setting this, the buffer size is limited by net.core.wmem_max
# and when not setting it it is defined by net.ipv4.tcp_wmem
# See:
# /proc/sys/net/core/wmem_max
# /proc/sys/net/core/rmem_max
# /proc/sys/net/ipv4/tcp_wmem
# /proc/sys/net/ipv4/tcp_rmem
# and: man tcp
# internode_send_buff_size_in_bytes:
# internode_recv_buff_size_in_bytes:

# Frame size for thrift (maximum message length).
thrift_framed_transport_size_in_mb: 15

# Set to true to have Cassandra create a hard link to each sstable
# flushed or streamed locally in a backups/ subdirectory of the
# keyspace data. Removing these links is the operator's
# responsibility.
incremental_backups: false

# Whether or not to take a snapshot before each compaction. Be
# careful using this option, since Cassandra won't clean up the
# snapshots for you. Mostly useful if you're paranoid when there
# is a data format change.
snapshot_before_compaction: false

# Whether or not a snapshot is taken of the data before keyspace truncation
# or dropping of column families. The STRONGLY advised default of true
# should be used to provide data safety. If you set this flag to false, you will
# lose data on truncation or drop.
auto_snapshot: true

# When executing a scan, within or across a partition, we need to keep the
# tombstones seen in memory so we can return them to the coordinator, which
# will use them to make sure other replicas also know about the deleted rows.
# With workloads that generate a lot of tombstones, this can cause performance
# problems and even exhaust the server heap.
# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
# Adjust the thresholds here if you understand the dangers and want to
# scan more tombstones anyway. These thresholds may also be adjusted at runtime
# using the StorageService mbean.
tombstone_warn_threshold: 1000
tombstone_failure_threshold: 100000

# Granularity of the collation index of rows within a partition.
# Increase if your rows are large, or if you have a very large
# number of rows per partition. The competing goals are these:
#   1) a smaller granularity means more index entries are generated
#      and looking up rows within the partition by collation column
#      is faster
#   2) but, Cassandra will keep the collation index in memory for hot
#      rows (as part of the key cache), so a larger granularity means
#      you can cache more hot rows
column_index_size_in_kb: 64


# Log WARN on any batch size exceeding this value. 5kb per batch by default.
# Caution should be taken on increasing the size of this threshold as it can lead to node instability.
batch_size_warn_threshold_in_kb: 5

# Fail any batch exceeding this value. 50kb (10x warn threshold) by default.
batch_size_fail_threshold_in_kb: 50

# Number of simultaneous compactions to allow, NOT including
# validation "compactions" for anti-entropy repair. Simultaneous
# compactions can help preserve read performance in a mixed read/write
# workload, by mitigating the tendency of small sstables to accumulate
# during a single long-running compaction. The default is usually
# fine and if you experience problems with compaction running too
# slowly or too fast, you should look at
# compaction_throughput_mb_per_sec first.
#
# concurrent_compactors defaults to the smaller of (number of disks,
# number of cores), with a minimum of 2 and a maximum of 8.
#
# If your data directories are backed by SSD, you should increase this
# to the number of cores.
#concurrent_compactors: 1

# Throttles compaction to the given total throughput across the entire
# system. The faster you insert data, the faster you need to compact in
# order to keep the sstable count down, but in general, setting this to
# 16 to 32 times the rate you are inserting data is more than sufficient.
# Setting this to 0 disables throttling. Note that this accounts for all types
# of compaction, including validation compaction.
compaction_throughput_mb_per_sec: 16

# Log a warning when compacting partitions larger than this value
compaction_large_partition_warning_threshold_mb: 100

# When compacting, the replacement sstable(s) can be opened before they
# are completely written, and used in place of the prior sstables for
# any range that has been written. This helps to smoothly transfer reads
# between the sstables, reducing page cache churn and keeping hot rows hot
sstable_preemptive_open_interval_in_mb: 50

# Throttles all outbound streaming file transfers on this node to the
# given total throughput in Mbps. This is necessary because Cassandra does
# mostly sequential IO when streaming data during bootstrap or repair, which
# can lead to saturating the network connection and degrading rpc performance.
# When unset, the default is 200 Mbps or 25 MB/s.
# stream_throughput_outbound_megabits_per_sec: 200

# Throttles all streaming file transfer between the datacenters,
# this setting allows users to throttle inter dc stream throughput in addition
# to throttling all network stream traffic as configured with
# stream_throughput_outbound_megabits_per_sec
# When unset, the default is 200 Mbps or 25 MB/s
# inter_dc_stream_throughput_outbound_megabits_per_sec: 200
# How long the coordinator should wait for read operations to complete
|
||||
read_request_timeout_in_ms: 5000
|
||||
# How long the coordinator should wait for seq or index scans to complete
|
||||
range_request_timeout_in_ms: 10000
|
||||
# How long the coordinator should wait for writes to complete
|
||||
write_request_timeout_in_ms: 2000
|
||||
# How long the coordinator should wait for counter writes to complete
|
||||
counter_write_request_timeout_in_ms: 5000
|
||||
# How long a coordinator should continue to retry a CAS operation
|
||||
# that contends with other proposals for the same row
|
||||
cas_contention_timeout_in_ms: 1000
|
||||
# How long the coordinator should wait for truncates to complete
|
||||
# (This can be much longer, because unless auto_snapshot is disabled
|
||||
# we need to flush first so we can snapshot before removing the data.)
|
||||
truncate_request_timeout_in_ms: 60000
|
||||
# The default timeout for other, miscellaneous operations
|
||||
request_timeout_in_ms: 10000
|
||||
|
||||
# Enable operation timeout information exchange between nodes to accurately
|
||||
# measure request timeouts. If disabled, replicas will assume that requests
|
||||
# were forwarded to them instantly by the coordinator, which means that
|
||||
# under overload conditions we will waste that much extra time processing
|
||||
# already-timed-out requests.
|
||||
#
|
||||
# Warning: before enabling this property make sure to ntp is installed
|
||||
# and the times are synchronized between the nodes.
cross_node_timeout: false

# Set socket timeout for streaming operation.
# The stream session is failed if no data is received by any of the
# participants within that period.
# Default value is 3600000, which means streams timeout after an hour.
# streaming_socket_timeout_in_ms: 3600000

# phi value that must be reached for a host to be marked down.
# most users should never need to adjust this.
# phi_convict_threshold: 8

# endpoint_snitch -- Set this to a class that implements
# IEndpointSnitch. The snitch has two functions:
# - it teaches Cassandra enough about your network topology to route
#   requests efficiently
# - it allows Cassandra to spread replicas around your cluster to avoid
#   correlated failures. It does this by grouping machines into
#   "datacenters" and "racks." Cassandra will do its best not to have
#   more than one replica on the same "rack" (which may not actually
#   be a physical location)
#
# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
# ARE PLACED.
#
# IF THE RACK A REPLICA IS PLACED IN CHANGES AFTER THE REPLICA HAS BEEN
# ADDED TO A RING, THE NODE MUST BE DECOMMISSIONED AND REBOOTSTRAPPED.
#
# Out of the box, Cassandra provides
#  - SimpleSnitch:
#    Treats Strategy order as proximity. This can improve cache
#    locality when disabling read repair. Only appropriate for
#    single-datacenter deployments.
#  - GossipingPropertyFileSnitch
#    This should be your go-to snitch for production use. The rack
#    and datacenter for the local node are defined in
#    cassandra-rackdc.properties and propagated to other nodes via
#    gossip. If cassandra-topology.properties exists, it is used as a
#    fallback, allowing migration from the PropertyFileSnitch.
#  - PropertyFileSnitch:
#    Proximity is determined by rack and data center, which are
#    explicitly configured in cassandra-topology.properties.
#  - Ec2Snitch:
#    Appropriate for EC2 deployments in a single Region. Loads Region
#    and Availability Zone information from the EC2 API. The Region is
#    treated as the datacenter, and the Availability Zone as the rack.
#    Only private IPs are used, so this will not work across multiple
#    Regions.
#  - Ec2MultiRegionSnitch:
#    Uses public IPs as broadcast_address to allow cross-region
#    connectivity. (Thus, you should set seed addresses to the public
#    IP as well.) You will need to open the storage_port or
#    ssl_storage_port on the public IP firewall. (For intra-Region
#    traffic, Cassandra will switch to the private IP after
#    establishing a connection.)
#  - RackInferringSnitch:
#    Proximity is determined by rack and data center, which are
#    assumed to correspond to the 3rd and 2nd octet of each node's IP
#    address, respectively. Unless this happens to match your
#    deployment conventions, this is best used as an example of
#    writing a custom Snitch class and is provided in that spirit.
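#    (A worked example of the octet rule above: a node with IP 10.20.30.40
#    would be inferred to be in datacenter "20" and rack "30".)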
#
# You can use a custom Snitch by setting this to the full class name
# of the snitch, which will be assumed to be on your classpath.
endpoint_snitch: SimpleSnitch

# controls how often to perform the more expensive part of host score
# calculation
dynamic_snitch_update_interval_in_ms: 100
# controls how often to reset all host scores, allowing a bad host to
# possibly recover
dynamic_snitch_reset_interval_in_ms: 600000
# if set greater than zero and read_repair_chance is < 1.0, this will allow
# 'pinning' of replicas to hosts in order to increase cache capacity.
# The badness threshold will control how much worse the pinned host has to be
# before the dynamic snitch will prefer other replicas over it. This is
# expressed as a double which represents a percentage. Thus, a value of
# 0.2 means Cassandra would continue to prefer the static snitch values
# until the pinned host was 20% worse than the fastest.
dynamic_snitch_badness_threshold: 0.1

# request_scheduler -- Set this to a class that implements
# RequestScheduler, which will schedule incoming client requests
# according to the specific policy. This is useful for multi-tenancy
# with a single Cassandra cluster.
# NOTE: This is specifically for requests from the client and does
# not affect inter node communication.
# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
# client requests to a node with a separate queue for each
# request_scheduler_id. The scheduler is further customized by
# request_scheduler_options as described below.
request_scheduler: org.apache.cassandra.scheduler.NoScheduler

# Scheduler Options vary based on the type of scheduler
# NoScheduler - Has no options
# RoundRobin
#  - throttle_limit -- The throttle_limit is the number of in-flight
#                      requests per client. Requests beyond
#                      that limit are queued up until
#                      running requests can complete.
#                      The value of 80 here is twice the number of
#                      concurrent_reads + concurrent_writes.
#  - default_weight -- default_weight is optional and allows for
#                      overriding the default which is 1.
#  - weights -- Weights are optional and will default to 1 or the
#               overridden default_weight. The weight translates into how
#               many requests are handled during each turn of the
#               RoundRobin, based on the scheduler id.
#
# request_scheduler_options:
#    throttle_limit: 80
#    default_weight: 5
#    weights:
#      Keyspace1: 1
#      Keyspace2: 5

# request_scheduler_id -- An identifier based on which to perform
# the request scheduling. Currently the only valid option is keyspace.
# request_scheduler_id: keyspace

# Enable or disable inter-node encryption
# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
# users generate their own keys), and TLS_RSA_WITH_AES_128_CBC_SHA as the
# cipher suite for authentication, key exchange and encryption of the actual data transfers.
# Use the DHE/ECDHE ciphers if running in FIPS 140 compliant mode.
# NOTE: No custom encryption options are enabled at the moment
# The available internode options are: all, none, dc, rack
#
# If set to dc cassandra will encrypt the traffic between the DCs
# If set to rack cassandra will encrypt the traffic between the racks
#
# The passwords used in these options must match the passwords used when generating
# the keystore and truststore. For instructions on generating these files, see:
# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
#
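# A minimal sketch of generating those files with the JDK's keytool
# (assumptions: a self-signed certificate is acceptable, and the alias
# "node0" as well as the passwords are illustrative only):
#   keytool -genkeypair -keyalg RSA -alias node0 -validity 365 \
#     -keystore conf/.keystore -storepass cassandra
#   keytool -exportcert -alias node0 -file node0.cer \
#     -keystore conf/.keystore -storepass cassandra
#   keytool -importcert -alias node0 -file node0.cer -noprompt \
#     -keystore conf/.truststore -storepass cassandra
#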
server_encryption_options:
    internode_encryption: none
    keystore: conf/.keystore
    keystore_password: cassandra
    truststore: conf/.truststore
    truststore_password: cassandra
    # More advanced defaults below:
    # protocol: TLS
    # algorithm: SunX509
    # store_type: JKS
    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
    # require_client_auth: false

# enable or disable client/server encryption.
client_encryption_options:
    enabled: false
    # If enabled and optional is set to true encrypted and unencrypted connections are handled.
    optional: false
    keystore: conf/.keystore
    keystore_password: cassandra
    # require_client_auth: false
    # Set truststore and truststore_password if require_client_auth is true
    # truststore: conf/.truststore
    # truststore_password: cassandra
    # More advanced defaults below:
    # protocol: TLS
    # algorithm: SunX509
    # store_type: JKS
    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]

# internode_compression controls whether traffic between nodes is
# compressed.
# can be:  all  - all traffic is compressed
#          dc   - traffic between different datacenters is compressed
#          none - nothing is compressed.
internode_compression: all

# Enable or disable tcp_nodelay for inter-dc communication.
# Disabling it will result in larger (but fewer) network packets being sent,
# reducing overhead from the TCP protocol itself, at the cost of increasing
# latency if you block for cross-datacenter responses.
inter_dc_tcp_nodelay: false

# TTL for different trace types used during logging of the repair process.
tracetype_query_ttl: 86400
tracetype_repair_ttl: 604800

# GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level
# Adjust the threshold based on your application throughput requirement
# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level
gc_warn_threshold_in_ms: 1000

# UDFs (user defined functions) are disabled by default.
# As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code.
enable_user_defined_functions: false

# Enables scripted UDFs (JavaScript UDFs).
# Java UDFs are always enabled, if enable_user_defined_functions is true.
# Enable this option to be able to use UDFs with "language javascript" or any custom JSR-223 provider.
# This option has no effect, if enable_user_defined_functions is false.
enable_scripted_user_defined_functions: false

# The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation.
# Lowering this value on Windows can provide much tighter latency and better throughput, however
# some virtualized environments may see a negative performance impact from changing this setting
# below their system default. The sysinternals 'clockres' tool can confirm your system's default
# setting.
windows_timer_interval: 1


# Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from
# a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by
# the "key_alias" is the only key that will be used for encrypt opertaions; previously used keys
|
||||
# can still (and should!) be in the keystore and will be used on decrypt operations
# (to handle the case of key rotation).
#
# It is strongly recommended to download and install Java Cryptography Extension (JCE)
# Unlimited Strength Jurisdiction Policy Files for your version of the JDK.
# (current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html)
#
# Currently, only the following file types are supported for transparent data encryption, although
# more are coming in future cassandra releases: commitlog, hints
transparent_data_encryption_options:
    enabled: false
    chunk_length_kb: 64
    cipher: AES/CBC/PKCS5Padding
    key_alias: testing:1
    # CBC IV length for AES needs to be 16 bytes (which is also the default size)
    # iv_length: 16
    key_provider:
      - class_name: org.apache.cassandra.security.JKSKeyProvider
        parameters:
          - keystore: conf/.keystore
            keystore_password: cassandra
            store_type: JCEKS
            key_password: cassandra
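    # A hypothetical sketch of creating the AES key referenced by key_alias
    # above with the JDK's keytool (the alias and passwords simply mirror the
    # illustrative defaults in this file):
    #   keytool -genseckey -keyalg AES -keysize 128 -alias testing:1 \
    #     -storetype JCEKS -keystore conf/.keystore \
    #     -storepass cassandra -keypass cassandra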
240
vendor/k8s.io/kubernetes/examples/storage/cassandra/image/files/jvm.options
generated
vendored
Normal file
@ -0,0 +1,240 @@
###########################################################################
#                               jvm.options                               #
#                                                                         #
# - all flags defined here will be used by cassandra to startup the JVM   #
# - one flag should be specified per line                                 #
# - lines that do not start with '-' will be ignored                      #
# - only static flags are accepted (no variables or parameters)           #
# - dynamic flags will be appended to these on cassandra-env              #
###########################################################################

######################
# STARTUP PARAMETERS #
######################

# Uncomment any of the following properties to enable specific startup parameters

# In a multi-instance deployment, multiple Cassandra instances will independently assume that all
# CPU processors are available to them. This setting allows you to specify a smaller set of processors
# and perhaps have affinity.
#-Dcassandra.available_processors=number_of_processors

# The directory location of the cassandra.yaml file.
#-Dcassandra.config=directory

# Sets the initial partitioner token for a node the first time the node is started.
#-Dcassandra.initial_token=token

# Set to false to start Cassandra on a node but not have the node join the cluster.
#-Dcassandra.join_ring=true|false

# Set to false to clear all gossip state for the node on restart. Use when you have changed node
# information in cassandra.yaml (such as listen_address).
#-Dcassandra.load_ring_state=true|false

# Enable pluggable metrics reporter. See Pluggable metrics reporting in Cassandra 2.0.2.
#-Dcassandra.metricsReporterConfigFile=file

# Set the port on which the CQL native transport listens for clients. (Default: 9042)
#-Dcassandra.native_transport_port=port

# Overrides the partitioner. (Default: org.apache.cassandra.dht.Murmur3Partitioner)
#-Dcassandra.partitioner=partitioner

# To replace a node that has died, restart a new node in its place specifying the address of the
# dead node. The new node must not have any data in its data directory, that is, it must be in the
# same state as before bootstrapping.
#-Dcassandra.replace_address=listen_address or broadcast_address of dead node

# Allow restoring specific tables from an archived commit log.
#-Dcassandra.replayList=table

# Allows overriding of the default RING_DELAY (1000ms), which is the amount of time a node waits
# before joining the ring.
#-Dcassandra.ring_delay_ms=ms

# Set the port for the Thrift RPC service, which is used for client connections. (Default: 9160)
#-Dcassandra.rpc_port=port

# Set the SSL port for encrypted communication. (Default: 7001)
#-Dcassandra.ssl_storage_port=port

# Enable or disable the native transport server. See start_native_transport in cassandra.yaml.
#-Dcassandra.start_native_transport=true|false

# Enable or disable the Thrift RPC server. (Default: true)
#-Dcassandra.start_rpc=true/false

# Set the port for inter-node communication. (Default: 7000)
#-Dcassandra.storage_port=port

# Set the default location for the trigger JARs. (Default: conf/triggers)
#-Dcassandra.triggers_dir=directory

# For testing new compaction and compression strategies. It allows you to experiment with different
# strategies and benchmark write performance differences without affecting the production workload.
#-Dcassandra.write_survey=true

# To disable configuration via JMX of auth caches (such as those for credentials, permissions and
# roles). This will mean those config options can only be set (persistently) in cassandra.yaml
# and will require a restart for new values to take effect.
#-Dcassandra.disable_auth_caches_remote_configuration=true

########################
# GENERAL JVM SETTINGS #
########################

# enable assertions. disabling this in production will give a modest
# performance benefit (around 5%).
-ea

# enable thread priorities, primarily so we can give periodic tasks
# a lower priority to avoid interfering with client workload
-XX:+UseThreadPriorities

# allows lowering thread priority without being root on linux - probably
# not necessary on Windows but doesn't harm anything.
# see http://tech.stolsvik.com/2010/01/linux-java-thread-priorities-workar
-XX:ThreadPriorityPolicy=42

# Enable heap-dump if there's an OOM
-XX:+HeapDumpOnOutOfMemoryError

# Per-thread stack size.
-Xss256k

# Larger interned string table, for gossip's benefit (CASSANDRA-6410)
-XX:StringTableSize=1000003

# Make sure all memory is faulted and zeroed on startup.
# This helps prevent soft faults in containers and makes
# transparent hugepage allocation more effective.
-XX:+AlwaysPreTouch

# Disable biased locking as it does not benefit Cassandra.
-XX:-UseBiasedLocking

# Enable thread-local allocation blocks and allow the JVM to automatically
# resize them at runtime.
-XX:+UseTLAB
-XX:+ResizeTLAB

# http://www.evanjones.ca/jvm-mmap-pause.html
-XX:+PerfDisableSharedMem

# Prefer binding to IPv4 network interfaces (when net.ipv6.bindv6only=1). See
# http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6342561 (short version:
# comment out this entry to enable IPv6 support).
-Djava.net.preferIPv4Stack=true

### Debug options

# uncomment to enable flight recorder
#-XX:+UnlockCommercialFeatures
#-XX:+FlightRecorder

# uncomment to have Cassandra JVM listen for remote debuggers/profilers on port 1414
#-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=1414

# uncomment to have Cassandra JVM log internal method compilation (developers only)
#-XX:+UnlockDiagnosticVMOptions
#-XX:+LogCompilation

#################
# HEAP SETTINGS #
#################

# Heap size is automatically calculated by cassandra-env based on this
# formula: max(min(1/2 ram, 1024MB), min(1/4 ram, 8GB))
# That is:
# - calculate 1/2 ram and cap to 1024MB
# - calculate 1/4 ram and cap to 8192MB
# - pick the max
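#
# A worked example of that formula (assuming a machine with 8GB of RAM):
#   min(8192MB/2, 1024MB) = 1024MB and min(8192MB/4, 8192MB) = 2048MB,
#   so max(1024MB, 2048MB) yields a 2048MB heap.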
#
# For production use you may wish to adjust this for your environment.
# If that's the case, uncomment the -Xmx and Xms options below to override the
# automatic calculation of JVM heap memory.
#
# It is recommended to set min (-Xms) and max (-Xmx) heap sizes to
# the same value to avoid stop-the-world GC pauses during resize, and
# so that we can lock the heap in memory on startup to prevent any
# of it from being swapped out.
#-Xms4G
#-Xmx4G

# Young generation size is automatically calculated by cassandra-env
# based on this formula: min(100 * num_cores, 1/4 * heap size)
#
# The main trade-off for the young generation is that the larger it
# is, the longer GC pause times will be. The shorter it is, the more
# expensive GC will be (usually).
#
# It is not recommended to set the young generation size if using the
# G1 GC, since that will override the target pause-time goal.
# More info: http://www.oracle.com/technetwork/articles/java/g1gc-1984535.html
#
# The example below assumes a modern 8-core+ machine for decent
# times. If in doubt, and if you do not particularly want to tweak, go
# 100 MB per physical CPU core.
#-Xmn800M

#################
#  GC SETTINGS  #
#################

### CMS Settings

#-XX:+UseParNewGC
#-XX:+UseConcMarkSweepGC
#-XX:+CMSParallelRemarkEnabled
#-XX:SurvivorRatio=8
#-XX:MaxTenuringThreshold=1
#-XX:CMSInitiatingOccupancyFraction=75
#-XX:+UseCMSInitiatingOccupancyOnly
#-XX:CMSWaitDuration=10000
#-XX:+CMSParallelInitialMarkEnabled
#-XX:+CMSEdenChunksRecordAlways
# some JVMs will fill up their heap when accessed via JMX, see CASSANDRA-6541
#-XX:+CMSClassUnloadingEnabled

### G1 Settings (experimental, comment previous section and uncomment section below to enable)

## Use the Hotspot garbage-first collector.
-XX:+UseG1GC
#
## Have the JVM do less remembered set work during STW, instead
## preferring concurrent GC. Reduces p99.9 latency.
-XX:G1RSetUpdatingPauseTimePercent=5
#
## Main G1GC tunable: lowering the pause target will lower throughput and vice versa.
## 200ms is the JVM default and lowest viable setting
## 1000ms increases throughput. Keep it smaller than the timeouts in cassandra.yaml.
#-XX:MaxGCPauseMillis=500

## Optional G1 Settings

# Save CPU time on large (>= 16GB) heaps by delaying region scanning
# until the heap is 70% full. The default in Hotspot 8u40 is 40%.
#-XX:InitiatingHeapOccupancyPercent=70

# For systems with > 8 cores, the default ParallelGCThreads is 5/8 the number of logical cores.
# Otherwise equal to the number of cores when 8 or less.
# Machines with > 10 cores should try setting these to <= full cores.
#-XX:ParallelGCThreads=16
# By default, ConcGCThreads is 1/4 of ParallelGCThreads.
# Setting both to the same value can reduce STW durations.
#-XX:ConcGCThreads=16

### GC logging options (mostly enabled below -- uncomment the remaining flags to enable them)

-XX:+PrintGCDetails
-XX:+PrintGCDateStamps
-XX:+PrintHeapAtGC
-XX:+PrintTenuringDistribution
-XX:+PrintGCApplicationStoppedTime
-XX:+PrintPromotionFailure
#-XX:PrintFLSStatistics=1
#-Xloggc:/var/log/cassandra/gc.log
-XX:+UseGCLogFileRotation
-XX:NumberOfGCLogFiles=10
-XX:GCLogFileSize=10M
BIN
vendor/k8s.io/kubernetes/examples/storage/cassandra/image/files/kubernetes-cassandra.jar
generated
vendored
Normal file
Binary file not shown.
13
vendor/k8s.io/kubernetes/examples/storage/cassandra/image/files/logback.xml
generated
vendored
Normal file
@ -0,0 +1,13 @@
<?xml version="1.0"?>
<configuration scan="true">
  <jmxConfigurator/>
  <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
    <encoder>
      <pattern>%-5level %date{HH:mm:ss,SSS} %msg%n</pattern>
    </encoder>
  </appender>
  <root level="INFO">
    <appender-ref ref="STDOUT"/>
  </root>
  <logger name="com.thinkaurelius.thrift" level="ERROR"/>
</configuration>
27
vendor/k8s.io/kubernetes/examples/storage/cassandra/image/files/ready-probe.sh
generated
vendored
Normal file
@ -0,0 +1,27 @@
#!/bin/bash

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

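# "UN" in the nodetool status output means Up/Normal; the probe reports
# ready only when this pod's own IP is listed in that state.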
if [[ $(nodetool status | grep "$POD_IP") == *"UN"* ]]; then
  if [[ $DEBUG ]]; then
    echo "UN";
  fi
  exit 0;
else
  if [[ $DEBUG ]]; then
    echo "Not Up";
  fi
  exit 1;
fi
176
vendor/k8s.io/kubernetes/examples/storage/cassandra/image/files/run.sh
generated
vendored
Executable file
@ -0,0 +1,176 @@
#!/bin/bash

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -e
CASSANDRA_CONF_DIR=/etc/cassandra
CASSANDRA_CFG=$CASSANDRA_CONF_DIR/cassandra.yaml

# Either a StatefulSet passed us seeds, or we fall back to this host as the seed.
if [ -z "$CASSANDRA_SEEDS" ]; then
  HOSTNAME=$(hostname -f)
  CASSANDRA_SEEDS=$(hostname -f)
fi

# The following vars relate to their counterparts in $CASSANDRA_CFG,
# for instance rpc_address.
CASSANDRA_RPC_ADDRESS="${CASSANDRA_RPC_ADDRESS:-0.0.0.0}"
CASSANDRA_NUM_TOKENS="${CASSANDRA_NUM_TOKENS:-32}"
CASSANDRA_CLUSTER_NAME="${CASSANDRA_CLUSTER_NAME:='Test Cluster'}"
CASSANDRA_LISTEN_ADDRESS=${POD_IP:-$HOSTNAME}
CASSANDRA_BROADCAST_ADDRESS=${POD_IP:-$HOSTNAME}
CASSANDRA_BROADCAST_RPC_ADDRESS=${POD_IP:-$HOSTNAME}
CASSANDRA_DISK_OPTIMIZATION_STRATEGY="${CASSANDRA_DISK_OPTIMIZATION_STRATEGY:-ssd}"
CASSANDRA_MIGRATION_WAIT="${CASSANDRA_MIGRATION_WAIT:-1}"
CASSANDRA_ENDPOINT_SNITCH="${CASSANDRA_ENDPOINT_SNITCH:-SimpleSnitch}"
CASSANDRA_DC="${CASSANDRA_DC}"
CASSANDRA_RACK="${CASSANDRA_RACK}"
CASSANDRA_RING_DELAY="${CASSANDRA_RING_DELAY:-30000}"
CASSANDRA_AUTO_BOOTSTRAP="${CASSANDRA_AUTO_BOOTSTRAP:-true}"
CASSANDRA_SEEDS="${CASSANDRA_SEEDS:-false}"
CASSANDRA_SEED_PROVIDER="${CASSANDRA_SEED_PROVIDER:-org.apache.cassandra.locator.SimpleSeedProvider}"
CASSANDRA_AUTO_BOOTSTRAP="${CASSANDRA_AUTO_BOOTSTRAP:-false}"

# Turn off JMX auth
CASSANDRA_OPEN_JMX="${CASSANDRA_OPEN_JMX:-false}"
# send GC to STDOUT
CASSANDRA_GC_STDOUT="${CASSANDRA_GC_STDOUT:-false}"

echo Starting Cassandra on ${CASSANDRA_LISTEN_ADDRESS}
echo CASSANDRA_CONF_DIR ${CASSANDRA_CONF_DIR}
echo CASSANDRA_CFG ${CASSANDRA_CFG}
echo CASSANDRA_AUTO_BOOTSTRAP ${CASSANDRA_AUTO_BOOTSTRAP}
echo CASSANDRA_BROADCAST_ADDRESS ${CASSANDRA_BROADCAST_ADDRESS}
echo CASSANDRA_BROADCAST_RPC_ADDRESS ${CASSANDRA_BROADCAST_RPC_ADDRESS}
echo CASSANDRA_CLUSTER_NAME ${CASSANDRA_CLUSTER_NAME}
echo CASSANDRA_COMPACTION_THROUGHPUT_MB_PER_SEC ${CASSANDRA_COMPACTION_THROUGHPUT_MB_PER_SEC}
echo CASSANDRA_CONCURRENT_COMPACTORS ${CASSANDRA_CONCURRENT_COMPACTORS}
echo CASSANDRA_CONCURRENT_READS ${CASSANDRA_CONCURRENT_READS}
echo CASSANDRA_CONCURRENT_WRITES ${CASSANDRA_CONCURRENT_WRITES}
echo CASSANDRA_COUNTER_CACHE_SIZE_IN_MB ${CASSANDRA_COUNTER_CACHE_SIZE_IN_MB}
echo CASSANDRA_DC ${CASSANDRA_DC}
echo CASSANDRA_DISK_OPTIMIZATION_STRATEGY ${CASSANDRA_DISK_OPTIMIZATION_STRATEGY}
echo CASSANDRA_ENDPOINT_SNITCH ${CASSANDRA_ENDPOINT_SNITCH}
echo CASSANDRA_GC_WARN_THRESHOLD_IN_MS ${CASSANDRA_GC_WARN_THRESHOLD_IN_MS}
echo CASSANDRA_INTERNODE_COMPRESSION ${CASSANDRA_INTERNODE_COMPRESSION}
echo CASSANDRA_KEY_CACHE_SIZE_IN_MB ${CASSANDRA_KEY_CACHE_SIZE_IN_MB}
echo CASSANDRA_LISTEN_ADDRESS ${CASSANDRA_LISTEN_ADDRESS}
echo CASSANDRA_LISTEN_INTERFACE ${CASSANDRA_LISTEN_INTERFACE}
echo CASSANDRA_MEMTABLE_ALLOCATION_TYPE ${CASSANDRA_MEMTABLE_ALLOCATION_TYPE}
echo CASSANDRA_MEMTABLE_CLEANUP_THRESHOLD ${CASSANDRA_MEMTABLE_CLEANUP_THRESHOLD}
echo CASSANDRA_MEMTABLE_FLUSH_WRITERS ${CASSANDRA_MEMTABLE_FLUSH_WRITERS}
echo CASSANDRA_MIGRATION_WAIT ${CASSANDRA_MIGRATION_WAIT}
echo CASSANDRA_NUM_TOKENS ${CASSANDRA_NUM_TOKENS}
echo CASSANDRA_RACK ${CASSANDRA_RACK}
echo CASSANDRA_RING_DELAY ${CASSANDRA_RING_DELAY}
echo CASSANDRA_RPC_ADDRESS ${CASSANDRA_RPC_ADDRESS}
echo CASSANDRA_RPC_INTERFACE ${CASSANDRA_RPC_INTERFACE}
echo CASSANDRA_SEEDS ${CASSANDRA_SEEDS}
echo CASSANDRA_SEED_PROVIDER ${CASSANDRA_SEED_PROVIDER}


# if DC and RACK are set, use GossipingPropertyFileSnitch
if [[ $CASSANDRA_DC && $CASSANDRA_RACK ]]; then
  echo "dc=$CASSANDRA_DC" > $CASSANDRA_CONF_DIR/cassandra-rackdc.properties
  echo "rack=$CASSANDRA_RACK" >> $CASSANDRA_CONF_DIR/cassandra-rackdc.properties
  CASSANDRA_ENDPOINT_SNITCH="GossipingPropertyFileSnitch"
fi

if [ -n "$CASSANDRA_MAX_HEAP" ]; then
  sed -ri "s/^(#)?-Xmx[0-9]+.*/-Xmx$CASSANDRA_MAX_HEAP/" "$CASSANDRA_CONF_DIR/jvm.options"
  sed -ri "s/^(#)?-Xms[0-9]+.*/-Xms$CASSANDRA_MAX_HEAP/" "$CASSANDRA_CONF_DIR/jvm.options"
fi

if [ -n "$CASSANDRA_REPLACE_NODE" ]; then
echo "-Dcassandra.replace_address=$CASSANDRA_REPLACE_NODE/" >> "$CASSANDRA_CONF_DIR/jvm.options"
|
||||
fi

for rackdc in dc rack; do
  var="CASSANDRA_${rackdc^^}"
  val="${!var}"
  if [ "$val" ]; then
    sed -ri 's/^('"$rackdc"'=).*/\1 '"$val"'/' "$CASSANDRA_CONF_DIR/cassandra-rackdc.properties"
  fi
done

# TODO what else needs to be modified
for yaml in \
  broadcast_address \
  broadcast_rpc_address \
  cluster_name \
  disk_optimization_strategy \
  endpoint_snitch \
  listen_address \
  num_tokens \
  rpc_address \
  start_rpc \
  key_cache_size_in_mb \
  concurrent_reads \
  concurrent_writes \
  memtable_cleanup_threshold \
  memtable_allocation_type \
  memtable_flush_writers \
  concurrent_compactors \
  compaction_throughput_mb_per_sec \
  counter_cache_size_in_mb \
  internode_compression \
  endpoint_snitch \
  gc_warn_threshold_in_ms \
  listen_interface \
  rpc_interface \
; do
  var="CASSANDRA_${yaml^^}"
  val="${!var}"
  if [ "$val" ]; then
    sed -ri 's/^(# )?('"$yaml"':).*/\2 '"$val"'/' "$CASSANDRA_CFG"
  fi
done
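# An illustrative run of the loop above: with CASSANDRA_NUM_TOKENS=32 in the
# environment, the sed expression rewrites the (possibly commented-out)
# "num_tokens:" line in cassandra.yaml to "num_tokens: 32".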

echo "auto_bootstrap: ${CASSANDRA_AUTO_BOOTSTRAP}" >> $CASSANDRA_CFG

# Set the seed to this pod itself. This only matters for the first pod;
# later pods will be able to get seeds from the seed provider.
if [[ $CASSANDRA_SEEDS == 'false' ]]; then
  sed -ri 's/- seeds:.*/- seeds: "'"$POD_IP"'"/' $CASSANDRA_CFG
else # if we have seeds set them. Probably StatefulSet
  sed -ri 's/- seeds:.*/- seeds: "'"$CASSANDRA_SEEDS"'"/' $CASSANDRA_CFG
fi

sed -ri 's/- class_name: SEED_PROVIDER/- class_name: '"$CASSANDRA_SEED_PROVIDER"'/' $CASSANDRA_CFG

# send gc to stdout
if [[ $CASSANDRA_GC_STDOUT == 'true' ]]; then
  sed -ri 's/ -Xloggc:\/var\/log\/cassandra\/gc\.log//' $CASSANDRA_CONF_DIR/cassandra-env.sh
fi

# enable RMI and JMX to work on one port
echo "JVM_OPTS=\"\$JVM_OPTS -Djava.rmi.server.hostname=$POD_IP\"" >> $CASSANDRA_CONF_DIR/cassandra-env.sh

# tune waits to avoid WARNING messages from the Migration Service
echo "-Dcassandra.migration_task_wait_in_seconds=${CASSANDRA_MIGRATION_WAIT}" >> $CASSANDRA_CONF_DIR/jvm.options
|
||||
echo "-Dcassandra.ring_delay_ms=${CASSANDRA_RING_DELAY}" >> $CASSANDRA_CONF_DIR/jvm.options
|
||||
|
||||
if [[ $CASSANDRA_OPEN_JMX == 'true' ]]; then
|
||||
export LOCAL_JMX=no
|
||||
sed -ri 's/ -Dcom\.sun\.management\.jmxremote\.authenticate=true/ -Dcom\.sun\.management\.jmxremote\.authenticate=false/' $CASSANDRA_CONF_DIR/cassandra-env.sh
|
||||
sed -ri 's/ -Dcom\.sun\.management\.jmxremote\.password\.file=\/etc\/cassandra\/jmxremote\.password//' $CASSANDRA_CONF_DIR/cassandra-env.sh
|
||||
fi
|
||||
|
||||
chmod 700 "${CASSANDRA_DATA}"
|
||||
chown -c -R cassandra "${CASSANDRA_DATA}" "${CASSANDRA_CONF_DIR}"
|
||||
|
||||
export CLASSPATH=/kubernetes-cassandra.jar
|
||||
|
||||
su cassandra -c "$CASSANDRA_HOME/bin/cassandra -f"
|
1
vendor/k8s.io/kubernetes/examples/storage/cassandra/java/.gitignore
generated
vendored
Normal file
@ -0,0 +1 @@
target
1
vendor/k8s.io/kubernetes/examples/storage/cassandra/java/README.md
generated
vendored
Normal file
@ -0,0 +1 @@
This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/storage/cassandra/java/README.md](https://github.com/kubernetes/examples/blob/master/staging/storage/cassandra/java/README.md)
94
vendor/k8s.io/kubernetes/examples/storage/cassandra/java/pom.xml
generated
vendored
Normal file
@ -0,0 +1,94 @@
<!--
Copyright (C) 2015 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
-->
<project>
  <modelVersion>4.0.0</modelVersion>
  <groupId>io.k8s.cassandra</groupId>
  <artifactId>kubernetes-cassandra</artifactId>
  <version>1.0.2</version>
  <build>
    <plugins>
      <plugin>
        <artifactId>maven-compiler-plugin</artifactId>
        <version>3.5.1</version>
        <configuration>
          <source>1.8</source>
          <target>1.8</target>
        </configuration>
      </plugin>
    </plugins>
  </build>
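  <!-- A build sketch (assumes Maven 3 and a JDK 8 toolchain on PATH):
       running "mvn clean package" compiles against the provided-scope
       Cassandra 3.9 API below and emits target/kubernetes-cassandra-1.0.2.jar,
       which run.sh expects on the classpath as /kubernetes-cassandra.jar. -->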

  <properties>
    <logback.version>1.1.3</logback.version>
    <cassandra.version>3.9</cassandra.version>
  </properties>

  <dependencies>
    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <version>4.11</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.hamcrest</groupId>
      <artifactId>hamcrest-all</artifactId>
      <version>1.3</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.slf4j</groupId>
      <artifactId>slf4j-api</artifactId>
      <version>1.7.5</version>
      <scope>provided</scope>
    </dependency>
    <dependency>
      <groupId>ch.qos.logback</groupId>
      <artifactId>logback-classic</artifactId>
      <version>${logback.version}</version>
      <scope>provided</scope>
    </dependency>

    <dependency>
      <groupId>ch.qos.logback</groupId>
      <artifactId>logback-core</artifactId>
      <version>${logback.version}</version>
      <scope>provided</scope>
    </dependency>

    <dependency>
      <groupId>org.codehaus.jackson</groupId>
      <artifactId>jackson-core-asl</artifactId>
      <version>1.6.3</version>
      <scope>provided</scope>
    </dependency>

    <dependency>
      <groupId>org.codehaus.jackson</groupId>
      <artifactId>jackson-mapper-asl</artifactId>
      <version>1.6.3</version>
      <scope>provided</scope>
    </dependency>

    <dependency>
      <groupId>org.apache.cassandra</groupId>
      <artifactId>cassandra-all</artifactId>
      <version>${cassandra.version}</version>
      <scope>provided</scope>
    </dependency>

  </dependencies>
</project>
254
vendor/k8s.io/kubernetes/examples/storage/cassandra/java/src/main/java/io/k8s/cassandra/KubernetesSeedProvider.java
generated
vendored
Normal file
@ -0,0 +1,254 @@
/*
 * Copyright (C) 2015 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package io.k8s.cassandra;

import org.apache.cassandra.config.Config;
import org.apache.cassandra.config.ConfigurationLoader;
import org.apache.cassandra.config.YamlConfigurationLoader;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.locator.SeedProvider;
import org.apache.cassandra.locator.SimpleSeedProvider;
import org.apache.cassandra.utils.FBUtilities;
import org.codehaus.jackson.annotate.JsonIgnoreProperties;
import org.codehaus.jackson.map.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.net.ssl.*;
import java.io.IOException;
import java.net.InetAddress;
import java.net.URL;
import java.net.UnknownHostException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.KeyManagementException;
import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;
import java.security.cert.X509Certificate;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;

/**
 * Self discovery {@link SeedProvider} that creates a list of Cassandra Seeds by
 * communicating with the Kubernetes API.
 * <p>Various environment variables can be used to configure this provider:
 * <ul>
 *  <li>KUBERNETES_PORT_443_TCP_ADDR defaults to kubernetes.default.svc.cluster.local</li>
 *  <li>KUBERNETES_PORT_443_TCP_PORT defaults to 443</li>
 *  <li>CASSANDRA_SERVICE defaults to cassandra</li>
 *  <li>POD_NAMESPACE defaults to 'default'</li>
 *  <li>CASSANDRA_SERVICE_NUM_SEEDS defaults to 8 seeds</li>
 *  <li>K8S_ACCOUNT_TOKEN defaults to the path for the default token</li>
 * </ul>
 */
public class KubernetesSeedProvider implements SeedProvider {

    private static final Logger logger = LoggerFactory.getLogger(KubernetesSeedProvider.class);

    /**
     * default seeds to fall back on
     */
    private List<InetAddress> defaultSeeds;

    private TrustManager[] trustAll;

    private HostnameVerifier trustAllHosts;

    /**
     * Create new Seeds
     * @param params
     */
    public KubernetesSeedProvider(Map<String, String> params) {

        // Create default seeds
        defaultSeeds = createDefaultSeeds();

        // TODO: Load the CA cert when it is available on all platforms.
        trustAll = new TrustManager[] {
            new X509TrustManager() {
                public void checkServerTrusted(X509Certificate[] certs, String authType) {}
                public void checkClientTrusted(X509Certificate[] certs, String authType) {}
                public X509Certificate[] getAcceptedIssuers() { return null; }
            }
        };

        trustAllHosts = new HostnameVerifier() {
            public boolean verify(String hostname, SSLSession session) {
                return true;
            }
        };
    }

    /**
     * Call the Kubernetes API to collect a list of seeds
     * @return list of seeds
     */
    public List<InetAddress> getSeeds() {

        String host = getEnvOrDefault("KUBERNETES_PORT_443_TCP_ADDR", "kubernetes.default.svc.cluster.local");
        String port = getEnvOrDefault("KUBERNETES_PORT_443_TCP_PORT", "443");
        String serviceName = getEnvOrDefault("CASSANDRA_SERVICE", "cassandra");
        String podNamespace = getEnvOrDefault("POD_NAMESPACE", "default");
        String path = String.format("/api/v1/namespaces/%s/endpoints/", podNamespace);
        String seedSizeVar = getEnvOrDefault("CASSANDRA_SERVICE_NUM_SEEDS", "8");
        Integer seedSize = Integer.valueOf(seedSizeVar);
        String accountToken = getEnvOrDefault("K8S_ACCOUNT_TOKEN", "/var/run/secrets/kubernetes.io/serviceaccount/token");

        List<InetAddress> seeds = new ArrayList<InetAddress>();
        try {
            String token = getServiceAccountToken(accountToken);

            SSLContext ctx = SSLContext.getInstance("SSL");
            ctx.init(null, trustAll, new SecureRandom());

            String PROTO = "https://";
            URL url = new URL(PROTO + host + ":" + port + path + serviceName);
            logger.info("Getting endpoints from " + url);
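            // With the defaults above this resolves to e.g.
            // https://kubernetes.default.svc.cluster.local:443/api/v1/namespaces/default/endpoints/cassandra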
            HttpsURLConnection conn = (HttpsURLConnection) url.openConnection();

            // TODO: Remove this once the CA cert is propagated everywhere, and replace
            // with loading the CA cert.
            conn.setHostnameVerifier(trustAllHosts);

            conn.setSSLSocketFactory(ctx.getSocketFactory());
            conn.addRequestProperty("Authorization", "Bearer " + token);
            ObjectMapper mapper = new ObjectMapper();
            Endpoints endpoints = mapper.readValue(conn.getInputStream(), Endpoints.class);

            if (endpoints != null) {
                // Note: endpoints.subsets can be null when this is the first node.
                if (endpoints.subsets != null && !endpoints.subsets.isEmpty()) {
                    for (Subset subset : endpoints.subsets) {
                        if (subset.addresses != null && !subset.addresses.isEmpty()) {
                            for (Address address : subset.addresses) {
                                seeds.add(InetAddress.getByName(address.ip));

                                if (seeds.size() >= seedSize) {
                                    logger.info("Available num endpoints: " + seeds.size());
                                    return Collections.unmodifiableList(seeds);
                                }
                            }
                        }
                    }
                }
                logger.info("Available num endpoints: " + seeds.size());
            } else {
logger.warn("Endpoints are not available using default seeds in cassandra.yaml");
|
||||
                return Collections.unmodifiableList(defaultSeeds);
            }
        } catch (Exception ex) {
            logger.warn("Request to kubernetes apiserver failed, using default seeds in cassandra.yaml", ex);
            return Collections.unmodifiableList(defaultSeeds);
        }

        if (seeds.size() == 0) {
            // If we got nothing, we might be the first instance, in that case
            // fall back on the seeds that were passed in cassandra.yaml.
logger.warn("Seeds are not available using default seeds in cassandra.yaml");
|
||||
            return Collections.unmodifiableList(defaultSeeds);
        }

        return Collections.unmodifiableList(seeds);
    }

    /**
     * Code taken from {@link SimpleSeedProvider}. This is used as a fall back
     * in case we don't find seeds
     * @return
     */
    protected List<InetAddress> createDefaultSeeds()
    {
        Config conf;
        try {
            conf = loadConfig();
        }
        catch (Exception e) {
            throw new AssertionError(e);
        }
        String[] hosts = conf.seed_provider.parameters.get("seeds").split(",", -1);
        List<InetAddress> seeds = new ArrayList<InetAddress>();
        for (String host : hosts) {
            try {
                seeds.add(InetAddress.getByName(host.trim()));
            }
            catch (UnknownHostException ex) {
                // not fatal... DatabaseDescriptor will bark if there end up being zero seeds.
logger.warn("Seed provider couldn't lookup host {}", host);
|
||||
}
|
||||
}
|
||||
|
||||
if(seeds.size() == 0) {
|
||||
try {
|
||||
seeds.add(InetAddress.getLocalHost());
|
||||
} catch (UnknownHostException e) {
|
||||
logger.warn("Seed provider couldn't lookup localhost");
|
||||
}
|
||||
}
|
||||
return Collections.unmodifiableList(seeds);
|
||||
}
|
||||
|
||||
/**
|
||||
* Code taken from {@link SimpleSeedProvider}
|
||||
* @return
|
||||
*/
|
||||
protected static Config loadConfig() throws ConfigurationException
|
||||
{
|
||||
String loaderClass = System.getProperty("cassandra.config.loader");
|
||||
ConfigurationLoader loader = loaderClass == null
|
||||
? new YamlConfigurationLoader()
|
||||
: FBUtilities.<ConfigurationLoader>construct(loaderClass, "configuration loading");
|
||||
return loader.loadConfig();
|
||||
}
|
||||
|
||||
private static String getEnvOrDefault(String var, String def) {
|
||||
String val = System.getenv(var);
|
||||
if (val == null) {
|
||||
val = def;
|
||||
}
|
||||
return val;
|
||||
}
|
||||
|
||||
private static String getServiceAccountToken(String file) {
|
||||
try {
|
||||
return new String(Files.readAllBytes(Paths.get(file)));
|
||||
} catch (IOException e) {
|
||||
logger.warn("unable to load service account token" + file);
|
||||
throw new RuntimeException("Unable to load services account token " + file);
|
||||
        }
    }

    protected List<InetAddress> getDefaultSeeds() {
        return defaultSeeds;
    }

    @JsonIgnoreProperties(ignoreUnknown = true)
    static class Address {
        public String ip;
    }

    @JsonIgnoreProperties(ignoreUnknown = true)
    static class Subset {
        public List<Address> addresses;
    }

    @JsonIgnoreProperties(ignoreUnknown = true)
    static class Endpoints {
        public List<Subset> subsets;
    }
}
64
vendor/k8s.io/kubernetes/examples/storage/cassandra/java/src/test/java/io/k8s/cassandra/KubernetesSeedProviderTest.java
generated
vendored
Normal file
@ -0,0 +1,64 @@
/*
 * Copyright (C) 2015 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package io.k8s.cassandra;

import com.google.common.collect.ImmutableMap;
import org.apache.cassandra.locator.SeedProvider;
import org.junit.Ignore;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static org.hamcrest.Matchers.*;

import java.net.InetAddress;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;

import static org.junit.Assert.*;

public class KubernetesSeedProviderTest {

    private static final Logger logger = LoggerFactory.getLogger(KubernetesSeedProviderTest.class);

    @Test
    @Ignore("has to be run inside of a kube cluster")
    public void getSeeds() throws Exception {
        SeedProvider provider = new KubernetesSeedProvider(new HashMap<String, String>());
        List<InetAddress> seeds = provider.getSeeds();

        assertThat(seeds, is(not(empty())));

    }

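    // The expected addresses below mirror the "seeds" parameter in
    // src/test/resources/cassandra.yaml ("8.4.4.4,8.8.8.8").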
    @Test
    public void testDefaultSeeds() throws Exception {

        KubernetesSeedProvider provider = new KubernetesSeedProvider(new HashMap<String, String>());
        List<InetAddress> seeds = provider.getDefaultSeeds();
        List<InetAddress> seedsTest = new ArrayList<>();
        seedsTest.add(InetAddress.getByName("8.4.4.4"));
        seedsTest.add(InetAddress.getByName("8.8.8.8"));
        assertThat(seeds, is(not(empty())));
        assertThat(seeds, is(seedsTest));
        logger.debug("seeds loaded {}", seeds);

    }

}
57
vendor/k8s.io/kubernetes/examples/storage/cassandra/java/src/test/resources/cassandra.yaml
generated
vendored
Normal file
@ -0,0 +1,57 @@
# Copyright (C) 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
# Warning!
# Consider the effects on 'o.a.c.i.s.LegacySSTableTest' before changing schemas in this file.
#
cluster_name: Test Cluster
# memtable_allocation_type: heap_buffers
memtable_allocation_type: offheap_objects
commitlog_sync: batch
commitlog_sync_batch_window_in_ms: 1.0
commitlog_segment_size_in_mb: 5
commitlog_directory: target/cassandra/commitlog
hints_directory: target/cassandra/hints
partitioner: org.apache.cassandra.dht.ByteOrderedPartitioner
listen_address: 127.0.0.1
storage_port: 7010
rpc_port: 9170
start_native_transport: true
native_transport_port: 9042
column_index_size_in_kb: 4
saved_caches_directory: target/cassandra/saved_caches
data_file_directories:
  - target/cassandra/data
disk_access_mode: mmap
seed_provider:
  - class_name: io.k8s.cassandra.KubernetesSeedProvider
    parameters:
      - seeds: "8.4.4.4,8.8.8.8"
endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch
dynamic_snitch: true
request_scheduler: org.apache.cassandra.scheduler.RoundRobinScheduler
request_scheduler_id: keyspace
server_encryption_options:
    internode_encryption: none
    keystore: conf/.keystore
    keystore_password: cassandra
    truststore: conf/.truststore
    truststore_password: cassandra
incremental_backups: true
concurrent_compactors: 4
compaction_throughput_mb_per_sec: 0
row_cache_class_name: org.apache.cassandra.cache.OHCProvider
row_cache_size_in_mb: 16
enable_user_defined_functions: true
enable_scripted_user_defined_functions: true
34
vendor/k8s.io/kubernetes/examples/storage/cassandra/java/src/test/resources/logback-test.xml
generated
vendored
Normal file
@ -0,0 +1,34 @@
<!--
Copyright (C) 2015 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
-->

<configuration debug="false" scan="true">

  <appender name="STDOUT" target="System.out" class="ch.qos.logback.core.ConsoleAppender">
    <encoder>
      <pattern>%-5level %date{HH:mm:ss,SSS} %msg%n</pattern>
    </encoder>
    <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
      <level>DEBUG</level>
    </filter>
  </appender>

  <logger name="io.k8s.cassandra" level="DEBUG"/>

  <root level="INFO">
    <appender-ref ref="STDOUT" />
  </root>

</configuration>
1
vendor/k8s.io/kubernetes/examples/storage/hazelcast/README.md
generated
vendored
Normal file
@ -0,0 +1 @@
This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/storage/hazelcast/README.md](https://github.com/kubernetes/examples/blob/master/staging/storage/hazelcast/README.md)
26
vendor/k8s.io/kubernetes/examples/storage/hazelcast/hazelcast-deployment.yaml
generated
vendored
Normal file
@ -0,0 +1,26 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: hazelcast
  labels:
    name: hazelcast
spec:
  template:
    metadata:
      labels:
        name: hazelcast
    spec:
      containers:
      - name: hazelcast
        image: quay.io/pires/hazelcast-kubernetes:3.8_1
        imagePullPolicy: Always
        env:
        - name: "DNS_DOMAIN"
          value: "cluster.local"
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        ports:
        - name: hazelcast
          containerPort: 5701
11
vendor/k8s.io/kubernetes/examples/storage/hazelcast/hazelcast-service.yaml
generated
vendored
Normal file
@ -0,0 +1,11 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    name: hazelcast
  name: hazelcast
spec:
  ports:
  - port: 5701
  selector:
    name: hazelcast
1
vendor/k8s.io/kubernetes/examples/storage/minio/README.md
generated
vendored
Normal file
@ -0,0 +1 @@
This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/storage/minio/README.md](https://github.com/kubernetes/examples/blob/master/staging/storage/minio/README.md)
13
vendor/k8s.io/kubernetes/examples/storage/minio/minio-distributed-headless-service.yaml
generated
vendored
Normal file
@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
  name: minio
  labels:
    app: minio
spec:
  clusterIP: None
  ports:
    - port: 9000
      name: minio
  selector:
    app: minio
12
vendor/k8s.io/kubernetes/examples/storage/minio/minio-distributed-service.yaml
generated
vendored
Normal file
@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
  name: minio-service
spec:
  type: LoadBalancer
  ports:
    - port: 9000
      targetPort: 9000
      protocol: TCP
  selector:
    app: minio
50
vendor/k8s.io/kubernetes/examples/storage/minio/minio-distributed-statefulset.yaml
generated
vendored
Normal file
50
vendor/k8s.io/kubernetes/examples/storage/minio/minio-distributed-statefulset.yaml
generated
vendored
Normal file
@ -0,0 +1,50 @@
apiVersion: apps/v1beta2
kind: StatefulSet
metadata:
  name: minio
spec:
  selector:
    matchLabels:
      app: minio
  serviceName: minio
  replicas: 4
  template:
    metadata:
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
      labels:
        app: minio
    spec:
      containers:
      - name: minio
        env:
        - name: MINIO_ACCESS_KEY
          value: "minio"
        - name: MINIO_SECRET_KEY
          value: "minio123"
        image: minio/minio:latest
        args:
        - server
        - http://minio-0.minio.default.svc.cluster.local/data
        - http://minio-1.minio.default.svc.cluster.local/data
        - http://minio-2.minio.default.svc.cluster.local/data
        - http://minio-3.minio.default.svc.cluster.local/data
        ports:
        - containerPort: 9000
          hostPort: 9000
        # These volume mounts are persistent. Each pod in the StatefulSet
        # gets a volume mounted based on this field.
        volumeMounts:
        - name: data
          mountPath: /data
  # These are converted to volume claims by the controller
  # and mounted at the paths mentioned above.
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes:
        - ReadWriteOnce
      resources:
        requests:
          storage: 10Gi
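A possible bring-up order for the three distributed Minio manifests above, sketched by the editor under the assumption that everything runs in the default namespace (the server URLs in the StatefulSet args hard-code it):

# The headless service must exist first so minio-0..minio-3 get stable DNS records.
kubectl create -f minio-distributed-headless-service.yaml
kubectl create -f minio-distributed-statefulset.yaml
# Optional external access through the LoadBalancer service.
kubectl create -f minio-distributed-service.yaml
kubectl get pods -l app=minio -w   # wait until all four replicas are Running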
43
vendor/k8s.io/kubernetes/examples/storage/minio/minio-standalone-deployment.yaml
generated
vendored
Normal file
43
vendor/k8s.io/kubernetes/examples/storage/minio/minio-standalone-deployment.yaml
generated
vendored
Normal file
@ -0,0 +1,43 @@
apiVersion: apps/v1beta2
kind: Deployment
metadata:
  # This name uniquely identifies the Deployment
  name: minio-deployment
spec:
  selector:
    matchLabels:
      app: minio
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        # Label is used as selector in the service.
        app: minio
    spec:
      # Refer to the PVC created earlier
      volumes:
      - name: storage
        persistentVolumeClaim:
          # Name of the PVC created earlier
          claimName: minio-pv-claim
      containers:
      - name: minio
        # Pulls the default Minio image from Docker Hub
        image: minio/minio:latest
        args:
        - server
        - /storage
        env:
        # Minio access key and secret key
        - name: MINIO_ACCESS_KEY
          value: "minio"
        - name: MINIO_SECRET_KEY
          value: "minio123"
        ports:
        - containerPort: 9000
          hostPort: 9000
        # Mount the volume into the pod
        volumeMounts:
        - name: storage # must match the volume name, above
          mountPath: "/storage"
15
vendor/k8s.io/kubernetes/examples/storage/minio/minio-standalone-pvc.yaml
generated
vendored
Normal file
15
vendor/k8s.io/kubernetes/examples/storage/minio/minio-standalone-pvc.yaml
generated
vendored
Normal file
@ -0,0 +1,15 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  # This name uniquely identifies the PVC. Will be used in deployment below.
  name: minio-pv-claim
  labels:
    app: minio-storage-claim
spec:
  # Read more about access modes here: http://kubernetes.io/docs/user-guide/persistent-volumes/#access-modes
  accessModes:
    - ReadWriteOnce
  resources:
    # This is the request for storage. Should be available in the cluster.
    requests:
      storage: 10Gi
12
vendor/k8s.io/kubernetes/examples/storage/minio/minio-standalone-service.yaml
generated
vendored
Normal file
12
vendor/k8s.io/kubernetes/examples/storage/minio/minio-standalone-service.yaml
generated
vendored
Normal file
@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
  name: minio-service
spec:
  type: LoadBalancer
  ports:
  - port: 9000
    targetPort: 9000
    protocol: TCP
  selector:
    app: minio
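The standalone variant wires the PVC above into the deployment, so creation order matters. A minimal sketch (editor's illustration; file names as in the headers):

# PVC first: the deployment references claimName minio-pv-claim.
kubectl create -f minio-standalone-pvc.yaml
kubectl create -f minio-standalone-deployment.yaml
kubectl create -f minio-standalone-service.yaml
kubectl get svc minio-service   # the server is exposed on port 9000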
1
vendor/k8s.io/kubernetes/examples/storage/mysql-galera/README.md
generated
vendored
Normal file
1
vendor/k8s.io/kubernetes/examples/storage/mysql-galera/README.md
generated
vendored
Normal file
@ -0,0 +1 @@
This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/storage/mysql-galera/README.md](https://github.com/kubernetes/examples/blob/master/staging/storage/mysql-galera/README.md)
56
vendor/k8s.io/kubernetes/examples/storage/mysql-galera/image/Dockerfile
generated
vendored
Normal file
56
vendor/k8s.io/kubernetes/examples/storage/mysql-galera/image/Dockerfile
generated
vendored
Normal file
@ -0,0 +1,56 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM ubuntu:trusty

# add our user and group first to make sure their IDs get assigned
# consistently, regardless of whatever dependencies get added
RUN groupadd -r mysql && useradd -r -g mysql mysql

ENV PERCONA_XTRADB_VERSION 5.6
ENV MYSQL_VERSION 5.6
ENV TERM linux

RUN apt-get update
RUN DEBIAN_FRONTEND=noninteractive apt-get install -y perl --no-install-recommends && rm -rf /var/lib/apt/lists/*

RUN apt-key adv --keyserver keys.gnupg.net --recv-keys 8507EFA5

RUN echo "deb http://repo.percona.com/apt trusty main" > /etc/apt/sources.list.d/percona.list
RUN echo "deb-src http://repo.percona.com/apt trusty main" >> /etc/apt/sources.list.d/percona.list

# the "/var/lib/mysql" stuff here is because the mysql-server
# postinst doesn't have an explicit way to disable the
# mysql_install_db codepath besides having a database already
# "configured" (ie, stuff in /var/lib/mysql/mysql)
# also, we set debconf keys to make APT a little quieter
RUN { \
        echo percona-server-server-5.6 percona-server-server/data-dir select ''; \
        echo percona-server-server-5.6 percona-server-server/root_password password ''; \
    } | debconf-set-selections \
    && apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y percona-xtradb-cluster-client-"${MYSQL_VERSION}" \
       percona-xtradb-cluster-common-"${MYSQL_VERSION}" percona-xtradb-cluster-server-"${MYSQL_VERSION}" \
    && rm -rf /var/lib/apt/lists/* \
    && rm -rf /var/lib/mysql && mkdir -p /var/lib/mysql && chown -R mysql:mysql /var/lib/mysql

VOLUME /var/lib/mysql

COPY my.cnf /etc/mysql/my.cnf
COPY cluster.cnf /etc/mysql/conf.d/cluster.cnf

COPY docker-entrypoint.sh /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]

EXPOSE 3306 4444 4567 4568
CMD ["mysqld"]
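A minimal build sketch for the image defined by this Dockerfile (editor's illustration; the tag is hypothetical). The build context must contain my.cnf, cluster.cnf and docker-entrypoint.sh, since all three are COPYed in:

# Run from vendor/k8s.io/kubernetes/examples/storage/mysql-galera/image/
docker build -t example/percona-xtradb-cluster:5.6 .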
12
vendor/k8s.io/kubernetes/examples/storage/mysql-galera/image/cluster.cnf
generated
vendored
Normal file
12
vendor/k8s.io/kubernetes/examples/storage/mysql-galera/image/cluster.cnf
generated
vendored
Normal file
@ -0,0 +1,12 @@
[mysqld]

wsrep_provider=/usr/lib/libgalera_smm.so
wsrep_cluster_address=gcomm://
binlog_format=ROW
default_storage_engine=InnoDB
innodb_autoinc_lock_mode=2

wsrep_sst_method=xtrabackup-v2
wsrep_node_address=127.0.0.1
wsrep_cluster_name=galera_kubernetes
wsrep_sst_auth=sstuser:changethis
164
vendor/k8s.io/kubernetes/examples/storage/mysql-galera/image/docker-entrypoint.sh
generated
vendored
Executable file
164
vendor/k8s.io/kubernetes/examples/storage/mysql-galera/image/docker-entrypoint.sh
generated
vendored
Executable file
@ -0,0 +1,164 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

#
# This script does the following:
#
# 1. Sets up database privileges by building an SQL script
# 2. Starts MySQL for the first time with this script as the init file
# 3. Modifies my.cnf and cluster.cnf to reflect available nodes to join
#

# if NUM_NODES not passed, default to 3
if [ -z "$NUM_NODES" ]; then
  NUM_NODES=3
fi

if [ "${1:0:1}" = '-' ]; then
  set -- mysqld "$@"
fi

# if the command passed is 'mysqld' via CMD, then begin processing.
if [ "$1" = 'mysqld' ]; then
  # read DATADIR from the MySQL config
  DATADIR="$("$@" --verbose --help 2>/dev/null | awk '$1 == "datadir" { print $2; exit }')"

  # only build the SQL script if the system tables have not yet been created
  # by mysql_install_db and the permissions set with the initial SQL script
  if [ ! -d "$DATADIR/mysql" ]; then
    # fail if user didn't supply a root password
    if [ -z "$MYSQL_ROOT_PASSWORD" -a -z "$MYSQL_ALLOW_EMPTY_PASSWORD" ]; then
      echo >&2 'error: database is uninitialized and MYSQL_ROOT_PASSWORD not set'
      echo >&2 '  Did you forget to add -e MYSQL_ROOT_PASSWORD=... ?'
      exit 1
    fi

    # mysql_install_db installs system tables
    echo 'Running mysql_install_db ...'
    mysql_install_db --datadir="$DATADIR"
    echo 'Finished mysql_install_db'

    # this script will be run once when MySQL first starts to set up
    # prior to creating system tables and will ensure proper user permissions
    tempSqlFile='/tmp/mysql-first-time.sql'
    cat > "$tempSqlFile" <<-EOSQL
      DELETE FROM mysql.user ;
      CREATE USER 'root'@'%' IDENTIFIED BY '${MYSQL_ROOT_PASSWORD}' ;
      GRANT ALL ON *.* TO 'root'@'%' WITH GRANT OPTION ;
EOSQL

    if [ "$MYSQL_DATABASE" ]; then
      echo "CREATE DATABASE IF NOT EXISTS \`$MYSQL_DATABASE\` ;" >> "$tempSqlFile"
    fi

    if [ "$MYSQL_USER" -a "$MYSQL_PASSWORD" ]; then
      echo "CREATE USER '$MYSQL_USER'@'%' IDENTIFIED BY '$MYSQL_PASSWORD' ;" >> "$tempSqlFile"

      if [ "$MYSQL_DATABASE" ]; then
        echo "GRANT ALL ON \`$MYSQL_DATABASE\`.* TO '$MYSQL_USER'@'%' ;" >> "$tempSqlFile"
      fi
    fi

    # Add SST (State Snapshot Transfer) user if Clustering is turned on
    if [ -n "$GALERA_CLUSTER" ]; then
      # this is the State Snapshot Transfer user (SST, initial dump or xtrabackup user)
      WSREP_SST_USER=${WSREP_SST_USER:-"sst"}
      if [ -z "$WSREP_SST_PASSWORD" ]; then
        echo >&2 'error: Galera cluster is enabled and WSREP_SST_PASSWORD is not set'
        echo >&2 '  Did you forget to add -e WSREP_SST_PASSWORD=... ?'
        exit 1
      fi
      # add State Snapshot Transfer (SST) user privileges
      echo "CREATE USER '${WSREP_SST_USER}'@'localhost' IDENTIFIED BY '${WSREP_SST_PASSWORD}';" >> "$tempSqlFile"
      echo "GRANT RELOAD, LOCK TABLES, REPLICATION CLIENT ON *.* TO '${WSREP_SST_USER}'@'localhost';" >> "$tempSqlFile"
    fi

    echo 'FLUSH PRIVILEGES ;' >> "$tempSqlFile"

    # Add the SQL file to mysqld's command line args
    set -- "$@" --init-file="$tempSqlFile"
  fi

  chown -R mysql:mysql "$DATADIR"
fi

# if cluster is turned on, then proceed to build cluster setting strings
# that will be interpolated into the config files
if [ -n "$GALERA_CLUSTER" ]; then
  # this is the State Snapshot Transfer user (SST, initial dump or xtrabackup user)
  WSREP_SST_USER=${WSREP_SST_USER:-"sst"}
  if [ -z "$WSREP_SST_PASSWORD" ]; then
    echo >&2 'error: database is uninitialized and WSREP_SST_PASSWORD not set'
    echo >&2 '  Did you forget to add -e WSREP_SST_PASSWORD=xxx ?'
    exit 1
  fi

  # user/password for SST user
  sed -i -e "s|^wsrep_sst_auth=sstuser:changethis|wsrep_sst_auth=${WSREP_SST_USER}:${WSREP_SST_PASSWORD}|" /etc/mysql/conf.d/cluster.cnf

  # set the node's own address
  WSREP_NODE_ADDRESS=`ip addr show | grep -E '^[ ]*inet' | grep -m1 global | awk '{ print $2 }' | sed -e 's/\/.*//'`
  if [ -n "$WSREP_NODE_ADDRESS" ]; then
    sed -i -e "s|^wsrep_node_address=.*$|wsrep_node_address=${WSREP_NODE_ADDRESS}|" /etc/mysql/conf.d/cluster.cnf
  fi

  # if the string is not defined or it only is 'gcomm://', this means bootstrap
  if [ -z "$WSREP_CLUSTER_ADDRESS" -o "$WSREP_CLUSTER_ADDRESS" == "gcomm://" ]; then
    # if empty, set to 'gcomm://'
    # NOTE: this list does not imply membership.
    # It only means "obtain SST and join from one of these..."
    if [ -z "$WSREP_CLUSTER_ADDRESS" ]; then
      WSREP_CLUSTER_ADDRESS="gcomm://"
    fi

    # loop through number of nodes
    for NUM in `seq 1 $NUM_NODES`; do
      NODE_SERVICE_HOST="PXC_NODE${NUM}_SERVICE_HOST"

      # if set
      if [ -n "${!NODE_SERVICE_HOST}" ]; then
        # if not its own IP, then add it
        if [ $(expr "$HOSTNAME" : "pxc-node${NUM}") -eq 0 ]; then
          # if not the first bootstrap node add comma
          if [ $WSREP_CLUSTER_ADDRESS != "gcomm://" ]; then
            WSREP_CLUSTER_ADDRESS="${WSREP_CLUSTER_ADDRESS},"
          fi
          # append
          # if user specifies USE_IP, use that
          if [ -n "${USE_IP}" ]; then
            WSREP_CLUSTER_ADDRESS="${WSREP_CLUSTER_ADDRESS}"${!NODE_SERVICE_HOST}
          # otherwise use DNS
          else
            WSREP_CLUSTER_ADDRESS="${WSREP_CLUSTER_ADDRESS}pxc-node${NUM}"
          fi
        fi
      fi
    done
  fi

  # WSREP_CLUSTER_ADDRESS is now complete and will be interpolated into the
  # cluster address string (wsrep_cluster_address) in the cluster
  # configuration file, cluster.cnf
  if [ -n "$WSREP_CLUSTER_ADDRESS" -a "$WSREP_CLUSTER_ADDRESS" != "gcomm://" ]; then
    sed -i -e "s|^wsrep_cluster_address=gcomm://|wsrep_cluster_address=${WSREP_CLUSTER_ADDRESS}|" /etc/mysql/conf.d/cluster.cnf
  fi
fi

# random server ID needed
sed -i -e "s/^server\-id=.*$/server-id=${RANDOM}/" /etc/mysql/my.cnf

# finally, start mysql
exec "$@"
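How the entrypoint's branches map to environment variables, as a hedged usage sketch (image tag and passwords are illustrative, not from the source):

# Plain standalone server: the first run initializes the datadir and applies
# the generated first-time SQL script via --init-file.
docker run -d --name mysql-single \
  -e MYSQL_ROOT_PASSWORD=secret \
  example/percona-xtradb-cluster:5.6

# Galera bootstrap node: GALERA_CLUSTER enables the SST user and the
# cluster.cnf rewrites; gcomm:// with no peers means "bootstrap a new cluster".
docker run -d --name pxc-seed \
  -e MYSQL_ROOT_PASSWORD=secret \
  -e GALERA_CLUSTER=true \
  -e WSREP_CLUSTER_ADDRESS=gcomm:// \
  -e WSREP_SST_USER=sst \
  -e WSREP_SST_PASSWORD=sst-secret \
  example/percona-xtradb-cluster:5.6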
55
vendor/k8s.io/kubernetes/examples/storage/mysql-galera/image/my.cnf
generated
vendored
Normal file
55
vendor/k8s.io/kubernetes/examples/storage/mysql-galera/image/my.cnf
generated
vendored
Normal file
@ -0,0 +1,55 @@
[client]
port=3306
socket=/var/run/mysqld/mysqld.sock

[mysqld_safe]
socket=/var/run/mysqld/mysqld.sock
nice=0

[mysqld]
user=mysql
pid-file=/var/run/mysqld/mysqld.pid
socket=/var/run/mysqld/mysqld.sock
port=3306
basedir=/usr
datadir=/var/lib/mysql
tmpdir=/tmp
lc-messages-dir=/usr/share/mysql
skip-external-locking

key_buffer=16M
max_allowed_packet=16M
thread_stack=192K
thread_cache_size=8

myisam-recover=BACKUP
#max_connections=100
query_cache_limit=1M
query_cache_size=16M
slow_query_log=1
slow_query_log_file=/var/log/mysql/mysql-slow.log
long_query_time=2
log-queries-not-using-indexes

server-id=12345
log_bin=/var/log/mysql/mysql-bin.log
expire_logs_days=4
max_binlog_size=100M

default_storage_engine=InnoDB
innodb_file_per_table
innodb_log_file_size=100M
innodb_log_buffer_size=10M
innodb_log_files_in_group=2
innodb_buffer_pool_instances=4
innodb_buffer_pool_size=100M

[mysqldump]
quick
quote-names
max_allowed_packet=16M

[isamchk]
key_buffer=16M

!includedir /etc/mysql/conf.d/
12
vendor/k8s.io/kubernetes/examples/storage/mysql-galera/pxc-cluster-service.yaml
generated
vendored
Normal file
12
vendor/k8s.io/kubernetes/examples/storage/mysql-galera/pxc-cluster-service.yaml
generated
vendored
Normal file
@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
  name: pxc-cluster
  labels:
    unit: pxc-cluster
spec:
  ports:
  - port: 3306
    name: mysql
  selector:
    unit: pxc-cluster
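Once the three node pods defined below are running, this selector-only service gives clients a single entry point. A hedged check from a pod inside the cluster (the credentials come from the node manifests that follow; the short DNS name assumes the default namespace):

# Any healthy node should answer; wsrep_cluster_size should report 3.
mysql -h pxc-cluster -u mysql -pmysql \
  -e "SHOW STATUS LIKE 'wsrep_cluster_size';"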
57
vendor/k8s.io/kubernetes/examples/storage/mysql-galera/pxc-node1.yaml
generated
vendored
Normal file
57
vendor/k8s.io/kubernetes/examples/storage/mysql-galera/pxc-node1.yaml
generated
vendored
Normal file
@ -0,0 +1,57 @@
apiVersion: v1
kind: Service
metadata:
  name: pxc-node1
  labels:
    node: pxc-node1
spec:
  ports:
  - port: 3306
    name: mysql
  - port: 4444
    name: state-snapshot-transfer
  - port: 4567
    name: replication-traffic
  - port: 4568
    name: incremental-state-transfer
  selector:
    node: pxc-node1
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: pxc-node1
spec:
  replicas: 1
  template:
    metadata:
      labels:
        node: pxc-node1
        unit: pxc-cluster
    spec:
      containers:
      - resources:
          limits:
            cpu: 0.3
        image: capttofu/percona_xtradb_cluster_5_6:beta
        name: pxc-node1
        ports:
        - containerPort: 3306
        - containerPort: 4444
        - containerPort: 4567
        - containerPort: 4568
        env:
        - name: GALERA_CLUSTER
          value: "true"
        - name: WSREP_CLUSTER_ADDRESS
          value: gcomm://
        - name: WSREP_SST_USER
          value: sst
        - name: WSREP_SST_PASSWORD
          value: sst
        - name: MYSQL_USER
          value: mysql
        - name: MYSQL_PASSWORD
          value: mysql
        - name: MYSQL_ROOT_PASSWORD
          value: c-krit
58
vendor/k8s.io/kubernetes/examples/storage/mysql-galera/pxc-node2.yaml
generated
vendored
Normal file
58
vendor/k8s.io/kubernetes/examples/storage/mysql-galera/pxc-node2.yaml
generated
vendored
Normal file
@ -0,0 +1,58 @@
apiVersion: v1
kind: Service
metadata:
  name: pxc-node2
  labels:
    node: pxc-node2
spec:
  ports:
  - port: 3306
    name: mysql
  - port: 4444
    name: state-snapshot-transfer
  - port: 4567
    name: replication-traffic
  - port: 4568
    name: incremental-state-transfer
  selector:
    node: pxc-node2

---
apiVersion: v1
kind: ReplicationController
metadata:
  name: pxc-node2
spec:
  replicas: 1
  template:
    metadata:
      labels:
        node: pxc-node2
        unit: pxc-cluster
    spec:
      containers:
      - resources:
          limits:
            cpu: 0.3
        image: capttofu/percona_xtradb_cluster_5_6:beta
        name: pxc-node2
        ports:
        - containerPort: 3306
        - containerPort: 4444
        - containerPort: 4567
        - containerPort: 4568
        env:
        - name: GALERA_CLUSTER
          value: "true"
        - name: WSREP_CLUSTER_ADDRESS
          value: gcomm://
        - name: WSREP_SST_USER
          value: sst
        - name: WSREP_SST_PASSWORD
          value: sst
        - name: MYSQL_USER
          value: mysql
        - name: MYSQL_PASSWORD
          value: mysql
        - name: MYSQL_ROOT_PASSWORD
          value: c-krit
58
vendor/k8s.io/kubernetes/examples/storage/mysql-galera/pxc-node3.yaml
generated
vendored
Normal file
58
vendor/k8s.io/kubernetes/examples/storage/mysql-galera/pxc-node3.yaml
generated
vendored
Normal file
@ -0,0 +1,58 @@
apiVersion: v1
kind: Service
metadata:
  name: pxc-node3
  labels:
    node: pxc-node3
spec:
  ports:
  - port: 3306
    name: mysql
  - port: 4444
    name: state-snapshot-transfer
  - port: 4567
    name: replication-traffic
  - port: 4568
    name: incremental-state-transfer
  selector:
    node: pxc-node3

---
apiVersion: v1
kind: ReplicationController
metadata:
  name: pxc-node3
spec:
  replicas: 1
  template:
    metadata:
      labels:
        node: pxc-node3
        unit: pxc-cluster
    spec:
      containers:
      - resources:
          limits:
            cpu: 0.3
        image: capttofu/percona_xtradb_cluster_5_6:beta
        name: pxc-node3
        ports:
        - containerPort: 3306
        - containerPort: 4444
        - containerPort: 4567
        - containerPort: 4568
        env:
        - name: GALERA_CLUSTER
          value: "true"
        - name: WSREP_CLUSTER_ADDRESS
          value: gcomm://
        - name: WSREP_SST_USER
          value: sst
        - name: WSREP_SST_PASSWORD
          value: sst
        - name: MYSQL_USER
          value: mysql
        - name: MYSQL_PASSWORD
          value: mysql
        - name: MYSQL_ROOT_PASSWORD
          value: c-krit
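A bring-up sketch for the whole Percona XtraDB Cluster example (editor's illustration): node1 bootstraps with WSREP_CLUSTER_ADDRESS=gcomm://, so it must be up before the joiners start.

kubectl create -f pxc-cluster-service.yaml
kubectl create -f pxc-node1.yaml
kubectl get pods -l node=pxc-node1 -w   # wait for the bootstrap node to be Running
kubectl create -f pxc-node2.yaml
kubectl create -f pxc-node3.yaml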
1
vendor/k8s.io/kubernetes/examples/storage/redis/README.md
generated
vendored
Normal file
1
vendor/k8s.io/kubernetes/examples/storage/redis/README.md
generated
vendored
Normal file
@ -0,0 +1 @@
This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/storage/redis/README.md](https://github.com/kubernetes/examples/blob/master/staging/storage/redis/README.md)
25
vendor/k8s.io/kubernetes/examples/storage/redis/image/Dockerfile
generated
vendored
Normal file
25
vendor/k8s.io/kubernetes/examples/storage/redis/image/Dockerfile
generated
vendored
Normal file
@ -0,0 +1,25 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM alpine:3.4

RUN apk add --no-cache redis sed bash

COPY redis-master.conf /redis-master/redis.conf
COPY redis-slave.conf /redis-slave/redis.conf
COPY run.sh /run.sh

CMD [ "/run.sh" ]

ENTRYPOINT [ "bash", "-c" ]
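A minimal build sketch for this image (editor's illustration; the tag is hypothetical). The build context must hold redis-master.conf, redis-slave.conf and run.sh, which are COPYed in; run.sh is not shown in this excerpt and presumably selects between the two configs at start-up:

# Run from vendor/k8s.io/kubernetes/examples/storage/redis/image/
docker build -t example/redis-k8s:latest .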
828
vendor/k8s.io/kubernetes/examples/storage/redis/image/redis-master.conf
generated
vendored
Normal file
828
vendor/k8s.io/kubernetes/examples/storage/redis/image/redis-master.conf
generated
vendored
Normal file
@ -0,0 +1,828 @@
|
||||
# Redis configuration file example
|
||||
|
||||
# Note on units: when memory size is needed, it is possible to specify
|
||||
# it in the usual form of 1k 5GB 4M and so forth:
|
||||
#
|
||||
# 1k => 1000 bytes
|
||||
# 1kb => 1024 bytes
|
||||
# 1m => 1000000 bytes
|
||||
# 1mb => 1024*1024 bytes
|
||||
# 1g => 1000000000 bytes
|
||||
# 1gb => 1024*1024*1024 bytes
|
||||
#
|
||||
# units are case insensitive so 1GB 1Gb 1gB are all the same.
|
||||
|
||||
################################## INCLUDES ###################################
|
||||
|
||||
# Include one or more other config files here. This is useful if you
|
||||
# have a standard template that goes to all Redis servers but also need
|
||||
# to customize a few per-server settings. Include files can include
|
||||
# other files, so use this wisely.
|
||||
#
|
||||
# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
|
||||
# from admin or Redis Sentinel. Since Redis always uses the last processed
|
||||
# line as value of a configuration directive, you'd better put includes
|
||||
# at the beginning of this file to avoid overwriting config change at runtime.
|
||||
#
|
||||
# If instead you are interested in using includes to override configuration
|
||||
# options, it is better to use include as the last line.
|
||||
#
|
||||
# include /path/to/local.conf
|
||||
# include /path/to/other.conf
|
||||
|
||||
################################ GENERAL #####################################
|
||||
|
||||
# By default Redis does not run as a daemon. Use 'yes' if you need it.
|
||||
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
|
||||
daemonize no
|
||||
|
||||
# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
|
||||
# default. You can specify a custom pid file location here.
|
||||
pidfile /var/run/redis.pid
|
||||
|
||||
# Accept connections on the specified port, default is 6379.
|
||||
# If port 0 is specified Redis will not listen on a TCP socket.
|
||||
port 6379
|
||||
|
||||
# TCP listen() backlog.
|
||||
#
|
||||
# In high requests-per-second environments you need an high backlog in order
|
||||
# to avoid slow clients connections issues. Note that the Linux kernel
|
||||
# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
|
||||
# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
|
||||
# in order to get the desired effect.
|
||||
tcp-backlog 511
|
||||
|
||||
# By default Redis listens for connections from all the network interfaces
|
||||
# available on the server. It is possible to listen to just one or multiple
|
||||
# interfaces using the "bind" configuration directive, followed by one or
|
||||
# more IP addresses.
|
||||
#
|
||||
# Examples:
|
||||
#
|
||||
# bind 192.168.1.100 10.0.0.1
|
||||
|
||||
bind 0.0.0.0
|
||||
|
||||
# Specify the path for the Unix socket that will be used to listen for
|
||||
# incoming connections. There is no default, so Redis will not listen
|
||||
# on a unix socket when not specified.
|
||||
#
|
||||
# unixsocket /tmp/redis.sock
|
||||
# unixsocketperm 700
|
||||
|
||||
# Close the connection after a client is idle for N seconds (0 to disable)
|
||||
timeout 0
|
||||
|
||||
# TCP keepalive.
|
||||
#
|
||||
# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
|
||||
# of communication. This is useful for two reasons:
|
||||
#
|
||||
# 1) Detect dead peers.
|
||||
# 2) Take the connection alive from the point of view of network
|
||||
# equipment in the middle.
|
||||
#
|
||||
# On Linux, the specified value (in seconds) is the period used to send ACKs.
|
||||
# Note that to close the connection the double of the time is needed.
|
||||
# On other kernels the period depends on the kernel configuration.
|
||||
#
|
||||
# A reasonable value for this option is 60 seconds.
|
||||
tcp-keepalive 60
|
||||
|
||||
# Specify the server verbosity level.
|
||||
# This can be one of:
|
||||
# debug (a lot of information, useful for development/testing)
|
||||
# verbose (many rarely useful info, but not a mess like the debug level)
|
||||
# notice (moderately verbose, what you want in production probably)
|
||||
# warning (only very important / critical messages are logged)
|
||||
loglevel notice
|
||||
|
||||
# Specify the log file name. Also the empty string can be used to force
|
||||
# Redis to log on the standard output. Note that if you use standard
|
||||
# output for logging but daemonize, logs will be sent to /dev/null
|
||||
logfile ""
|
||||
|
||||
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
|
||||
# and optionally update the other syslog parameters to suit your needs.
|
||||
# syslog-enabled no
|
||||
|
||||
# Specify the syslog identity.
|
||||
# syslog-ident redis
|
||||
|
||||
# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
|
||||
# syslog-facility local0
|
||||
|
||||
# Set the number of databases. The default database is DB 0, you can select
|
||||
# a different one on a per-connection basis using SELECT <dbid> where
|
||||
# dbid is a number between 0 and 'databases'-1
|
||||
databases 16
|
||||
|
||||
################################ SNAPSHOTTING ################################
|
||||
#
|
||||
# Save the DB on disk:
|
||||
#
|
||||
# save <seconds> <changes>
|
||||
#
|
||||
# Will save the DB if both the given number of seconds and the given
|
||||
# number of write operations against the DB occurred.
|
||||
#
|
||||
# In the example below the behaviour will be to save:
|
||||
# after 900 sec (15 min) if at least 1 key changed
|
||||
# after 300 sec (5 min) if at least 10 keys changed
|
||||
# after 60 sec if at least 10000 keys changed
|
||||
#
|
||||
# Note: you can disable saving completely by commenting out all "save" lines.
|
||||
#
|
||||
# It is also possible to remove all the previously configured save
|
||||
# points by adding a save directive with a single empty string argument
|
||||
# like in the following example:
|
||||
#
|
||||
# save ""
|
||||
|
||||
save 900 1
|
||||
save 300 10
|
||||
save 60 10000
|
||||
|
||||
# By default Redis will stop accepting writes if RDB snapshots are enabled
|
||||
# (at least one save point) and the latest background save failed.
|
||||
# This will make the user aware (in a hard way) that data is not persisting
|
||||
# on disk properly, otherwise chances are that no one will notice and some
|
||||
# disaster will happen.
|
||||
#
|
||||
# If the background saving process will start working again Redis will
|
||||
# automatically allow writes again.
|
||||
#
|
||||
# However if you have setup your proper monitoring of the Redis server
|
||||
# and persistence, you may want to disable this feature so that Redis will
|
||||
# continue to work as usual even if there are problems with disk,
|
||||
# permissions, and so forth.
|
||||
stop-writes-on-bgsave-error yes
|
||||
|
||||
# Compress string objects using LZF when dump .rdb databases?
|
||||
# For default that's set to 'yes' as it's almost always a win.
|
||||
# If you want to save some CPU in the saving child set it to 'no' but
|
||||
# the dataset will likely be bigger if you have compressible values or keys.
|
||||
rdbcompression yes
|
||||
|
||||
# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
|
||||
# This makes the format more resistant to corruption but there is a performance
|
||||
# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
|
||||
# for maximum performances.
|
||||
#
|
||||
# RDB files created with checksum disabled have a checksum of zero that will
|
||||
# tell the loading code to skip the check.
|
||||
rdbchecksum yes
|
||||
|
||||
# The filename where to dump the DB
|
||||
dbfilename dump.rdb
|
||||
|
||||
# The working directory.
|
||||
#
|
||||
# The DB will be written inside this directory, with the filename specified
|
||||
# above using the 'dbfilename' configuration directive.
|
||||
#
|
||||
# The Append Only File will also be created inside this directory.
|
||||
#
|
||||
# Note that you must specify a directory here, not a file name.
|
||||
dir /redis-master-data
|
||||
|
||||
################################# REPLICATION #################################
|
||||
|
||||
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
|
||||
# another Redis server. A few things to understand ASAP about Redis replication.
|
||||
#
|
||||
# 1) Redis replication is asynchronous, but you can configure a master to
|
||||
# stop accepting writes if it appears to be not connected with at least
|
||||
# a given number of slaves.
|
||||
# 2) Redis slaves are able to perform a partial resynchronization with the
|
||||
# master if the replication link is lost for a relatively small amount of
|
||||
# time. You may want to configure the replication backlog size (see the next
|
||||
# sections of this file) with a sensible value depending on your needs.
|
||||
# 3) Replication is automatic and does not need user intervention. After a
|
||||
# network partition slaves automatically try to reconnect to masters
|
||||
# and resynchronize with them.
|
||||
#
|
||||
# slaveof <masterip> <masterport>
|
||||
|
||||
# If the master is password protected (using the "requirepass" configuration
|
||||
# directive below) it is possible to tell the slave to authenticate before
|
||||
# starting the replication synchronization process, otherwise the master will
|
||||
# refuse the slave request.
|
||||
#
|
||||
# masterauth <master-password>
|
||||
|
||||
# When a slave loses its connection with the master, or when the replication
|
||||
# is still in progress, the slave can act in two different ways:
|
||||
#
|
||||
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
|
||||
# still reply to client requests, possibly with out of date data, or the
|
||||
# data set may just be empty if this is the first synchronization.
|
||||
#
|
||||
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
|
||||
# an error "SYNC with master in progress" to all the kind of commands
|
||||
# but to INFO and SLAVEOF.
|
||||
#
|
||||
slave-serve-stale-data yes
|
||||
|
||||
# You can configure a slave instance to accept writes or not. Writing against
|
||||
# a slave instance may be useful to store some ephemeral data (because data
|
||||
# written on a slave will be easily deleted after resync with the master) but
|
||||
# may also cause problems if clients are writing to it because of a
|
||||
# misconfiguration.
|
||||
#
|
||||
# Since Redis 2.6 by default slaves are read-only.
|
||||
#
|
||||
# Note: read only slaves are not designed to be exposed to untrusted clients
|
||||
# on the internet. It's just a protection layer against misuse of the instance.
|
||||
# Still a read only slave exports by default all the administrative commands
|
||||
# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
|
||||
# security of read only slaves using 'rename-command' to shadow all the
|
||||
# administrative / dangerous commands.
|
||||
slave-read-only yes
|
||||
|
||||
# Replication SYNC strategy: disk or socket.
|
||||
#
|
||||
# -------------------------------------------------------
|
||||
# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY
|
||||
# -------------------------------------------------------
|
||||
#
|
||||
# New slaves and reconnecting slaves that are not able to continue the replication
|
||||
# process just receiving differences, need to do what is called a "full
|
||||
# synchronization". An RDB file is transmitted from the master to the slaves.
|
||||
# The transmission can happen in two different ways:
|
||||
#
|
||||
# 1) Disk-backed: The Redis master creates a new process that writes the RDB
|
||||
# file on disk. Later the file is transferred by the parent
|
||||
# process to the slaves incrementally.
|
||||
# 2) Diskless: The Redis master creates a new process that directly writes the
|
||||
# RDB file to slave sockets, without touching the disk at all.
|
||||
#
|
||||
# With disk-backed replication, while the RDB file is generated, more slaves
|
||||
# can be queued and served with the RDB file as soon as the current child producing
|
||||
# the RDB file finishes its work. With diskless replication instead once
|
||||
# the transfer starts, new slaves arriving will be queued and a new transfer
|
||||
# will start when the current one terminates.
|
||||
#
|
||||
# When diskless replication is used, the master waits a configurable amount of
|
||||
# time (in seconds) before starting the transfer in the hope that multiple slaves
|
||||
# will arrive and the transfer can be parallelized.
|
||||
#
|
||||
# With slow disks and fast (large bandwidth) networks, diskless replication
|
||||
# works better.
|
||||
repl-diskless-sync no
|
||||
|
||||
# When diskless replication is enabled, it is possible to configure the delay
|
||||
# the server waits in order to spawn the child that trnasfers the RDB via socket
|
||||
# to the slaves.
|
||||
#
|
||||
# This is important since once the transfer starts, it is not possible to serve
|
||||
# new slaves arriving, that will be queued for the next RDB transfer, so the server
|
||||
# waits a delay in order to let more slaves arrive.
|
||||
#
|
||||
# The delay is specified in seconds, and by default is 5 seconds. To disable
|
||||
# it entirely just set it to 0 seconds and the transfer will start ASAP.
|
||||
repl-diskless-sync-delay 5
|
||||
|
||||
# Slaves send PINGs to server in a predefined interval. It's possible to change
|
||||
# this interval with the repl_ping_slave_period option. The default value is 10
|
||||
# seconds.
|
||||
#
|
||||
# repl-ping-slave-period 10
|
||||
|
||||
# The following option sets the replication timeout for:
|
||||
#
|
||||
# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
|
||||
# 2) Master timeout from the point of view of slaves (data, pings).
|
||||
# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
|
||||
#
|
||||
# It is important to make sure that this value is greater than the value
|
||||
# specified for repl-ping-slave-period otherwise a timeout will be detected
|
||||
# every time there is low traffic between the master and the slave.
|
||||
#
|
||||
# repl-timeout 60
|
||||
|
||||
# Disable TCP_NODELAY on the slave socket after SYNC?
|
||||
#
|
||||
# If you select "yes" Redis will use a smaller number of TCP packets and
|
||||
# less bandwidth to send data to slaves. But this can add a delay for
|
||||
# the data to appear on the slave side, up to 40 milliseconds with
|
||||
# Linux kernels using a default configuration.
|
||||
#
|
||||
# If you select "no" the delay for data to appear on the slave side will
|
||||
# be reduced but more bandwidth will be used for replication.
|
||||
#
|
||||
# By default we optimize for low latency, but in very high traffic conditions
|
||||
# or when the master and slaves are many hops away, turning this to "yes" may
|
||||
# be a good idea.
|
||||
repl-disable-tcp-nodelay no
|
||||
|
||||
# Set the replication backlog size. The backlog is a buffer that accumulates
|
||||
# slave data when slaves are disconnected for some time, so that when a slave
|
||||
# wants to reconnect again, often a full resync is not needed, but a partial
|
||||
# resync is enough, just passing the portion of data the slave missed while
|
||||
# disconnected.
|
||||
#
|
||||
# The bigger the replication backlog, the longer the time the slave can be
|
||||
# disconnected and later be able to perform a partial resynchronization.
|
||||
#
|
||||
# The backlog is only allocated once there is at least a slave connected.
|
||||
#
|
||||
# repl-backlog-size 1mb
|
||||
|
||||
# After a master has no longer connected slaves for some time, the backlog
|
||||
# will be freed. The following option configures the amount of seconds that
|
||||
# need to elapse, starting from the time the last slave disconnected, for
|
||||
# the backlog buffer to be freed.
|
||||
#
|
||||
# A value of 0 means to never release the backlog.
|
||||
#
|
||||
# repl-backlog-ttl 3600
|
||||
|
||||
# The slave priority is an integer number published by Redis in the INFO output.
|
||||
# It is used by Redis Sentinel in order to select a slave to promote into a
|
||||
# master if the master is no longer working correctly.
|
||||
#
|
||||
# A slave with a low priority number is considered better for promotion, so
|
||||
# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
|
||||
# pick the one with priority 10, that is the lowest.
|
||||
#
|
||||
# However a special priority of 0 marks the slave as not able to perform the
|
||||
# role of master, so a slave with priority of 0 will never be selected by
|
||||
# Redis Sentinel for promotion.
|
||||
#
|
||||
# By default the priority is 100.
|
||||
slave-priority 100
|
||||
|
||||
# It is possible for a master to stop accepting writes if there are less than
|
||||
# N slaves connected, having a lag less or equal than M seconds.
|
||||
#
|
||||
# The N slaves need to be in "online" state.
|
||||
#
|
||||
# The lag in seconds, that must be <= the specified value, is calculated from
|
||||
# the last ping received from the slave, that is usually sent every second.
|
||||
#
|
||||
# This option does not GUARANTEE that N replicas will accept the write, but
|
||||
# will limit the window of exposure for lost writes in case not enough slaves
|
||||
# are available, to the specified number of seconds.
|
||||
#
|
||||
# For example to require at least 3 slaves with a lag <= 10 seconds use:
|
||||
#
|
||||
# min-slaves-to-write 3
|
||||
# min-slaves-max-lag 10
|
||||
#
|
||||
# Setting one or the other to 0 disables the feature.
|
||||
#
|
||||
# By default min-slaves-to-write is set to 0 (feature disabled) and
|
||||
# min-slaves-max-lag is set to 10.
|
||||
|
||||
################################## SECURITY ###################################
|
||||
|
||||
# Require clients to issue AUTH <PASSWORD> before processing any other
|
||||
# commands. This might be useful in environments in which you do not trust
|
||||
# others with access to the host running redis-server.
|
||||
#
|
||||
# This should stay commented out for backward compatibility and because most
|
||||
# people do not need auth (e.g. they run their own servers).
|
||||
#
|
||||
# Warning: since Redis is pretty fast an outside user can try up to
|
||||
# 150k passwords per second against a good box. This means that you should
|
||||
# use a very strong password otherwise it will be very easy to break.
|
||||
#
|
||||
# requirepass foobared
|
||||
|
||||
# Command renaming.
|
||||
#
|
||||
# It is possible to change the name of dangerous commands in a shared
|
||||
# environment. For instance the CONFIG command may be renamed into something
|
||||
# hard to guess so that it will still be available for internal-use tools
|
||||
# but not available for general clients.
|
||||
#
|
||||
# Example:
|
||||
#
|
||||
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
|
||||
#
|
||||
# It is also possible to completely kill a command by renaming it into
|
||||
# an empty string:
|
||||
#
|
||||
# rename-command CONFIG ""
|
||||
#
|
||||
# Please note that changing the name of commands that are logged into the
|
||||
# AOF file or transmitted to slaves may cause problems.
|
||||
|
||||
################################### LIMITS ####################################
|
||||
|
||||
# Set the max number of connected clients at the same time. By default
|
||||
# this limit is set to 10000 clients, however if the Redis server is not
|
||||
# able to configure the process file limit to allow for the specified limit
|
||||
# the max number of allowed clients is set to the current file limit
|
||||
# minus 32 (as Redis reserves a few file descriptors for internal uses).
|
||||
#
|
||||
# Once the limit is reached Redis will close all the new connections sending
|
||||
# an error 'max number of clients reached'.
|
||||
#
|
||||
# maxclients 10000
|
||||
|
||||
# Don't use more memory than the specified amount of bytes.
|
||||
# When the memory limit is reached Redis will try to remove keys
|
||||
# according to the eviction policy selected (see maxmemory-policy).
|
||||
#
|
||||
# If Redis can't remove keys according to the policy, or if the policy is
|
||||
# set to 'noeviction', Redis will start to reply with errors to commands
|
||||
# that would use more memory, like SET, LPUSH, and so on, and will continue
|
||||
# to reply to read-only commands like GET.
|
||||
#
|
||||
# This option is usually useful when using Redis as an LRU cache, or to set
|
||||
# a hard memory limit for an instance (using the 'noeviction' policy).
|
||||
#
|
||||
# WARNING: If you have slaves attached to an instance with maxmemory on,
|
||||
# the size of the output buffers needed to feed the slaves are subtracted
|
||||
# from the used memory count, so that network problems / resyncs will
|
||||
# not trigger a loop where keys are evicted, and in turn the output
|
||||
# buffer of slaves is full with DELs of keys evicted triggering the deletion
|
||||
# of more keys, and so forth until the database is completely emptied.
|
||||
#
|
||||
# In short... if you have slaves attached it is suggested that you set a lower
|
||||
# limit for maxmemory so that there is some free RAM on the system for slave
|
||||
# output buffers (but this is not needed if the policy is 'noeviction').
|
||||
#
|
||||
# maxmemory <bytes>
|
||||
|
||||
# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
|
||||
# is reached. You can select among five behaviors:
|
||||
#
|
||||
# volatile-lru -> remove the key with an expire set using an LRU algorithm
|
||||
# allkeys-lru -> remove any key according to the LRU algorithm
|
||||
# volatile-random -> remove a random key with an expire set
|
||||
# allkeys-random -> remove a random key, any key
|
||||
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
|
||||
# noeviction -> don't expire at all, just return an error on write operations
|
||||
#
|
||||
# Note: with any of the above policies, Redis will return an error on write
|
||||
# operations, when there are no suitable keys for eviction.
|
||||
#
|
||||
# At the date of writing these commands are: set setnx setex append
|
||||
# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
|
||||
# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
|
||||
# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
|
||||
# getset mset msetnx exec sort
|
||||
#
|
||||
# The default is:
|
||||
#
|
||||
# maxmemory-policy volatile-lru
|
||||
|
||||
# LRU and minimal TTL algorithms are not precise algorithms but approximated
|
||||
# algorithms (in order to save memory), so you can select as well the sample
|
||||
# size to check. For instance for default Redis will check three keys and
|
||||
# pick the one that was used less recently, you can change the sample size
|
||||
# using the following configuration directive.
|
||||
#
|
||||
# maxmemory-samples 3
|
||||
|
||||
############################## APPEND ONLY MODE ###############################
|
||||
|
||||
# By default Redis asynchronously dumps the dataset on disk. This mode is
|
||||
# good enough in many applications, but an issue with the Redis process or
|
||||
# a power outage may result into a few minutes of writes lost (depending on
|
||||
# the configured save points).
|
||||
#
|
||||
# The Append Only File is an alternative persistence mode that provides
|
||||
# much better durability. For instance using the default data fsync policy
|
||||
# (see later in the config file) Redis can lose just one second of writes in a
|
||||
# dramatic event like a server power outage, or a single write if something
|
||||
# wrong with the Redis process itself happens, but the operating system is
|
||||
# still running correctly.
|
||||
#
|
||||
# AOF and RDB persistence can be enabled at the same time without problems.
|
||||
# If the AOF is enabled on startup Redis will load the AOF, that is the file
|
||||
# with the better durability guarantees.
|
||||
#
|
||||
# Please check http://redis.io/topics/persistence for more information.
|
||||
|
||||
appendonly yes
|
||||
|
||||
# The name of the append only file (default: "appendonly.aof")
|
||||
|
||||
appendfilename "appendonly.aof"
|
||||
|
||||
# The fsync() call tells the Operating System to actually write data on disk
|
||||
# instead of waiting for more data in the output buffer. Some OS will really flush
|
||||
# data on disk, some other OS will just try to do it ASAP.
|
||||
#
|
||||
# Redis supports three different modes:
|
||||
#
|
||||
# no: don't fsync, just let the OS flush the data when it wants. Faster.
|
||||
# always: fsync after every write to the append only log. Slow, Safest.
|
||||
# everysec: fsync only one time every second. Compromise.
|
||||
#
|
||||
# The default is "everysec", as that's usually the right compromise between
|
||||
# speed and data safety. It's up to you to understand if you can relax this to
|
||||
# "no" that will let the operating system flush the output buffer when
|
||||
# it wants, for better performances (but if you can live with the idea of
|
||||
# some data loss consider the default persistence mode that's snapshotting),
|
||||
# or on the contrary, use "always" that's very slow but a bit safer than
|
||||
# everysec.
|
||||
#
|
||||
# More details please check the following article:
|
||||
# http://antirez.com/post/redis-persistence-demystified.html
|
||||
#
|
||||
# If unsure, use "everysec".
|
||||
|
||||
# appendfsync always
|
||||
appendfsync everysec
|
||||
# appendfsync no
|
||||
|
||||
# When the AOF fsync policy is set to always or everysec, and a background
|
||||
# saving process (a background save or AOF log background rewriting) is
|
||||
# performing a lot of I/O against the disk, in some Linux configurations
|
||||
# Redis may block too long on the fsync() call. Note that there is no fix for
|
||||
# this currently, as even performing fsync in a different thread will block
|
||||
# our synchronous write(2) call.
|
||||
#
|
||||
# In order to mitigate this problem it's possible to use the following option
|
||||
# that will prevent fsync() from being called in the main process while a
|
||||
# BGSAVE or BGREWRITEAOF is in progress.
|
||||
#
|
||||
# This means that while another child is saving, the durability of Redis is
|
||||
# the same as "appendfsync none". In practical terms, this means that it is
|
||||
# possible to lose up to 30 seconds of log in the worst scenario (with the
|
||||
# default Linux settings).
|
||||
#
|
||||
# If you have latency problems turn this to "yes". Otherwise leave it as
|
||||
# "no" that is the safest pick from the point of view of durability.
|
||||
|
||||
no-appendfsync-on-rewrite no
|
||||
|
||||
# Automatic rewrite of the append only file.
|
||||
# Redis is able to automatically rewrite the log file implicitly calling
|
||||
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
|
||||
#
|
||||
# This is how it works: Redis remembers the size of the AOF file after the
|
||||
# latest rewrite (if no rewrite has happened since the restart, the size of
|
||||
# the AOF at startup is used).
|
||||
#
|
||||
# This base size is compared to the current size. If the current size is
|
||||
# bigger than the specified percentage, the rewrite is triggered. Also
|
||||
# you need to specify a minimal size for the AOF file to be rewritten, this
|
||||
# is useful to avoid rewriting the AOF file even if the percentage increase
|
||||
# is reached but it is still pretty small.
|
||||
#
|
||||
# Specify a percentage of zero in order to disable the automatic AOF
|
||||
# rewrite feature.
|
||||
|
||||
auto-aof-rewrite-percentage 100
|
||||
auto-aof-rewrite-min-size 64mb
|
||||
|
||||
# An AOF file may be found to be truncated at the end during the Redis
|
||||
# startup process, when the AOF data gets loaded back into memory.
|
||||
# This may happen when the system where Redis is running
|
||||
# crashes, especially when an ext4 filesystem is mounted without the
|
||||
# data=ordered option (however this can't happen when Redis itself
|
||||
# crashes or aborts but the operating system still works correctly).
|
||||
#
|
||||
# Redis can either exit with an error when this happens, or load as much
|
||||
# data as possible (the default now) and start if the AOF file is found
|
||||
# to be truncated at the end. The following option controls this behavior.
|
||||
#
|
||||
# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
|
||||
# the Redis server starts emitting a log to inform the user of the event.
|
||||
# Otherwise if the option is set to no, the server aborts with an error
|
||||
# and refuses to start. When the option is set to no, the user requires
|
||||
# to fix the AOF file using the "redis-check-aof" utility before to restart
|
||||
# the server.
|
||||
#
|
||||
# Note that if the AOF file will be found to be corrupted in the middle
|
||||
# the server will still exit with an error. This option only applies when
|
||||
# Redis will try to read more data from the AOF file but not enough bytes
|
||||
# will be found.
|
||||
aof-load-truncated yes
|
||||
|
||||
################################ LUA SCRIPTING ###############################
|
||||
|
||||
# Max execution time of a Lua script in milliseconds.
|
||||
#
|
||||
# If the maximum execution time is reached Redis will log that a script is
|
||||
# still in execution after the maximum allowed time and will start to
|
||||
# reply to queries with an error.
|
||||
#
|
||||
# When a long running script exceeds the maximum execution time only the
|
||||
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
|
||||
# used to stop a script that did not yet called write commands. The second
|
||||
# is the only way to shut down the server in the case a write command was
|
||||
# already issued by the script but the user doesn't want to wait for the natural
|
||||
# termination of the script.
|
||||
#
|
||||
# Set it to 0 or a negative value for unlimited execution without warnings.
|
||||
lua-time-limit 5000
|
||||
|
||||
################################## SLOW LOG ###################################
|
||||
|
||||
# The Redis Slow Log is a system to log queries that exceeded a specified
|
||||
# execution time. The execution time does not include the I/O operations
|
||||
# like talking with the client, sending the reply and so forth,
|
||||
# but just the time needed to actually execute the command (this is the only
|
||||
# stage of command execution where the thread is blocked and can not serve
|
||||
# other requests in the meantime).
|
||||
#
|
||||
# You can configure the slow log with two parameters: one tells Redis
|
||||
# what is the execution time, in microseconds, to exceed in order for the
|
||||
# command to get logged, and the other parameter is the length of the
|
||||
# slow log. When a new command is logged the oldest one is removed from the
|
||||
# queue of logged commands.
|
||||
|
||||
# The following time is expressed in microseconds, so 1000000 is equivalent
|
||||
# to one second. Note that a negative number disables the slow log, while
|
||||
# a value of zero forces the logging of every command.
|
||||
slowlog-log-slower-than 10000
|
||||
|
||||
# There is no limit to this length. Just be aware that it will consume memory.
|
||||
# You can reclaim memory used by the slow log with SLOWLOG RESET.
|
||||
slowlog-max-len 128
|
||||
|
||||
################################ LATENCY MONITOR ##############################
|
||||
|
||||
# The Redis latency monitoring subsystem samples different operations
|
||||
# at runtime in order to collect data related to possible sources of
|
||||
# latency of a Redis instance.
|
||||
#
|
||||
# Via the LATENCY command this information is available to the user that can
|
||||
# print graphs and obtain reports.
|
||||
#
|
||||
# The system only logs operations that were performed in a time equal or
|
||||
# greater than the amount of milliseconds specified via the
|
||||
# latency-monitor-threshold configuration directive. When its value is set
|
||||
# to zero, the latency monitor is turned off.
|
||||
#
|
||||
# By default latency monitoring is disabled since it is mostly not needed
|
||||
# if you don't have latency issues, and collecting data has a performance
|
||||
# impact, that while very small, can be measured under big load. Latency
|
||||
# monitoring can easily be enabled at runtime using the command
|
||||
# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
|
||||
latency-monitor-threshold 0
|
||||
|
||||
############################# Event notification ##############################
|
||||
|
||||
# Redis can notify Pub/Sub clients about events happening in the key space.
|
||||
# This feature is documented at http://redis.io/topics/notifications
|
||||
#
|
||||
# For instance if keyspace events notification is enabled, and a client
|
||||
# performs a DEL operation on key "foo" stored in the Database 0, two
|
||||
# messages will be published via Pub/Sub:
|
||||
#
|
||||
# PUBLISH __keyspace@0__:foo del
|
||||
# PUBLISH __keyevent@0__:del foo
|
||||
#
|
||||
# It is possible to select the events that Redis will notify among a set
|
||||
# of classes. Every class is identified by a single character:
|
||||
#
|
||||
# K Keyspace events, published with __keyspace@<db>__ prefix.
|
||||
# E Keyevent events, published with __keyevent@<db>__ prefix.
|
||||
# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
|
||||
# $ String commands
|
||||
# l List commands
|
||||
# s Set commands
|
||||
# h Hash commands
|
||||
# z Sorted set commands
|
||||
# x Expired events (events generated every time a key expires)
|
||||
# e Evicted events (events generated when a key is evicted for maxmemory)
|
||||
# A Alias for g$lshzxe, so that the "AKE" string means all the events.
|
||||
#
|
||||
# The "notify-keyspace-events" takes as argument a string that is composed
|
||||
# of zero or multiple characters. The empty string means that notifications
|
||||
# are disabled.
|
||||
#
|
||||
# Example: to enable list and generic events, from the point of view of the
|
||||
# event name, use:
|
||||
#
|
||||
# notify-keyspace-events Elg
|
||||
#
|
||||
# Example 2: to get the stream of the expired keys subscribing to channel
|
||||
# name __keyevent@0__:expired use:
|
||||
#
|
||||
# notify-keyspace-events Ex
|
||||
#
|
||||
# By default all notifications are disabled because most users don't need
|
||||
# this feature and the feature has some overhead. Note that if you don't
|
||||
# specify at least one of K or E, no events will be delivered.
|
||||
notify-keyspace-events ""
|
||||
|
||||
############################### ADVANCED CONFIG ###############################
|
||||
|
||||
# Hashes are encoded using a memory efficient data structure when they have a
|
||||
# small number of entries, and the biggest entry does not exceed a given
|
||||
# threshold. These thresholds can be configured using the following directives.
|
||||
hash-max-ziplist-entries 512
|
||||
hash-max-ziplist-value 64
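#
# A minimal sketch showing the encoding switch with OBJECT ENCODING
# (key and field names are illustrative):
#
#   redis-cli HSET h field short-value
#   redis-cli OBJECT ENCODING h    # -> "ziplist" while under both limits
#   redis-cli HSET h field "$(head -c 100 /dev/zero | tr '\0' x)"
#   redis-cli OBJECT ENCODING h    # -> "hashtable" once a value exceeds 64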
|
||||
|
||||
# Similarly to hashes, small lists are also encoded in a special way in order
|
||||
# to save a lot of space. The special representation is only used when
|
||||
# you are under the following limits:
|
||||
list-max-ziplist-entries 512
|
||||
list-max-ziplist-value 64
|
||||
|
||||
# Sets have a special encoding in just one case: when a set is composed
|
||||
# of just strings that happen to be integers in radix 10 in the range
|
||||
# of 64 bit signed integers.
|
||||
# The following configuration setting sets the limit in the size of the
|
||||
# set in order to use this special memory saving encoding.
|
||||
set-max-intset-entries 512
|
||||
|
||||
# Similarly to hashes and lists, sorted sets are also specially encoded in
|
||||
# order to save a lot of space. This encoding is only used when the length and
|
||||
# elements of a sorted set are below the following limits:
|
||||
zset-max-ziplist-entries 128
|
||||
zset-max-ziplist-value 64
|
||||
|
||||
# HyperLogLog sparse representation bytes limit. The limit includes the
|
||||
# 16 byte header. When a HyperLogLog using the sparse representation crosses
|
||||
# this limit, it is converted into the dense representation.
|
||||
#
|
||||
# A value greater than 16000 is totally useless, since at that point the
|
||||
# dense representation is more memory efficient.
|
||||
#
|
||||
# The suggested value is ~ 3000 in order to have the benefits of
|
||||
# the space efficient encoding without slowing down too much PFADD,
|
||||
# which is O(N) with the sparse encoding. The value can be raised to
|
||||
# ~ 10000 when CPU is not a concern, but space is, and the data set is
|
||||
# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
|
||||
hll-sparse-max-bytes 3000
|
||||
|
||||
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
|
||||
# order to help rehashing the main Redis hash table (the one mapping top-level
|
||||
# keys to values). The hash table implementation Redis uses (see dict.c)
|
||||
# performs a lazy rehashing: the more operations you run against a hash table
|
||||
# that is rehashing, the more rehashing "steps" are performed, so if the
|
||||
# server is idle the rehashing is never complete and some more memory is used
|
||||
# by the hash table.
|
||||
#
|
||||
# The default is to use this millisecond 10 times every second in order to
|
||||
# actively rehash the main dictionaries, freeing memory when possible.
|
||||
#
|
||||
# If unsure:
|
||||
# use "activerehashing no" if you have hard latency requirements and it is
|
||||
# not a good thing in your environment that Redis can reply from time to time
|
||||
# to queries with 2 milliseconds delay.
|
||||
#
|
||||
# use "activerehashing yes" if you don't have such hard requirements but
|
||||
# want to free memory asap when possible.
|
||||
activerehashing yes
|
||||
|
||||
# The client output buffer limits can be used to force disconnection of clients
|
||||
# that are not reading data from the server fast enough for some reason (a
|
||||
# common reason is that a Pub/Sub client can't consume messages as fast as the
|
||||
# publisher can produce them).
|
||||
#
|
||||
# The limit can be set differently for the three different classes of clients:
|
||||
#
|
||||
# normal -> normal clients including MONITOR clients
|
||||
# slave -> slave clients
|
||||
# pubsub -> clients subscribed to at least one pubsub channel or pattern
|
||||
#
|
||||
# The syntax of every client-output-buffer-limit directive is the following:
|
||||
#
|
||||
# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
|
||||
#
|
||||
# A client is immediately disconnected once the hard limit is reached, or if
|
||||
# the soft limit is reached and remains reached for the specified number of
|
||||
# seconds (continuously).
|
||||
# So for instance if the hard limit is 32 megabytes and the soft limit is
|
||||
# 16 megabytes / 10 seconds, the client will get disconnected immediately
|
||||
# if the size of the output buffers reaches 32 megabytes, but will also get
# disconnected if the client reaches 16 megabytes and continuously exceeds
|
||||
# the limit for 10 seconds.
|
||||
#
|
||||
# By default normal clients are not limited because they don't receive data
|
||||
# without asking (in a push way), but just after a request, so only
|
||||
# asynchronous clients may create a scenario where data is requested faster
|
||||
# than it can be read.
|
||||
#
|
||||
# Instead there is a default limit for pubsub and slave clients, since
|
||||
# subscribers and slaves receive data in a push fashion.
|
||||
#
|
||||
# Both the hard and the soft limit can be disabled by setting them to zero.
|
||||
client-output-buffer-limit normal 0 0 0
|
||||
client-output-buffer-limit slave 256mb 64mb 60
|
||||
client-output-buffer-limit pubsub 32mb 8mb 60
|
||||
|
||||
# Redis calls an internal function to perform many background tasks, like
|
||||
# closing connections of clients in timeout, purging expired keys that are
|
||||
# never requested, and so forth.
|
||||
#
|
||||
# Not all tasks are performed with the same frequency, but Redis checks for
|
||||
# tasks to perform according to the specified "hz" value.
|
||||
#
|
||||
# By default "hz" is set to 10. Raising the value will use more CPU when
|
||||
# Redis is idle, but at the same time will make Redis more responsive when
|
||||
# there are many keys expiring at the same time, and timeouts may be
|
||||
# handled with more precision.
|
||||
#
|
||||
# The range is between 1 and 500, however a value over 100 is usually not
|
||||
# a good idea. Most users should use the default of 10 and raise this up to
|
||||
# 100 only in environments where very low latency is required.
|
||||
hz 10
|
||||
|
||||
# When a child rewrites the AOF file, if the following option is enabled
|
||||
# the file will be fsync-ed every 32 MB of data generated. This is useful
|
||||
# in order to commit the file to the disk more incrementally and avoid
|
||||
# big latency spikes.
|
||||
aof-rewrite-incremental-fsync yes
|
828
vendor/k8s.io/kubernetes/examples/storage/redis/image/redis-slave.conf
generated
vendored
Normal file
@ -0,0 +1,828 @@
|
||||
# Redis configuration file example
|
||||
|
||||
# Note on units: when memory size is needed, it is possible to specify
|
||||
# it in the usual form of 1k 5GB 4M and so forth:
|
||||
#
|
||||
# 1k => 1000 bytes
|
||||
# 1kb => 1024 bytes
|
||||
# 1m => 1000000 bytes
|
||||
# 1mb => 1024*1024 bytes
|
||||
# 1g => 1000000000 bytes
|
||||
# 1gb => 1024*1024*1024 bytes
|
||||
#
|
||||
# units are case insensitive so 1GB 1Gb 1gB are all the same.
|
||||
|
||||
################################## INCLUDES ###################################
|
||||
|
||||
# Include one or more other config files here. This is useful if you
|
||||
# have a standard template that goes to all Redis servers but also need
|
||||
# to customize a few per-server settings. Include files can include
|
||||
# other files, so use this wisely.
|
||||
#
|
||||
# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
|
||||
# from admin or Redis Sentinel. Since Redis always uses the last processed
|
||||
# line as value of a configuration directive, you'd better put includes
|
||||
# at the beginning of this file to avoid overwriting config change at runtime.
|
||||
#
|
||||
# If instead you are interested in using includes to override configuration
|
||||
# options, it is better to use include as the last line.
|
||||
#
|
||||
# include /path/to/local.conf
|
||||
# include /path/to/other.conf
|
||||
|
||||
################################ GENERAL #####################################
|
||||
|
||||
# By default Redis does not run as a daemon. Use 'yes' if you need it.
|
||||
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
|
||||
daemonize no
|
||||
|
||||
# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
|
||||
# default. You can specify a custom pid file location here.
|
||||
pidfile /var/run/redis.pid
|
||||
|
||||
# Accept connections on the specified port, default is 6379.
|
||||
# If port 0 is specified Redis will not listen on a TCP socket.
|
||||
port 6379
|
||||
|
||||
# TCP listen() backlog.
|
||||
#
|
||||
# In high requests-per-second environments you need a high backlog in order
# to avoid slow client connection issues. Note that the Linux kernel
|
||||
# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
|
||||
# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
|
||||
# in order to get the desired effect.
|
||||
tcp-backlog 511
|
||||
|
||||
# By default Redis listens for connections from all the network interfaces
|
||||
# available on the server. It is possible to listen to just one or multiple
|
||||
# interfaces using the "bind" configuration directive, followed by one or
|
||||
# more IP addresses.
|
||||
#
|
||||
# Examples:
|
||||
#
|
||||
# bind 192.168.1.100 10.0.0.1
|
||||
|
||||
bind 0.0.0.0
|
||||
|
||||
# Specify the path for the Unix socket that will be used to listen for
|
||||
# incoming connections. There is no default, so Redis will not listen
|
||||
# on a unix socket when not specified.
|
||||
#
|
||||
# unixsocket /tmp/redis.sock
|
||||
# unixsocketperm 700
|
||||
|
||||
# Close the connection after a client is idle for N seconds (0 to disable)
|
||||
timeout 0
|
||||
|
||||
# TCP keepalive.
|
||||
#
|
||||
# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
|
||||
# of communication. This is useful for two reasons:
|
||||
#
|
||||
# 1) Detect dead peers.
|
||||
# 2) Keep the connection alive from the point of view of network
|
||||
# equipment in the middle.
|
||||
#
|
||||
# On Linux, the specified value (in seconds) is the period used to send ACKs.
|
||||
# Note that to close the connection double the time is needed.
|
||||
# On other kernels the period depends on the kernel configuration.
|
||||
#
|
||||
# A reasonable value for this option is 60 seconds.
|
||||
tcp-keepalive 60
|
||||
|
||||
# Specify the server verbosity level.
|
||||
# This can be one of:
|
||||
# debug (a lot of information, useful for development/testing)
|
||||
# verbose (lots of rarely useful info, but not a mess like the debug level)
|
||||
# notice (moderately verbose, what you want in production probably)
|
||||
# warning (only very important / critical messages are logged)
|
||||
loglevel notice
|
||||
|
||||
# Specify the log file name. Also the empty string can be used to force
|
||||
# Redis to log on the standard output. Note that if you use standard
|
||||
# output for logging but daemonize, logs will be sent to /dev/null
|
||||
logfile ""
|
||||
|
||||
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
|
||||
# and optionally update the other syslog parameters to suit your needs.
|
||||
# syslog-enabled no
|
||||
|
||||
# Specify the syslog identity.
|
||||
# syslog-ident redis
|
||||
|
||||
# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
|
||||
# syslog-facility local0
|
||||
|
||||
# Set the number of databases. The default database is DB 0, you can select
|
||||
# a different one on a per-connection basis using SELECT <dbid> where
|
||||
# dbid is a number between 0 and 'databases'-1
|
||||
databases 16
|
||||
|
||||
################################ SNAPSHOTTING ################################
|
||||
#
|
||||
# Save the DB on disk:
|
||||
#
|
||||
# save <seconds> <changes>
|
||||
#
|
||||
# Will save the DB if both the given number of seconds and the given
|
||||
# number of write operations against the DB occurred.
|
||||
#
|
||||
# In the example below the behaviour will be to save:
|
||||
# after 900 sec (15 min) if at least 1 key changed
|
||||
# after 300 sec (5 min) if at least 10 keys changed
|
||||
# after 60 sec if at least 10000 keys changed
|
||||
#
|
||||
# Note: you can disable saving completely by commenting out all "save" lines.
|
||||
#
|
||||
# It is also possible to remove all the previously configured save
|
||||
# points by adding a save directive with a single empty string argument
|
||||
# like in the following example:
|
||||
#
|
||||
# save ""
|
||||
|
||||
save 900 1
|
||||
save 300 10
|
||||
save 60 10000
|
||||
|
||||
# By default Redis will stop accepting writes if RDB snapshots are enabled
|
||||
# (at least one save point) and the latest background save failed.
|
||||
# This will make the user aware (in a hard way) that data is not persisting
|
||||
# on disk properly, otherwise chances are that no one will notice and some
|
||||
# disaster will happen.
|
||||
#
|
||||
# If the background saving process starts working again Redis will
|
||||
# automatically allow writes again.
|
||||
#
|
||||
# However if you have set up proper monitoring of the Redis server
|
||||
# and persistence, you may want to disable this feature so that Redis will
|
||||
# continue to work as usual even if there are problems with disk,
|
||||
# permissions, and so forth.
|
||||
stop-writes-on-bgsave-error yes
|
||||
|
||||
# Compress string objects using LZF when dumping .rdb databases?
# By default it's set to 'yes' as it's almost always a win.
|
||||
# If you want to save some CPU in the saving child set it to 'no' but
|
||||
# the dataset will likely be bigger if you have compressible values or keys.
|
||||
rdbcompression yes
|
||||
|
||||
# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
|
||||
# This makes the format more resistant to corruption but there is a performance
|
||||
# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
|
||||
# for maximum performance.
|
||||
#
|
||||
# RDB files created with checksum disabled have a checksum of zero that will
|
||||
# tell the loading code to skip the check.
|
||||
rdbchecksum yes
|
||||
|
||||
# The filename where to dump the DB
|
||||
dbfilename dump.rdb
|
||||
|
||||
# The working directory.
|
||||
#
|
||||
# The DB will be written inside this directory, with the filename specified
|
||||
# above using the 'dbfilename' configuration directive.
|
||||
#
|
||||
# The Append Only File will also be created inside this directory.
|
||||
#
|
||||
# Note that you must specify a directory here, not a file name.
|
||||
dir "./"
|
||||
|
||||
################################# REPLICATION #################################
|
||||
|
||||
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
|
||||
# another Redis server. A few things to understand ASAP about Redis replication.
|
||||
#
|
||||
# 1) Redis replication is asynchronous, but you can configure a master to
|
||||
# stop accepting writes if it appears to be not connected with at least
|
||||
# a given number of slaves.
|
||||
# 2) Redis slaves are able to perform a partial resynchronization with the
|
||||
# master if the replication link is lost for a relatively small amount of
|
||||
# time. You may want to configure the replication backlog size (see the next
|
||||
# sections of this file) with a sensible value depending on your needs.
|
||||
# 3) Replication is automatic and does not need user intervention. After a
|
||||
# network partition slaves automatically try to reconnect to masters
|
||||
# and resynchronize with them.
|
||||
#
|
||||
slaveof %master-ip% %master-port%
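# Note: %master-ip% and %master-port% are not literal values. In this image
# they are placeholders that run.sh rewrites with sed, using the master
# address discovered via Sentinel, before starting redis-server.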
|
||||
|
||||
# If the master is password protected (using the "requirepass" configuration
|
||||
# directive below) it is possible to tell the slave to authenticate before
|
||||
# starting the replication synchronization process, otherwise the master will
|
||||
# refuse the slave request.
|
||||
#
|
||||
# masterauth <master-password>
|
||||
|
||||
# When a slave loses its connection with the master, or when the replication
|
||||
# is still in progress, the slave can act in two different ways:
|
||||
#
|
||||
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
|
||||
# still reply to client requests, possibly with out of date data, or the
|
||||
# data set may just be empty if this is the first synchronization.
|
||||
#
|
||||
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
|
||||
# an error "SYNC with master in progress" to all the kind of commands
|
||||
# but to INFO and SLAVEOF.
|
||||
#
|
||||
slave-serve-stale-data yes
|
||||
|
||||
# You can configure a slave instance to accept writes or not. Writing against
|
||||
# a slave instance may be useful to store some ephemeral data (because data
|
||||
# written on a slave will be easily deleted after resync with the master) but
|
||||
# may also cause problems if clients are writing to it because of a
|
||||
# misconfiguration.
|
||||
#
|
||||
# Since Redis 2.6 by default slaves are read-only.
|
||||
#
|
||||
# Note: read only slaves are not designed to be exposed to untrusted clients
|
||||
# on the internet. It's just a protection layer against misuse of the instance.
|
||||
# Still, a read only slave exports by default all the administrative commands
|
||||
# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
|
||||
# security of read only slaves using 'rename-command' to shadow all the
|
||||
# administrative / dangerous commands.
|
||||
slave-read-only yes
|
||||
|
||||
# Replication SYNC strategy: disk or socket.
|
||||
#
|
||||
# -------------------------------------------------------
|
||||
# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY
|
||||
# -------------------------------------------------------
|
||||
#
|
||||
# New slaves and reconnecting slaves that are not able to continue the replication
|
||||
# process by just receiving differences need to do what is called a "full
|
||||
# synchronization". An RDB file is transmitted from the master to the slaves.
|
||||
# The transmission can happen in two different ways:
|
||||
#
|
||||
# 1) Disk-backed: The Redis master creates a new process that writes the RDB
|
||||
# file on disk. Later the file is transferred by the parent
|
||||
# process to the slaves incrementally.
|
||||
# 2) Diskless: The Redis master creates a new process that directly writes the
|
||||
# RDB file to slave sockets, without touching the disk at all.
|
||||
#
|
||||
# With disk-backed replication, while the RDB file is generated, more slaves
|
||||
# can be queued and served with the RDB file as soon as the current child producing
|
||||
# the RDB file finishes its work. With diskless replication instead once
|
||||
# the transfer starts, new slaves arriving will be queued and a new transfer
|
||||
# will start when the current one terminates.
|
||||
#
|
||||
# When diskless replication is used, the master waits a configurable amount of
|
||||
# time (in seconds) before starting the transfer in the hope that multiple slaves
|
||||
# will arrive and the transfer can be parallelized.
|
||||
#
|
||||
# With slow disks and fast (large bandwidth) networks, diskless replication
|
||||
# works better.
|
||||
repl-diskless-sync no
|
||||
|
||||
# When diskless replication is enabled, it is possible to configure the delay
|
||||
# the server waits in order to spawn the child that transfers the RDB via socket
|
||||
# to the slaves.
|
||||
#
|
||||
# This is important since once the transfer starts, it is not possible to serve
|
||||
# new slaves arriving, that will be queued for the next RDB transfer, so the server
|
||||
# waits a delay in order to let more slaves arrive.
|
||||
#
|
||||
# The delay is specified in seconds, and by default is 5 seconds. To disable
|
||||
# it entirely just set it to 0 seconds and the transfer will start ASAP.
|
||||
repl-diskless-sync-delay 5
|
||||
|
||||
# Slaves send PINGs to server in a predefined interval. It's possible to change
|
||||
# this interval with the repl_ping_slave_period option. The default value is 10
|
||||
# seconds.
|
||||
#
|
||||
# repl-ping-slave-period 10
|
||||
|
||||
# The following option sets the replication timeout for:
|
||||
#
|
||||
# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
|
||||
# 2) Master timeout from the point of view of slaves (data, pings).
|
||||
# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
|
||||
#
|
||||
# It is important to make sure that this value is greater than the value
|
||||
# specified for repl-ping-slave-period otherwise a timeout will be detected
|
||||
# every time there is low traffic between the master and the slave.
|
||||
#
|
||||
# repl-timeout 60
|
||||
|
||||
# Disable TCP_NODELAY on the slave socket after SYNC?
|
||||
#
|
||||
# If you select "yes" Redis will use a smaller number of TCP packets and
|
||||
# less bandwidth to send data to slaves. But this can add a delay for
|
||||
# the data to appear on the slave side, up to 40 milliseconds with
|
||||
# Linux kernels using a default configuration.
|
||||
#
|
||||
# If you select "no" the delay for data to appear on the slave side will
|
||||
# be reduced but more bandwidth will be used for replication.
|
||||
#
|
||||
# By default we optimize for low latency, but in very high traffic conditions
|
||||
# or when the master and slaves are many hops away, turning this to "yes" may
|
||||
# be a good idea.
|
||||
repl-disable-tcp-nodelay no
|
||||
|
||||
# Set the replication backlog size. The backlog is a buffer that accumulates
|
||||
# slave data when slaves are disconnected for some time, so that when a slave
|
||||
# wants to reconnect again, often a full resync is not needed, but a partial
|
||||
# resync is enough, just passing the portion of data the slave missed while
|
||||
# disconnected.
|
||||
#
|
||||
# The bigger the replication backlog, the longer the time the slave can be
|
||||
# disconnected and later be able to perform a partial resynchronization.
|
||||
#
|
||||
# The backlog is only allocated once there is at least a slave connected.
|
||||
#
|
||||
# repl-backlog-size 1mb
|
||||
|
||||
# After a master no longer has connected slaves for some time, the backlog
|
||||
# will be freed. The following option configures the amount of seconds that
|
||||
# need to elapse, starting from the time the last slave disconnected, for
|
||||
# the backlog buffer to be freed.
|
||||
#
|
||||
# A value of 0 means to never release the backlog.
|
||||
#
|
||||
# repl-backlog-ttl 3600
|
||||
|
||||
# The slave priority is an integer number published by Redis in the INFO output.
|
||||
# It is used by Redis Sentinel in order to select a slave to promote into a
|
||||
# master if the master is no longer working correctly.
|
||||
#
|
||||
# A slave with a low priority number is considered better for promotion, so
|
||||
# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
|
||||
# pick the one with priority 10, that is the lowest.
|
||||
#
|
||||
# However a special priority of 0 marks the slave as not able to perform the
|
||||
# role of master, so a slave with priority of 0 will never be selected by
|
||||
# Redis Sentinel for promotion.
|
||||
#
|
||||
# By default the priority is 100.
|
||||
slave-priority 100
|
||||
|
||||
# It is possible for a master to stop accepting writes if there are fewer than
# N slaves connected, with a lag less than or equal to M seconds.
|
||||
#
|
||||
# The N slaves need to be in "online" state.
|
||||
#
|
||||
# The lag in seconds, that must be <= the specified value, is calculated from
|
||||
# the last ping received from the slave, which is usually sent every second.
|
||||
#
|
||||
# This option does not GUARANTEE that N replicas will accept the write, but
|
||||
# will limit the window of exposure for lost writes in case not enough slaves
|
||||
# are available, to the specified number of seconds.
|
||||
#
|
||||
# For example to require at least 3 slaves with a lag <= 10 seconds use:
|
||||
#
|
||||
# min-slaves-to-write 3
|
||||
# min-slaves-max-lag 10
|
||||
#
|
||||
# Setting one or the other to 0 disables the feature.
|
||||
#
|
||||
# By default min-slaves-to-write is set to 0 (feature disabled) and
|
||||
# min-slaves-max-lag is set to 10.
|
||||
|
||||
################################## SECURITY ###################################
|
||||
|
||||
# Require clients to issue AUTH <PASSWORD> before processing any other
|
||||
# commands. This might be useful in environments in which you do not trust
|
||||
# others with access to the host running redis-server.
|
||||
#
|
||||
# This should stay commented out for backward compatibility and because most
|
||||
# people do not need auth (e.g. they run their own servers).
|
||||
#
|
||||
# Warning: since Redis is pretty fast an outside user can try up to
|
||||
# 150k passwords per second against a good box. This means that you should
|
||||
# use a very strong password otherwise it will be very easy to break.
|
||||
#
|
||||
# requirepass foobared
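#
# A minimal client-side sketch, assuming the requirepass line above were
# enabled with that example password:
#
#   redis-cli -a foobared PING       # authenticate on the command line
#   redis-cli                        # or interactively:
#     127.0.0.1:6379> AUTH foobared
#     127.0.0.1:6379> PING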
|
||||
|
||||
# Command renaming.
|
||||
#
|
||||
# It is possible to change the name of dangerous commands in a shared
|
||||
# environment. For instance the CONFIG command may be renamed into something
|
||||
# hard to guess so that it will still be available for internal-use tools
|
||||
# but not available for general clients.
|
||||
#
|
||||
# Example:
|
||||
#
|
||||
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
|
||||
#
|
||||
# It is also possible to completely kill a command by renaming it into
|
||||
# an empty string:
|
||||
#
|
||||
# rename-command CONFIG ""
|
||||
#
|
||||
# Please note that changing the name of commands that are logged into the
|
||||
# AOF file or transmitted to slaves may cause problems.
|
||||
|
||||
################################### LIMITS ####################################
|
||||
|
||||
# Set the max number of connected clients at the same time. By default
|
||||
# this limit is set to 10000 clients, however if the Redis server is not
|
||||
# able to configure the process file limit to allow for the specified limit
|
||||
# the max number of allowed clients is set to the current file limit
|
||||
# minus 32 (as Redis reserves a few file descriptors for internal uses).
|
||||
#
|
||||
# Once the limit is reached Redis will close all the new connections sending
|
||||
# an error 'max number of clients reached'.
|
||||
#
|
||||
# maxclients 10000
|
||||
|
||||
# Don't use more memory than the specified amount of bytes.
|
||||
# When the memory limit is reached Redis will try to remove keys
|
||||
# according to the eviction policy selected (see maxmemory-policy).
|
||||
#
|
||||
# If Redis can't remove keys according to the policy, or if the policy is
|
||||
# set to 'noeviction', Redis will start to reply with errors to commands
|
||||
# that would use more memory, like SET, LPUSH, and so on, and will continue
|
||||
# to reply to read-only commands like GET.
|
||||
#
|
||||
# This option is usually useful when using Redis as an LRU cache, or to set
|
||||
# a hard memory limit for an instance (using the 'noeviction' policy).
|
||||
#
|
||||
# WARNING: If you have slaves attached to an instance with maxmemory on,
|
||||
# the size of the output buffers needed to feed the slaves are subtracted
|
||||
# from the used memory count, so that network problems / resyncs will
|
||||
# not trigger a loop where keys are evicted, and in turn the output
|
||||
# buffer of slaves is full with DELs of keys evicted triggering the deletion
|
||||
# of more keys, and so forth until the database is completely emptied.
|
||||
#
|
||||
# In short... if you have slaves attached it is suggested that you set a lower
|
||||
# limit for maxmemory so that there is some free RAM on the system for slave
|
||||
# output buffers (but this is not needed if the policy is 'noeviction').
|
||||
#
|
||||
# maxmemory <bytes>
|
||||
|
||||
# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
|
||||
# is reached. You can select among five behaviors:
|
||||
#
|
||||
# volatile-lru -> remove the key with an expire set using an LRU algorithm
|
||||
# allkeys-lru -> remove any key according to the LRU algorithm
|
||||
# volatile-random -> remove a random key with an expire set
|
||||
# allkeys-random -> remove a random key, any key
|
||||
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
|
||||
# noeviction -> don't expire at all, just return an error on write operations
|
||||
#
|
||||
# Note: with any of the above policies, Redis will return an error on write
|
||||
# operations, when there are no suitable keys for eviction.
|
||||
#
|
||||
# At the date of writing these commands are: set setnx setex append
|
||||
# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
|
||||
# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
|
||||
# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
|
||||
# getset mset msetnx exec sort
|
||||
#
|
||||
# The default is:
|
||||
#
|
||||
# maxmemory-policy volatile-lru
|
||||
|
||||
# LRU and minimal TTL algorithms are not precise algorithms but approximated
|
||||
# algorithms (in order to save memory), so you can select as well the sample
|
||||
# size to check. For instance by default Redis will check three keys and
# pick the one that was used least recently; you can change the sample size
|
||||
# using the following configuration directive.
|
||||
#
|
||||
# maxmemory-samples 3
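#
# A minimal runtime sketch (the 100mb cap is only an example) of turning this
# instance into a bounded LRU cache with the directives described above:
#
#   redis-cli CONFIG SET maxmemory 100mb
#   redis-cli CONFIG SET maxmemory-policy allkeys-lru
#   redis-cli INFO memory            # compare used_memory with the cap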
|
||||
|
||||
############################## APPEND ONLY MODE ###############################
|
||||
|
||||
# By default Redis asynchronously dumps the dataset on disk. This mode is
|
||||
# good enough in many applications, but an issue with the Redis process or
|
||||
# a power outage may result in a few minutes of lost writes (depending on
|
||||
# the configured save points).
|
||||
#
|
||||
# The Append Only File is an alternative persistence mode that provides
|
||||
# much better durability. For instance using the default data fsync policy
|
||||
# (see later in the config file) Redis can lose just one second of writes in a
|
||||
# dramatic event like a server power outage, or a single write if something
# goes wrong with the Redis process itself while the operating system is
# still running correctly.
|
||||
#
|
||||
# AOF and RDB persistence can be enabled at the same time without problems.
|
||||
# If the AOF is enabled on startup Redis will load the AOF, that is the file
|
||||
# with the best durability guarantees.
|
||||
#
|
||||
# Please check http://redis.io/topics/persistence for more information.
|
||||
|
||||
appendonly yes
|
||||
|
||||
# The name of the append only file (default: "appendonly.aof")
|
||||
|
||||
appendfilename "appendonly.aof"
|
||||
|
||||
# The fsync() call tells the Operating System to actually write data on disk
|
||||
# instead of waiting for more data in the output buffer. Some OSes will really
# flush data to disk, while others will just try to do it ASAP.
|
||||
#
|
||||
# Redis supports three different modes:
|
||||
#
|
||||
# no: don't fsync, just let the OS flush the data when it wants. Faster.
|
||||
# always: fsync after every write to the append only log. Slow, Safest.
|
||||
# everysec: fsync only one time every second. Compromise.
|
||||
#
|
||||
# The default is "everysec", as that's usually the right compromise between
|
||||
# speed and data safety. It's up to you to understand if you can relax this to
|
||||
# "no" that will let the operating system flush the output buffer when
|
||||
# it wants, for better performances (but if you can live with the idea of
|
||||
# some data loss consider the default persistence mode that's snapshotting),
|
||||
# or on the contrary, use "always" that's very slow but a bit safer than
|
||||
# everysec.
|
||||
#
|
||||
# For more details please check the following article:
|
||||
# http://antirez.com/post/redis-persistence-demystified.html
|
||||
#
|
||||
# If unsure, use "everysec".
|
||||
|
||||
# appendfsync always
|
||||
appendfsync everysec
|
||||
# appendfsync no
|
||||
|
||||
# When the AOF fsync policy is set to always or everysec, and a background
|
||||
# saving process (a background save or AOF log background rewriting) is
|
||||
# performing a lot of I/O against the disk, in some Linux configurations
|
||||
# Redis may block too long on the fsync() call. Note that there is no fix for
|
||||
# this currently, as even performing fsync in a different thread will block
|
||||
# our synchronous write(2) call.
|
||||
#
|
||||
# In order to mitigate this problem it's possible to use the following option
|
||||
# that will prevent fsync() from being called in the main process while a
|
||||
# BGSAVE or BGREWRITEAOF is in progress.
|
||||
#
|
||||
# This means that while another child is saving, the durability of Redis is
|
||||
# the same as "appendfsync none". In practical terms, this means that it is
|
||||
# possible to lose up to 30 seconds of log in the worst scenario (with the
|
||||
# default Linux settings).
|
||||
#
|
||||
# If you have latency problems turn this to "yes". Otherwise leave it as
|
||||
# "no" that is the safest pick from the point of view of durability.
|
||||
|
||||
no-appendfsync-on-rewrite no
|
||||
|
||||
# Automatic rewrite of the append only file.
|
||||
# Redis is able to automatically rewrite the log file implicitly calling
|
||||
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
|
||||
#
|
||||
# This is how it works: Redis remembers the size of the AOF file after the
|
||||
# latest rewrite (if no rewrite has happened since the restart, the size of
|
||||
# the AOF at startup is used).
|
||||
#
|
||||
# This base size is compared to the current size. If the current size is
# bigger than the base size by the specified percentage, the rewrite is triggered. Also
|
||||
# you need to specify a minimal size for the AOF file to be rewritten, this
|
||||
# is useful to avoid rewriting the AOF file even if the percentage increase
|
||||
# is reached but it is still pretty small.
|
||||
#
|
||||
# Specify a percentage of zero in order to disable the automatic AOF
|
||||
# rewrite feature.
|
||||
|
||||
auto-aof-rewrite-percentage 100
|
||||
auto-aof-rewrite-min-size 64mb
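#
# Worked example with the values above: if the last rewrite left a 70mb AOF,
# a percentage of 100 triggers the next BGREWRITEAOF once the file passes
# 140mb. With a 20mb base the doubled size (40mb) is still under the 64mb
# minimum, so nothing happens until the file reaches 64mb.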
|
||||
|
||||
# An AOF file may be found to be truncated at the end during the Redis
|
||||
# startup process, when the AOF data gets loaded back into memory.
|
||||
# This may happen when the system where Redis is running
|
||||
# crashes, especially when an ext4 filesystem is mounted without the
|
||||
# data=ordered option (however this can't happen when Redis itself
|
||||
# crashes or aborts but the operating system still works correctly).
|
||||
#
|
||||
# Redis can either exit with an error when this happens, or load as much
|
||||
# data as possible (the default now) and start if the AOF file is found
|
||||
# to be truncated at the end. The following option controls this behavior.
|
||||
#
|
||||
# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
|
||||
# the Redis server starts emitting a log to inform the user of the event.
|
||||
# Otherwise if the option is set to no, the server aborts with an error
|
||||
# and refuses to start. When the option is set to no, the user is required
# to fix the AOF file using the "redis-check-aof" utility before restarting
# the server.
|
||||
#
|
||||
# Note that if the AOF file is found to be corrupted in the middle
# the server will still exit with an error. This option only applies when
# Redis tries to read more data from the AOF file but not enough bytes
# are found.
|
||||
aof-load-truncated yes
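#
# A minimal repair sketch, assuming the default appendfilename above, for
# when this option is "no" and the server refuses to start:
#
#   redis-check-aof --fix appendonly.aof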
|
||||
|
||||
################################ LUA SCRIPTING ###############################
|
||||
|
||||
# Max execution time of a Lua script in milliseconds.
|
||||
#
|
||||
# If the maximum execution time is reached Redis will log that a script is
|
||||
# still in execution after the maximum allowed time and will start to
|
||||
# reply to queries with an error.
|
||||
#
|
||||
# When a long running script exceeds the maximum execution time only the
|
||||
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
|
||||
# used to stop a script that has not yet called any write commands. The second
# is the only way to shut down the server in case a write command was
|
||||
# already issued by the script but the user doesn't want to wait for the natural
|
||||
# termination of the script.
|
||||
#
|
||||
# Set it to 0 or a negative value for unlimited execution without warnings.
|
||||
lua-time-limit 5000
|
||||
|
||||
################################## SLOW LOG ###################################
|
||||
|
||||
# The Redis Slow Log is a system to log queries that exceeded a specified
|
||||
# execution time. The execution time does not include the I/O operations
|
||||
# like talking with the client, sending the reply and so forth,
|
||||
# but just the time needed to actually execute the command (this is the only
|
||||
# stage of command execution where the thread is blocked and can not serve
|
||||
# other requests in the meantime).
|
||||
#
|
||||
# You can configure the slow log with two parameters: one tells Redis
|
||||
# what is the execution time, in microseconds, to exceed in order for the
|
||||
# command to get logged, and the other parameter is the length of the
|
||||
# slow log. When a new command is logged the oldest one is removed from the
|
||||
# queue of logged commands.
|
||||
|
||||
# The following time is expressed in microseconds, so 1000000 is equivalent
|
||||
# to one second. Note that a negative number disables the slow log, while
|
||||
# a value of zero forces the logging of every command.
|
||||
slowlog-log-slower-than 10000
|
||||
|
||||
# There is no limit to this length. Just be aware that it will consume memory.
|
||||
# You can reclaim memory used by the slow log with SLOWLOG RESET.
|
||||
slowlog-max-len 128
|
||||
|
||||
################################ LATENCY MONITOR ##############################
|
||||
|
||||
# The Redis latency monitoring subsystem samples different operations
|
||||
# at runtime in order to collect data related to possible sources of
|
||||
# latency of a Redis instance.
|
||||
#
|
||||
# Via the LATENCY command this information is available to the user that can
|
||||
# print graphs and obtain reports.
|
||||
#
|
||||
# The system only logs operations that were performed in a time equal to or
|
||||
# greater than the amount of milliseconds specified via the
|
||||
# latency-monitor-threshold configuration directive. When its value is set
|
||||
# to zero, the latency monitor is turned off.
|
||||
#
|
||||
# By default latency monitoring is disabled since it is mostly not needed
|
||||
# if you don't have latency issues, and collecting data has a performance
|
||||
# impact, that while very small, can be measured under big load. Latency
|
||||
# monitoring can easily be enabled at runtime using the command
|
||||
# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
|
||||
latency-monitor-threshold 0
|
||||
|
||||
############################# Event notification ##############################
|
||||
|
||||
# Redis can notify Pub/Sub clients about events happening in the key space.
|
||||
# This feature is documented at http://redis.io/topics/notifications
|
||||
#
|
||||
# For instance if keyspace events notification is enabled, and a client
|
||||
# performs a DEL operation on key "foo" stored in the Database 0, two
|
||||
# messages will be published via Pub/Sub:
|
||||
#
|
||||
# PUBLISH __keyspace@0__:foo del
|
||||
# PUBLISH __keyevent@0__:del foo
|
||||
#
|
||||
# It is possible to select the events that Redis will notify among a set
|
||||
# of classes. Every class is identified by a single character:
|
||||
#
|
||||
# K Keyspace events, published with __keyspace@<db>__ prefix.
|
||||
# E Keyevent events, published with __keyevent@<db>__ prefix.
|
||||
# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
|
||||
# $ String commands
|
||||
# l List commands
|
||||
# s Set commands
|
||||
# h Hash commands
|
||||
# z Sorted set commands
|
||||
# x Expired events (events generated every time a key expires)
|
||||
# e Evicted events (events generated when a key is evicted for maxmemory)
|
||||
# A Alias for g$lshzxe, so that the "AKE" string means all the events.
|
||||
#
|
||||
# The "notify-keyspace-events" takes as argument a string that is composed
|
||||
# of zero or multiple characters. The empty string means that notifications
|
||||
# are disabled.
|
||||
#
|
||||
# Example: to enable list and generic events, from the point of view of the
|
||||
# event name, use:
|
||||
#
|
||||
# notify-keyspace-events Elg
|
||||
#
|
||||
# Example 2: to get the stream of the expired keys subscribing to channel
|
||||
# name __keyevent@0__:expired use:
|
||||
#
|
||||
# notify-keyspace-events Ex
|
||||
#
|
||||
# By default all notifications are disabled because most users don't need
|
||||
# this feature and the feature has some overhead. Note that if you don't
|
||||
# specify at least one of K or E, no events will be delivered.
|
||||
notify-keyspace-events ""
|
||||
|
||||
############################### ADVANCED CONFIG ###############################
|
||||
|
||||
# Hashes are encoded using a memory efficient data structure when they have a
|
||||
# small number of entries, and the biggest entry does not exceed a given
|
||||
# threshold. These thresholds can be configured using the following directives.
|
||||
hash-max-ziplist-entries 512
|
||||
hash-max-ziplist-value 64
|
||||
|
||||
# Similarly to hashes, small lists are also encoded in a special way in order
|
||||
# to save a lot of space. The special representation is only used when
|
||||
# you are under the following limits:
|
||||
list-max-ziplist-entries 512
|
||||
list-max-ziplist-value 64
|
||||
|
||||
# Sets have a special encoding in just one case: when a set is composed
|
||||
# of just strings that happen to be integers in radix 10 in the range
|
||||
# of 64 bit signed integers.
|
||||
# The following configuration setting sets the limit in the size of the
|
||||
# set in order to use this special memory saving encoding.
|
||||
set-max-intset-entries 512
|
||||
|
||||
# Similarly to hashes and lists, sorted sets are also specially encoded in
|
||||
# order to save a lot of space. This encoding is only used when the length and
|
||||
# elements of a sorted set are below the following limits:
|
||||
zset-max-ziplist-entries 128
|
||||
zset-max-ziplist-value 64
|
||||
|
||||
# HyperLogLog sparse representation bytes limit. The limit includes the
|
||||
# 16 byte header. When a HyperLogLog using the sparse representation crosses
|
||||
# this limit, it is converted into the dense representation.
|
||||
#
|
||||
# A value greater than 16000 is totally useless, since at that point the
|
||||
# dense representation is more memory efficient.
|
||||
#
|
||||
# The suggested value is ~ 3000 in order to have the benefits of
|
||||
# the space efficient encoding without slowing down too much PFADD,
|
||||
# which is O(N) with the sparse encoding. The value can be raised to
|
||||
# ~ 10000 when CPU is not a concern, but space is, and the data set is
|
||||
# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
|
||||
hll-sparse-max-bytes 3000
|
||||
|
||||
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
|
||||
# order to help rehashing the main Redis hash table (the one mapping top-level
|
||||
# keys to values). The hash table implementation Redis uses (see dict.c)
|
||||
# performs a lazy rehashing: the more operations you run against a hash table
|
||||
# that is rehashing, the more rehashing "steps" are performed, so if the
|
||||
# server is idle the rehashing is never complete and some more memory is used
|
||||
# by the hash table.
|
||||
#
|
||||
# The default is to use this millisecond 10 times every second in order to
|
||||
# actively rehash the main dictionaries, freeing memory when possible.
|
||||
#
|
||||
# If unsure:
|
||||
# use "activerehashing no" if you have hard latency requirements and it is
|
||||
# not a good thing in your environment that Redis can reply from time to time
|
||||
# to queries with 2 milliseconds delay.
|
||||
#
|
||||
# use "activerehashing yes" if you don't have such hard requirements but
|
||||
# want to free memory asap when possible.
|
||||
activerehashing yes
|
||||
|
||||
# The client output buffer limits can be used to force disconnection of clients
|
||||
# that are not reading data from the server fast enough for some reason (a
|
||||
# common reason is that a Pub/Sub client can't consume messages as fast as the
|
||||
# publisher can produce them).
|
||||
#
|
||||
# The limit can be set differently for the three different classes of clients:
|
||||
#
|
||||
# normal -> normal clients including MONITOR clients
|
||||
# slave -> slave clients
|
||||
# pubsub -> clients subscribed to at least one pubsub channel or pattern
|
||||
#
|
||||
# The syntax of every client-output-buffer-limit directive is the following:
|
||||
#
|
||||
# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
|
||||
#
|
||||
# A client is immediately disconnected once the hard limit is reached, or if
|
||||
# the soft limit is reached and remains reached for the specified number of
|
||||
# seconds (continuously).
|
||||
# So for instance if the hard limit is 32 megabytes and the soft limit is
|
||||
# 16 megabytes / 10 seconds, the client will get disconnected immediately
|
||||
# if the size of the output buffers reaches 32 megabytes, but will also get
# disconnected if the client reaches 16 megabytes and continuously exceeds
|
||||
# the limit for 10 seconds.
|
||||
#
|
||||
# By default normal clients are not limited because they don't receive data
|
||||
# without asking (in a push way), but just after a request, so only
|
||||
# asynchronous clients may create a scenario where data is requested faster
|
||||
# than it can be read.
|
||||
#
|
||||
# Instead there is a default limit for pubsub and slave clients, since
|
||||
# subscribers and slaves receive data in a push fashion.
|
||||
#
|
||||
# Both the hard and the soft limit can be disabled by setting them to zero.
|
||||
client-output-buffer-limit normal 0 0 0
|
||||
client-output-buffer-limit slave 256mb 64mb 60
|
||||
client-output-buffer-limit pubsub 32mb 8mb 60
|
||||
|
||||
# Redis calls an internal function to perform many background tasks, like
|
||||
# closing connections of clients in timeout, purging expired keys that are
|
||||
# never requested, and so forth.
|
||||
#
|
||||
# Not all tasks are performed with the same frequency, but Redis checks for
|
||||
# tasks to perform according to the specified "hz" value.
|
||||
#
|
||||
# By default "hz" is set to 10. Raising the value will use more CPU when
|
||||
# Redis is idle, but at the same time will make Redis more responsive when
|
||||
# there are many keys expiring at the same time, and timeouts may be
|
||||
# handled with more precision.
|
||||
#
|
||||
# The range is between 1 and 500, however a value over 100 is usually not
|
||||
# a good idea. Most users should use the default of 10 and raise this up to
|
||||
# 100 only in environments where very low latency is required.
|
||||
hz 10
|
||||
|
||||
# When a child rewrites the AOF file, if the following option is enabled
|
||||
# the file will be fsync-ed every 32 MB of data generated. This is useful
|
||||
# in order to commit the file to the disk more incrementally and avoid
|
||||
# big latency spikes.
|
||||
aof-rewrite-incremental-fsync yes
|
85
vendor/k8s.io/kubernetes/examples/storage/redis/image/run.sh
generated
vendored
Executable file
@ -0,0 +1,85 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
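# launchmaster: start redis-server with the master config, creating the data
# directory (and warning about non-persistence) when the volume is missing.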
function launchmaster() {
|
||||
if [[ ! -e /redis-master-data ]]; then
|
||||
echo "Redis master data doesn't exist, data won't be persistent!"
|
||||
mkdir /redis-master-data
|
||||
fi
|
||||
redis-server /redis-master/redis.conf --protected-mode no
|
||||
}
|
||||
|
||||
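# launchsentinel: poll the Sentinel service for the current master (falling
# back to this pod's own IP), write a sentinel.conf, and start redis-sentinel.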
function launchsentinel() {
|
||||
while true; do
|
||||
master=$(redis-cli -h ${REDIS_SENTINEL_SERVICE_HOST} -p ${REDIS_SENTINEL_SERVICE_PORT} --csv SENTINEL get-master-addr-by-name mymaster | tr ',' ' ' | cut -d' ' -f1)
|
||||
if [[ -n ${master} ]]; then
|
||||
master="${master//\"}"
|
||||
else
|
||||
master=$(hostname -i)
|
||||
fi
|
||||
|
||||
redis-cli -h ${master} INFO
|
||||
if [[ "$?" == "0" ]]; then
|
||||
break
|
||||
fi
|
||||
echo "Connecting to master failed. Waiting..."
|
||||
sleep 10
|
||||
done
|
||||
|
||||
sentinel_conf=sentinel.conf
|
||||
|
||||
echo "sentinel monitor mymaster ${master} 6379 2" > ${sentinel_conf}
|
||||
echo "sentinel down-after-milliseconds mymaster 60000" >> ${sentinel_conf}
|
||||
echo "sentinel failover-timeout mymaster 180000" >> ${sentinel_conf}
|
||||
echo "sentinel parallel-syncs mymaster 1" >> ${sentinel_conf}
|
||||
echo "bind 0.0.0.0" >> ${sentinel_conf}
|
||||
|
||||
redis-sentinel ${sentinel_conf} --protected-mode no
|
||||
}
|
||||
|
||||
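# launchslave: wait until the master advertised by Sentinel answers INFO,
# substitute its address into redis-slave.conf, then start redis-server.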
function launchslave() {
|
||||
while true; do
|
||||
master=$(redis-cli -h ${REDIS_SENTINEL_SERVICE_HOST} -p ${REDIS_SENTINEL_SERVICE_PORT} --csv SENTINEL get-master-addr-by-name mymaster | tr ',' ' ' | cut -d' ' -f1)
|
||||
if [[ -n ${master} ]]; then
|
||||
master="${master//\"}"
|
||||
else
|
||||
echo "Failed to find master."
|
||||
sleep 60
|
||||
exit 1
|
||||
fi
|
||||
redis-cli -h ${master} INFO
|
||||
if [[ "$?" == "0" ]]; then
|
||||
break
|
||||
fi
|
||||
echo "Connecting to master failed. Waiting..."
|
||||
sleep 10
|
||||
done
|
||||
sed -i "s/%master-ip%/${master}/" /redis-slave/redis.conf
|
||||
sed -i "s/%master-port%/6379/" /redis-slave/redis.conf
|
||||
redis-server /redis-slave/redis.conf --protected-mode no
|
||||
}
|
||||
|
||||
if [[ "${MASTER}" == "true" ]]; then
|
||||
launchmaster
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [[ "${SENTINEL}" == "true" ]]; then
|
||||
launchsentinel
|
||||
exit 0
|
||||
fi
|
||||
|
||||
launchslave
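
# Usage sketch: the role is chosen purely by environment variables, matching
# the dispatch above (and the MASTER/SENTINEL env entries in the YAML below):
#   MASTER=true   -> run as Redis master
#   SENTINEL=true -> run as Sentinel
#   neither       -> run as a slave of the Sentinel-advertised master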
|
28
vendor/k8s.io/kubernetes/examples/storage/redis/redis-controller.yaml
generated
vendored
Normal file
@ -0,0 +1,28 @@
|
||||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: redis
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
name: redis
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
name: redis
|
||||
spec:
|
||||
containers:
|
||||
- name: redis
|
||||
image: gcr.io/google_containers/redis:v1
|
||||
ports:
|
||||
- containerPort: 6379
|
||||
resources:
|
||||
limits:
|
||||
cpu: "0.1"
|
||||
volumeMounts:
|
||||
- mountPath: /redis-master-data
|
||||
name: data
|
||||
volumes:
|
||||
- name: data
|
||||
emptyDir: {}
|
||||
|
33
vendor/k8s.io/kubernetes/examples/storage/redis/redis-master.yaml
generated
vendored
Normal file
@ -0,0 +1,33 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
labels:
|
||||
name: redis
|
||||
redis-sentinel: "true"
|
||||
role: master
|
||||
name: redis-master
|
||||
spec:
|
||||
containers:
|
||||
- name: master
|
||||
image: gcr.io/google_containers/redis:v1
|
||||
env:
|
||||
- name: MASTER
|
||||
value: "true"
|
||||
ports:
|
||||
- containerPort: 6379
|
||||
resources:
|
||||
limits:
|
||||
cpu: "0.1"
|
||||
volumeMounts:
|
||||
- mountPath: /redis-master-data
|
||||
name: data
|
||||
- name: sentinel
|
||||
image: kubernetes/redis:v1
|
||||
env:
|
||||
- name: SENTINEL
|
||||
value: "true"
|
||||
ports:
|
||||
- containerPort: 26379
|
||||
volumes:
|
||||
- name: data
|
||||
emptyDir: {}
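# A usage sketch, assuming this manifest is saved as redis-master.yaml:
#   kubectl create -f redis-master.yaml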
|
23
vendor/k8s.io/kubernetes/examples/storage/redis/redis-sentinel-controller.yaml
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
||||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: redis-sentinel
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
redis-sentinel: "true"
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
name: redis-sentinel
|
||||
redis-sentinel: "true"
|
||||
role: sentinel
|
||||
spec:
|
||||
containers:
|
||||
- name: sentinel
|
||||
image: gcr.io/google_containers/redis:v1
|
||||
env:
|
||||
- name: SENTINEL
|
||||
value: "true"
|
||||
ports:
|
||||
- containerPort: 26379
|
13
vendor/k8s.io/kubernetes/examples/storage/redis/redis-sentinel-service.yaml
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
name: sentinel
|
||||
role: service
|
||||
name: redis-sentinel
|
||||
spec:
|
||||
ports:
|
||||
- port: 26379
|
||||
targetPort: 26379
|
||||
selector:
|
||||
redis-sentinel: "true"
|
1
vendor/k8s.io/kubernetes/examples/storage/rethinkdb/README.md
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/storage/rethinkdb/README.md](https://github.com/kubernetes/examples/blob/master/staging/storage/rethinkdb/README.md)
|
vendor/k8s.io/kubernetes/examples/storage/rethinkdb/admin-pod.yaml (29 lines, generated, vendored, new file)
@@ -0,0 +1,29 @@
apiVersion: v1
kind: Pod
metadata:
  labels:
    db: rethinkdb
    role: admin
  name: rethinkdb-admin
spec:
  containers:
  - image: gcr.io/google_containers/rethinkdb:1.16.0_1
    name: rethinkdb
    env:
    - name: POD_NAMESPACE
      valueFrom:
        fieldRef:
          fieldPath: metadata.namespace
    ports:
    - containerPort: 8080
      name: admin-port
    - containerPort: 28015
      name: driver-port
    - containerPort: 29015
      name: cluster-port
    volumeMounts:
    - mountPath: /data/rethinkdb_data
      name: rethinkdb-storage
  volumes:
  - name: rethinkdb-storage
    emptyDir: {}
vendor/k8s.io/kubernetes/examples/storage/rethinkdb/admin-service.yaml (14 lines, generated, vendored, new file)
@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    db: rethinkdb
  name: rethinkdb-admin
spec:
  ports:
  - port: 8080
    targetPort: 8080
  type: LoadBalancer
  selector:
    db: rethinkdb
    role: admin
vendor/k8s.io/kubernetes/examples/storage/rethinkdb/driver-service.yaml (12 lines, generated, vendored, new file)
@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    db: rethinkdb
  name: rethinkdb-driver
spec:
  ports:
  - port: 28015
    targetPort: 28015
  selector:
    db: rethinkdb
vendor/k8s.io/kubernetes/examples/storage/rethinkdb/gen-pod.sh (73 lines, generated, vendored, new executable file)
@@ -0,0 +1,73 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

: ${VERSION:=1.16.0}

readonly NAME=${1-}
if [[ -z "${NAME}" ]]; then
  echo -e "\033[1;31mName must be specified\033[0m"
  exit 1
fi

ADMIN=""
if [[ ${NAME} == "admin" ]]; then
  ADMIN="role: admin"
fi

NODE=""
# One needs to label a node with the same key/value pair,
# i.e., 'kubectl label nodes <node-name> name=${2}'
if [[ ! -z "${2-}" ]]; then
  NODE="nodeSelector: { name: ${2} }"
fi

cat << EOF
apiVersion: v1
kind: Pod
metadata:
  labels:
    ${ADMIN}
    db: rethinkdb
  name: rethinkdb-${NAME}-${VERSION}
  namespace: rethinkdb
spec:
  containers:
  - image: antmanler/rethinkdb:${VERSION}
    name: rethinkdb
    ports:
    - containerPort: 8080
      name: admin-port
      protocol: TCP
    - containerPort: 28015
      name: driver-port
      protocol: TCP
    - containerPort: 29015
      name: cluster-port
      protocol: TCP
    volumeMounts:
    - mountPath: /data/rethinkdb_data
      name: rethinkdb-storage
  ${NODE}
  restartPolicy: Always
  volumes:
  - hostPath:
      path: /data/db/rethinkdb
    name: rethinkdb-storage
EOF
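A usage sketch for gen-pod.sh (invocation inferred from the script's argument
handling; 'node-1' is a hypothetical node name):

    kubectl label nodes node-1 name=node-1           # satisfy the emitted nodeSelector
    ./gen-pod.sh admin node-1 | kubectl create -f -  # the script writes pod YAML to stdout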
vendor/k8s.io/kubernetes/examples/storage/rethinkdb/image/Dockerfile (27 lines, generated, vendored, new file)
@@ -0,0 +1,27 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM rethinkdb:1.16.0


RUN apt-get update && \
    apt-get install -yq curl && \
    rm -rf /var/cache/apt/* && rm -rf /var/lib/apt/lists/* && \
    curl -L http://stedolan.github.io/jq/download/linux64/jq > /usr/bin/jq && \
    chmod u+x /usr/bin/jq

COPY ./run.sh /usr/bin/run.sh
RUN chmod u+x /usr/bin/run.sh

CMD "/usr/bin/run.sh"
vendor/k8s.io/kubernetes/examples/storage/rethinkdb/image/run.sh (44 lines, generated, vendored, new file)
@@ -0,0 +1,44 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o pipefail

echo Checking for other nodes
IP=""
if [[ -n "${KUBERNETES_SERVICE_HOST}" ]]; then

  POD_NAMESPACE=${POD_NAMESPACE:-default}
  MYHOST=$(ip addr | grep 'state UP' -A2 | tail -n1 | awk '{print $2}' | cut -f1 -d'/')
  echo My host: ${MYHOST}

  URL="https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}/api/v1/namespaces/${POD_NAMESPACE}/endpoints/rethinkdb-driver"
  echo "Endpoint url: ${URL}"
  echo "Looking for IPs..."
  token=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
  # try to pick up first different ip from endpoints
  IP=$(curl -s ${URL} --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt --header "Authorization: Bearer ${token}" \
    | jq -s -r --arg h "${MYHOST}" '.[0].subsets | .[].addresses | [ .[].ip ] | map(select(. != $h)) | .[0]') || exit 1
  [[ "${IP}" == null ]] && IP=""
fi

if [[ -n "${IP}" ]]; then
  ENDPOINT="${IP}:29015"
  echo "Join to ${ENDPOINT}"
  exec rethinkdb --bind all --join ${ENDPOINT}
else
  echo "Start single instance"
  exec rethinkdb --bind all
fi
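Note the ordering dependency this script creates: it discovers peers through
the endpoints of the rethinkdb-driver service, so that service must exist
before replicas can join one another; a sketch:

    kubectl create -f driver-service.yaml  # populate rethinkdb-driver endpoints
    kubectl create -f rc.yaml              # new replicas then join the first peer IP found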
vendor/k8s.io/kubernetes/examples/storage/rethinkdb/rc.yaml (38 lines, generated, vendored, new file)
@@ -0,0 +1,38 @@
apiVersion: v1
kind: ReplicationController
metadata:
  labels:
    db: rethinkdb
  name: rethinkdb-rc
spec:
  replicas: 1
  selector:
    db: rethinkdb
    role: replicas
  template:
    metadata:
      labels:
        db: rethinkdb
        role: replicas
    spec:
      containers:
      - image: gcr.io/google_containers/rethinkdb:1.16.0_1
        name: rethinkdb
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        ports:
        - containerPort: 8080
          name: admin-port
        - containerPort: 28015
          name: driver-port
        - containerPort: 29015
          name: cluster-port
        volumeMounts:
        - mountPath: /data/rethinkdb_data
          name: rethinkdb-storage
      volumes:
      - name: rethinkdb-storage
        emptyDir: {}
vendor/k8s.io/kubernetes/examples/storage/vitess/README.md (1 line, generated, vendored, new file)
@@ -0,0 +1 @@
This file has moved to [https://github.com/kubernetes/examples/blob/master/staging/storage/vitess/README.md](https://github.com/kubernetes/examples/blob/master/staging/storage/vitess/README.md)
vendor/k8s.io/kubernetes/examples/storage/vitess/configure.sh (73 lines, generated, vendored, new executable file)
@@ -0,0 +1,73 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script generates config.sh, which is a site-local config file that is not
# checked into source control.

# Select and configure Backup Storage Implementation.
storage=gcs
read -p "Backup Storage (file, gcs) [gcs]: "
if [ -n "$REPLY" ]; then storage="$REPLY"; fi

case "$storage" in
gcs)
  # Google Cloud Storage
  project=$(gcloud config list project | grep 'project\s*=' | sed -r 's/^.*=\s*(.*)$/\1/')
  read -p "Google Developers Console Project [$project]: "
  if [ -n "$REPLY" ]; then project="$REPLY"; fi
  if [ -z "$project" ]; then
    echo "ERROR: Project name must not be empty."
    exit 1
  fi

  read -p "Google Cloud Storage bucket for Vitess backups: " bucket
  if [ -z "$bucket" ]; then
    echo "ERROR: Bucket name must not be empty."
    exit 1
  fi
  echo
  echo "NOTE: If you haven't already created this bucket, you can do so by running:"
  echo "      gsutil mb gs://$bucket"
  echo

  backup_flags=$(echo -backup_storage_implementation gcs \
    -gcs_backup_storage_project "'$project'" \
    -gcs_backup_storage_bucket "'$bucket'")
  ;;
file)
  # Mounted volume (e.g. NFS)
  read -p "Root directory for backups (usually an NFS mount): " file_root
  if [ -z "$file_root" ]; then
    echo "ERROR: Root directory must not be empty."
    exit 1
  fi
  echo
  echo "NOTE: You must add your NFS mount to the vtctld-controller-template"
  echo "      and vttablet-pod-template as described in the Kubernetes docs:"
  echo "      http://kubernetes.io/v1.0/docs/user-guide/volumes.html#nfs"
  echo

  backup_flags=$(echo -backup_storage_implementation file \
    -file_backup_storage_root "'$file_root'")
  ;;
*)
  echo "ERROR: Unsupported backup storage implementation: $storage"
  exit 1
esac

echo "Saving config.sh..."
echo "backup_flags=\"$backup_flags\"" > config.sh
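A usage sketch: configure.sh is interactive and writes config.sh beside the
scripts; env.sh (below) exits if that file is missing. An example session,
with illustrative answers only:

    ./configure.sh
    # Backup Storage (file, gcs) [gcs]: file
    # Root directory for backups (usually an NFS mount): /backups
    cat config.sh
    # backup_flags="-backup_storage_implementation file -file_backup_storage_root '/backups'"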
vendor/k8s.io/kubernetes/examples/storage/vitess/create_test_table.sql (8 lines, generated, vendored, new file)
@@ -0,0 +1,8 @@
CREATE TABLE messages (
  page BIGINT(20) UNSIGNED,
  time_created_ns BIGINT(20) UNSIGNED,
  keyspace_id BIGINT(20) UNSIGNED,
  message VARCHAR(10000),
  PRIMARY KEY (page, time_created_ns)
) ENGINE=InnoDB
vendor/k8s.io/kubernetes/examples/storage/vitess/env.sh (63 lines, generated, vendored, new file)
@@ -0,0 +1,63 @@
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is an include file used by the other scripts in this directory.

# Most clusters will just be accessed with 'kubectl' on $PATH.
# However, some might require a different command. For example, GKE required
# KUBECTL='gcloud beta container kubectl' for a while. Now that most of our
# use cases just need KUBECTL=kubectl, we'll make that the default.
KUBECTL=${KUBECTL:-kubectl}

# This should match the nodePort in vtctld-service.yaml
VTCTLD_PORT=${VTCTLD_PORT:-30001}

# Customizable parameters
SHARDS=${SHARDS:-'-80,80-'}
TABLETS_PER_SHARD=${TABLETS_PER_SHARD:-2}
RDONLY_COUNT=${RDONLY_COUNT:-0}
MAX_TASK_WAIT_RETRIES=${MAX_TASK_WAIT_RETRIES:-300}
MAX_VTTABLET_TOPO_WAIT_RETRIES=${MAX_VTTABLET_TOPO_WAIT_RETRIES:-180}
VTTABLET_TEMPLATE=${VTTABLET_TEMPLATE:-'vttablet-pod-template.yaml'}
VTGATE_TEMPLATE=${VTGATE_TEMPLATE:-'vtgate-controller-template.yaml'}
VTGATE_COUNT=${VTGATE_COUNT:-1}
CELLS=${CELLS:-'test'}
ETCD_REPLICAS=3

VTGATE_REPLICAS=$VTGATE_COUNT

# Get the ExternalIP of any node.
get_node_ip() {
  $KUBECTL get -o template -t '{{range (index .items 0).status.addresses}}{{if eq .type "ExternalIP"}}{{.address}}{{end}}{{end}}' nodes
}

# Try to find vtctld address if not provided.
get_vtctld_addr() {
  if [ -z "$VTCTLD_ADDR" ]; then
    node_ip=$(get_node_ip)
    if [ -n "$node_ip" ]; then
      VTCTLD_ADDR="$node_ip:$VTCTLD_PORT"
    fi
  fi
  echo "$VTCTLD_ADDR"
}

config_file=`dirname "${BASH_SOURCE}"`/config.sh
if [ ! -f $config_file ]; then
  echo "Please run ./configure.sh first to generate config.sh file."
  exit 1
fi

source $config_file
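Since every parameter above uses the ${VAR:-default} pattern, the cluster
shape can be overridden per invocation without editing env.sh; for example
(values illustrative only):

    SHARDS='0' TABLETS_PER_SHARD=3 RDONLY_COUNT=1 ./vitess-up.sh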
vendor/k8s.io/kubernetes/examples/storage/vitess/etcd-controller-template.yaml (54 lines, generated, vendored, new file)
@@ -0,0 +1,54 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: etcd-{{cell}}
spec:
  replicas: {{replicas}}
  template:
    metadata:
      labels:
        component: etcd
        cell: {{cell}}
        app: vitess
    spec:
      volumes:
        - name: certs
          hostPath: {path: /etc/ssl/certs}
      containers:
        - name: etcd
          image: vitess/etcd:v2.0.13-lite
          volumeMounts:
            - name: certs
              readOnly: true
              mountPath: /etc/ssl/certs
          resources:
            limits:
              memory: "128Mi"
              cpu: "100m"
          command:
            - bash
            - "-c"
            - >-
              ipaddr=$(hostname -i)

              global_etcd=$ETCD_GLOBAL_SERVICE_HOST:$ETCD_GLOBAL_SERVICE_PORT

              cell="{{cell}}" &&
              local_etcd_host_var="ETCD_${cell^^}_SERVICE_HOST" &&
              local_etcd_port_var="ETCD_${cell^^}_SERVICE_PORT" &&
              local_etcd=${!local_etcd_host_var}:${!local_etcd_port_var}

              if [ "{{cell}}" != "global" ]; then
                until etcdctl -C "http://$global_etcd"
                set "/vt/cells/{{cell}}" "http://$local_etcd"; do
                  echo "[$(date)] waiting for global etcd to register cell '{{cell}}'";
                  sleep 1;
                done;
              fi

              etcd -name $HOSTNAME -discovery {{discovery}}
              -advertise-client-urls http://$ipaddr:4001
              -initial-advertise-peer-urls http://$ipaddr:7001
              -listen-client-urls http://$ipaddr:4001
              -listen-peer-urls http://$ipaddr:7001
vendor/k8s.io/kubernetes/examples/storage/vitess/etcd-down.sh (36 lines, generated, vendored, new executable file)
@@ -0,0 +1,36 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is an example script that tears down the etcd servers started by
# etcd-up.sh.

set -e

script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh

CELLS=${CELLS:-'test'}
cells=`echo $CELLS | tr ',' ' '`

# Delete replication controllers
for cell in 'global' $cells; do
  echo "Deleting etcd replicationcontroller for $cell cell..."
  $KUBECTL delete replicationcontroller etcd-$cell

  echo "Deleting etcd service for $cell cell..."
  $KUBECTL delete service etcd-$cell
done
vendor/k8s.io/kubernetes/examples/storage/vitess/etcd-service-template.yaml (16 lines, generated, vendored, new file)
@@ -0,0 +1,16 @@
kind: Service
apiVersion: v1
metadata:
  name: etcd-{{cell}}
  labels:
    component: etcd
    cell: {{cell}}
    app: vitess
spec:
  ports:
    - port: 4001
  selector:
    component: etcd
    cell: {{cell}}
    app: vitess
vendor/k8s.io/kubernetes/examples/storage/vitess/etcd-up.sh (60 lines, generated, vendored, new executable file)
@@ -0,0 +1,60 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is an example script that creates etcd clusters.
# Vitess requires a global cluster, as well as one for each cell.
#
# For automatic discovery, an etcd cluster can be bootstrapped from an
# existing cluster. In this example, we use an externally-run discovery
# service, but you can use your own. See the etcd docs for more:
# https://github.com/coreos/etcd/blob/v2.0.13/Documentation/clustering.md

set -e

script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh

replicas=${ETCD_REPLICAS:-3}

CELLS=${CELLS:-'test'}
cells=`echo $CELLS | tr ',' ' '`

for cell in 'global' $cells; do
  # Generate a discovery token.
  echo "Generating discovery token for $cell cell..."
  discovery=$(curl -sL https://discovery.etcd.io/new?size=$replicas)
  if [ -z "$discovery" ]; then
    echo "Failed to get etcd discovery token for cell '$cell'."
    exit 1
  fi

  # Create the client service, which will load-balance across all replicas.
  echo "Creating etcd service for $cell cell..."
  cat etcd-service-template.yaml | \
    sed -e "s/{{cell}}/$cell/g" | \
    $KUBECTL create -f -

  # Expand template variables
  sed_script=""
  for var in cell discovery replicas; do
    sed_script+="s,{{$var}},${!var},g;"
  done

  # Create the replication controller.
  echo "Creating etcd replicationcontroller for $cell cell..."
  cat etcd-controller-template.yaml | sed -e "$sed_script" | $KUBECTL create -f -
done
vendor/k8s.io/kubernetes/examples/storage/vitess/guestbook-controller.yaml (23 lines, generated, vendored, new file)
@@ -0,0 +1,23 @@
kind: ReplicationController
apiVersion: v1
metadata:
  name: guestbook
spec:
  replicas: 3
  template:
    metadata:
      labels:
        component: guestbook
        app: vitess
    spec:
      containers:
        - name: guestbook
          image: vitess/guestbook:v2.0.0-alpha5
          ports:
            - name: http-server
              containerPort: 8080
          resources:
            limits:
              memory: "128Mi"
              cpu: "100m"
vendor/k8s.io/kubernetes/examples/storage/vitess/guestbook-down.sh (28 lines, generated, vendored, new executable file)
@@ -0,0 +1,28 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is an example script that stops guestbook.

set -e

script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh

echo "Deleting guestbook replicationcontroller..."
$KUBECTL delete replicationcontroller guestbook

echo "Deleting guestbook service..."
$KUBECTL delete service guestbook
vendor/k8s.io/kubernetes/examples/storage/vitess/guestbook-service.yaml (16 lines, generated, vendored, new file)
@@ -0,0 +1,16 @@
kind: Service
apiVersion: v1
metadata:
  name: guestbook
  labels:
    component: guestbook
    app: vitess
spec:
  ports:
    - port: 80
      targetPort: http-server
  selector:
    component: guestbook
    app: vitess
  type: LoadBalancer
vendor/k8s.io/kubernetes/examples/storage/vitess/guestbook-up.sh (28 lines, generated, vendored, new executable file)
@@ -0,0 +1,28 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is an example script that starts a guestbook replicationcontroller.

set -e

script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh

echo "Creating guestbook service..."
$KUBECTL create -f guestbook-service.yaml

echo "Creating guestbook replicationcontroller..."
$KUBECTL create -f guestbook-controller.yaml
vendor/k8s.io/kubernetes/examples/storage/vitess/vitess-down.sh (23 lines, generated, vendored, new executable file)
@@ -0,0 +1,23 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh

./vtgate-down.sh
SHARDS=$SHARDS CELLS=$CELLS TABLETS_PER_SHARD=$TABLETS_PER_SHARD ./vttablet-down.sh
./vtctld-down.sh
./etcd-down.sh
vendor/k8s.io/kubernetes/examples/storage/vitess/vitess-up.sh (165 lines, generated, vendored, new executable file)
@@ -0,0 +1,165 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is an example script that creates a fully functional vitess cluster.
# It performs the following steps:
# - Create etcd clusters
# - Create vtctld pod
# - Create vttablet pods
# - Perform vtctl initialization:
#     SetKeyspaceShardingInfo, Rebuild Keyspace, Reparent Shard, Apply Schema
# - Create vtgate pods

script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh

cells=`echo $CELLS | tr ',' ' '`
num_cells=`echo $cells | wc -w`

function update_spinner_value () {
  spinner='-\|/'
  cur_spinner=${spinner:$(($1%${#spinner})):1}
}

function wait_for_running_tasks () {
  # This function waits for pods to be in the "Running" state
  # 1. task_name: Name that the desired task begins with
  # 2. num_tasks: Number of tasks to wait for
  # Returns:
  #   0 if successful, 1 if timed out
  task_name=$1
  num_tasks=$2
  counter=0

  echo "Waiting for ${num_tasks}x $task_name to enter state Running"

  while [ $counter -lt $MAX_TASK_WAIT_RETRIES ]; do
    # Get status column of pods with name starting with $task_name,
    # count how many are in state Running
    num_running=`$KUBECTL get pods | grep ^$task_name | grep Running | wc -l`

    echo -en "\r$task_name: $num_running out of $num_tasks in state Running..."
    if [ $num_running -eq $num_tasks ]
    then
      echo Complete
      return 0
    fi
    update_spinner_value $counter
    echo -n $cur_spinner
    let counter=counter+1
    sleep 1
  done
  echo Timed out
  return 1
}

if [ -z "$GOPATH" ]; then
  echo "ERROR: GOPATH undefined, can't obtain vtctlclient"
  exit 1
fi

export KUBECTL='kubectl'

echo "Downloading and installing vtctlclient..."
go get -u github.com/youtube/vitess/go/cmd/vtctlclient
num_shards=`echo $SHARDS | tr "," " " | wc -w`
total_tablet_count=$(($num_shards*$TABLETS_PER_SHARD*$num_cells))
vtgate_count=$VTGATE_COUNT
if [ $vtgate_count -eq 0 ]; then
  vtgate_count=$(($total_tablet_count/4>3?$total_tablet_count/4:3))
fi

echo "****************************"
echo "*Creating vitess cluster:"
echo "*  Shards: $SHARDS"
echo "*  Tablets per shard: $TABLETS_PER_SHARD"
echo "*  Rdonly per shard: $RDONLY_COUNT"
echo "*  VTGate count: $vtgate_count"
echo "*  Cells: $cells"
echo "****************************"

echo 'Running etcd-up.sh' && CELLS=$CELLS ./etcd-up.sh
wait_for_running_tasks etcd-global 3
for cell in $cells; do
  wait_for_running_tasks etcd-$cell 3
done

echo 'Running vtctld-up.sh' && ./vtctld-up.sh
echo 'Running vttablet-up.sh' && CELLS=$CELLS ./vttablet-up.sh
echo 'Running vtgate-up.sh' && ./vtgate-up.sh

wait_for_running_tasks vtctld 1
wait_for_running_tasks vttablet $total_tablet_count
wait_for_running_tasks vtgate $vtgate_count

vtctld_port=30001
vtctld_ip=`$KUBECTL get -o yaml nodes | grep 'type: ExternalIP' -B 1 | head -1 | awk '{print $NF}'`
vtctl_server="$vtctld_ip:$vtctld_port"
kvtctl="$GOPATH/bin/vtctlclient -server $vtctl_server"

echo Waiting for tablets to be visible in the topology
counter=0
while [ $counter -lt $MAX_VTTABLET_TOPO_WAIT_RETRIES ]; do
  num_tablets=0
  for cell in $cells; do
    num_tablets=$(($num_tablets+`$kvtctl ListAllTablets $cell | wc -l`))
  done
  echo -en "\r$num_tablets out of $total_tablet_count in topology..."
  if [ $num_tablets -eq $total_tablet_count ]
  then
    echo Complete
    break
  fi
  update_spinner_value $counter
  echo -n $cur_spinner
  let counter=counter+1
  sleep 1
  if [ $counter -eq $MAX_VTTABLET_TOPO_WAIT_RETRIES ]
  then
    echo Timed out
  fi
done

# split_shard_count = num_shards for sharded keyspace, 0 for unsharded
split_shard_count=$num_shards
if [ $split_shard_count -eq 1 ]; then
  split_shard_count=0
fi

echo -n Setting Keyspace Sharding Info...
$kvtctl SetKeyspaceShardingInfo -force -split_shard_count $split_shard_count test_keyspace keyspace_id uint64
echo Done
echo -n Rebuilding Keyspace Graph...
$kvtctl RebuildKeyspaceGraph test_keyspace
echo Done
echo -n Reparenting...
shard_num=1
for shard in $(echo $SHARDS | tr "," " "); do
  $kvtctl InitShardMaster -force test_keyspace/$shard `echo $cells | awk '{print $1}'`-0000000${shard_num}00
  let shard_num=shard_num+1
done
echo Done
echo -n Applying Schema...
$kvtctl ApplySchema -sql "$(cat create_test_table.sql)" test_keyspace
echo Done

echo "****************************"
echo "* Complete!"
echo "* Use the following line to make an alias to kvtctl:"
echo "* alias kvtctl='\$GOPATH/bin/vtctlclient -server $vtctl_server'"
echo "* See the vtctld UI at: http://${vtctld_ip}:30000"
echo "****************************"
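Following the script's closing hint, a quick post-bringup check might look
like this (sketch only; substitute the server address the script prints, and
note 'test' is the default cell from env.sh):

    alias kvtctl="$GOPATH/bin/vtctlclient -server <vtctld_ip>:30001"
    kvtctl ListAllTablets test   # should list every vttablet alias created above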
vendor/k8s.io/kubernetes/examples/storage/vitess/vtctld-controller-template.yaml (55 lines, generated, vendored, new file)
@@ -0,0 +1,55 @@
kind: ReplicationController
apiVersion: v1
metadata:
  name: vtctld
spec:
  replicas: 1
  template:
    metadata:
      labels:
        component: vtctld
        app: vitess
    spec:
      containers:
        - name: vtctld
          image: vitess/lite:v2.0.0-alpha5
          volumeMounts:
            - name: syslog
              mountPath: /dev/log
            - name: vtdataroot
              mountPath: /vt/vtdataroot
            - name: certs
              readOnly: true
              mountPath: /etc/ssl/certs
          resources:
            limits:
              memory: "128Mi"
              cpu: "100m"
          command:
            - sh
            - "-c"
            - >-
              mkdir -p $VTDATAROOT/tmp &&
              chown -R vitess /vt &&
              su -p -c "/vt/bin/vtctld
              -debug
              -templates $VTTOP/go/cmd/vtctld/templates
              -web_dir $VTTOP/web/vtctld
              -log_dir $VTDATAROOT/tmp
              -alsologtostderr
              -port 15000
              -grpc_port 15001
              -service_map 'grpc-vtctl'
              -topo_implementation etcd
              -tablet_protocol grpc
              -tablet_manager_protocol grpc
              -etcd_global_addrs http://$ETCD_GLOBAL_SERVICE_HOST:$ETCD_GLOBAL_SERVICE_PORT
              {{backup_flags}}" vitess
      volumes:
        - name: syslog
          hostPath: {path: /dev/log}
        - name: vtdataroot
          emptyDir: {}
        - name: certs
          hostPath: {path: /etc/ssl/certs}
vendor/k8s.io/kubernetes/examples/storage/vitess/vtctld-down.sh (28 lines, generated, vendored, new executable file)
@@ -0,0 +1,28 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is an example script that stops vtctld.

set -e

script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh

echo "Deleting vtctld replicationcontroller..."
$KUBECTL delete replicationcontroller vtctld

echo "Deleting vtctld service..."
$KUBECTL delete service vtctld
vendor/k8s.io/kubernetes/examples/storage/vitess/vtctld-service.yaml (22 lines, generated, vendored, new file)
@@ -0,0 +1,22 @@
kind: Service
apiVersion: v1
metadata:
  name: vtctld
  labels:
    component: vtctld
    app: vitess
spec:
  ports:
    - port: 15000
      name: web
      targetPort: 15000
      nodePort: 30000
    - port: 15001
      name: grpc
      targetPort: 15001
      nodePort: 30001
  selector:
    component: vtctld
    app: vitess
  type: NodePort
vendor/k8s.io/kubernetes/examples/storage/vitess/vtctld-up.sh (40 lines, generated, vendored, new executable file)
@@ -0,0 +1,40 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is an example script that starts vtctld.

set -e

script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh

echo "Creating vtctld service..."
$KUBECTL create -f vtctld-service.yaml

echo "Creating vtctld replicationcontroller..."
# Expand template variables
sed_script=""
for var in backup_flags; do
  sed_script+="s,{{$var}},${!var},g;"
done

# Instantiate template and send to kubectl.
cat vtctld-controller-template.yaml | sed -e "$sed_script" | $KUBECTL create -f -

server=$(get_vtctld_addr)
echo
echo "vtctld address: http://$server"
vendor/k8s.io/kubernetes/examples/storage/vitess/vtgate-controller-template.yaml (45 lines, generated, vendored, new file)
@@ -0,0 +1,45 @@
kind: ReplicationController
apiVersion: v1
metadata:
  name: vtgate
spec:
  replicas: {{replicas}}
  template:
    metadata:
      labels:
        component: vtgate
        app: vitess
    spec:
      containers:
        - name: vtgate
          image: vitess/lite:v2.0.0-alpha5
          volumeMounts:
            - name: syslog
              mountPath: /dev/log
            - name: vtdataroot
              mountPath: /vt/vtdataroot
          resources:
            limits:
              memory: "512Mi"
              cpu: "500m"
          command:
            - sh
            - "-c"
            - >-
              mkdir -p $VTDATAROOT/tmp &&
              chown -R vitess /vt &&
              su -p -c "/vt/bin/vtgate
              -topo_implementation etcd
              -etcd_global_addrs http://$ETCD_GLOBAL_SERVICE_HOST:$ETCD_GLOBAL_SERVICE_PORT
              -log_dir $VTDATAROOT/tmp
              -alsologtostderr
              -port 15001
              -tablet_protocol grpc
              -service_map 'bsonrpc-vt-vtgateservice'
              -cell test" vitess
      volumes:
        - name: syslog
          hostPath: {path: /dev/log}
        - name: vtdataroot
          emptyDir: {}
vendor/k8s.io/kubernetes/examples/storage/vitess/vtgate-down.sh (28 lines, generated, vendored, new executable file)
@@ -0,0 +1,28 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is an example script that stops vtgate.

set -e

script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh

echo "Deleting vtgate replicationcontroller..."
$KUBECTL delete replicationcontroller vtgate

echo "Deleting vtgate service..."
$KUBECTL delete service vtgate
vendor/k8s.io/kubernetes/examples/storage/vitess/vtgate-service.yaml (15 lines, generated, vendored, new file)
@@ -0,0 +1,15 @@
kind: Service
apiVersion: v1
metadata:
  name: vtgate
  labels:
    component: vtgate
    app: vitess
spec:
  ports:
    - port: 15001
  selector:
    component: vtgate
    app: vitess
  type: LoadBalancer
vendor/k8s.io/kubernetes/examples/storage/vitess/vtgate-up.sh (38 lines, generated, vendored, new executable file)
@@ -0,0 +1,38 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is an example script that starts a vtgate replicationcontroller.

set -e

script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh

VTGATE_REPLICAS=${VTGATE_REPLICAS:-3}
VTGATE_TEMPLATE=${VTGATE_TEMPLATE:-'vtgate-controller-template.yaml'}

replicas=$VTGATE_REPLICAS

echo "Creating vtgate service..."
$KUBECTL create -f vtgate-service.yaml

sed_script=""
for var in replicas; do
  sed_script+="s,{{$var}},${!var},g;"
done

echo "Creating vtgate replicationcontroller..."
cat $VTGATE_TEMPLATE | sed -e "$sed_script" | $KUBECTL create -f -
vendor/k8s.io/kubernetes/examples/storage/vitess/vttablet-down.sh (51 lines, generated, vendored, new executable file)
@@ -0,0 +1,51 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is an example script that tears down the vttablet pods started by
# vttablet-up.sh.

set -e

script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh

server=$(get_vtctld_addr)

# Delete the pods for all shards
CELLS=${CELLS:-'test'}
keyspace='test_keyspace'
SHARDS=${SHARDS:-'0'}
TABLETS_PER_SHARD=${TABLETS_PER_SHARD:-5}
UID_BASE=${UID_BASE:-100}

num_shards=`echo $SHARDS | tr "," " " | wc -w`
uid_base=$UID_BASE

for shard in `seq 1 $num_shards`; do
  cell_index=0
  for cell in `echo $CELLS | tr "," " "`; do
    for uid_index in `seq 0 $(($TABLETS_PER_SHARD-1))`; do
      uid=$[$uid_base + $uid_index + $cell_index]
      printf -v alias '%s-%010d' $cell $uid

      echo "Deleting pod for tablet $alias..."
      $KUBECTL delete pod vttablet-$uid
    done
    let cell_index=cell_index+100000000
  done
  let uid_base=uid_base+100
done
vendor/k8s.io/kubernetes/examples/storage/vitess/vttablet-pod-template.yaml (128 lines, generated, vendored, new file)
@@ -0,0 +1,128 @@
kind: Pod
apiVersion: v1
metadata:
  name: vttablet-{{uid}}
  labels:
    component: vttablet
    keyspace: "{{keyspace}}"
    shard: "{{shard_label}}"
    tablet: "{{alias}}"
    app: vitess
spec:
  containers:
    - name: vttablet
      image: vitess/lite:v2.0.0-alpha5
      volumeMounts:
        - name: syslog
          mountPath: /dev/log
        - name: vtdataroot
          mountPath: /vt/vtdataroot
        - name: certs
          readOnly: true
          mountPath: /etc/ssl/certs
      resources:
        limits:
          memory: "1Gi"
          cpu: "500m"
      command:
        - bash
        - "-c"
        - >-
          set -e

          mysql_socket="$VTDATAROOT/{{tablet_subdir}}/mysql.sock"

          mkdir -p $VTDATAROOT/tmp

          chown -R vitess /vt

          while [ ! -e $mysql_socket ]; do
            echo "[$(date)] waiting for $mysql_socket" ;
            sleep 1 ;
          done

          su -p -s /bin/bash -c "mysql -u vt_dba -S $mysql_socket
          -e 'CREATE DATABASE IF NOT EXISTS vt_{{keyspace}}'" vitess

          su -p -s /bin/bash -c "/vt/bin/vttablet
          -topo_implementation etcd
          -etcd_global_addrs http://$ETCD_GLOBAL_SERVICE_HOST:$ETCD_GLOBAL_SERVICE_PORT
          -log_dir $VTDATAROOT/tmp
          -alsologtostderr
          -port {{port}}
          -grpc_port {{grpc_port}}
          -service_map 'grpc-queryservice,grpc-tabletmanager,grpc-updatestream'
          -binlog_player_protocol grpc
          -tablet-path {{alias}}
          -tablet_hostname $(hostname -i)
          -init_keyspace {{keyspace}}
          -init_shard {{shard}}
          -target_tablet_type {{tablet_type}}
          -mysqlctl_socket $VTDATAROOT/mysqlctl.sock
          -db-config-app-uname vt_app
          -db-config-app-dbname vt_{{keyspace}}
          -db-config-app-charset utf8
          -db-config-dba-uname vt_dba
          -db-config-dba-dbname vt_{{keyspace}}
          -db-config-dba-charset utf8
          -db-config-repl-uname vt_repl
          -db-config-repl-dbname vt_{{keyspace}}
          -db-config-repl-charset utf8
          -db-config-filtered-uname vt_filtered
          -db-config-filtered-dbname vt_{{keyspace}}
          -db-config-filtered-charset utf8
          -enable-rowcache
          -rowcache-bin /usr/bin/memcached
          -rowcache-socket $VTDATAROOT/{{tablet_subdir}}/memcache.sock
          -health_check_interval 5s
          -restore_from_backup {{backup_flags}}" vitess
    - name: mysql
      image: vitess/lite:v2.0.0-alpha5
      volumeMounts:
        - name: syslog
          mountPath: /dev/log
        - name: vtdataroot
          mountPath: /vt/vtdataroot
      resources:
        limits:
          memory: "1Gi"
          cpu: "500m"
      command:
        - sh
        - "-c"
        - >-
          mkdir -p $VTDATAROOT/tmp &&
          chown -R vitess /vt

          su -p -c "/vt/bin/mysqlctld
          -log_dir $VTDATAROOT/tmp
          -alsologtostderr
          -tablet_uid {{uid}}
          -socket_file $VTDATAROOT/mysqlctl.sock
          -db-config-app-uname vt_app
          -db-config-app-dbname vt_{{keyspace}}
          -db-config-app-charset utf8
          -db-config-dba-uname vt_dba
          -db-config-dba-dbname vt_{{keyspace}}
          -db-config-dba-charset utf8
          -db-config-repl-uname vt_repl
          -db-config-repl-dbname vt_{{keyspace}}
          -db-config-repl-charset utf8
          -db-config-filtered-uname vt_filtered
          -db-config-filtered-dbname vt_{{keyspace}}
          -db-config-filtered-charset utf8
          -bootstrap_archive mysql-db-dir_10.0.13-MariaDB.tbz" vitess
      # The bootstrap archive above contains an empty mysql data dir
      # with user permissions set up as required by Vitess. The archive is
      # included in the Docker image.
      env:
        - name: EXTRA_MY_CNF
          value: /vt/config/mycnf/master_mariadb.cnf
  volumes:
    - name: syslog
      hostPath: {path: /dev/log}
    - name: vtdataroot
      emptyDir: {}
    - name: certs
      hostPath: {path: /etc/ssl/certs}
vendor/k8s.io/kubernetes/examples/storage/vitess/vttablet-up.sh (68 lines, generated, vendored, new executable file)
@@ -0,0 +1,68 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is an example script that creates a vttablet deployment.

set -e

script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh

# Create the pods for shard-0
CELLS=${CELLS:-'test'}
keyspace='test_keyspace'
SHARDS=${SHARDS:-'0'}
TABLETS_PER_SHARD=${TABLETS_PER_SHARD:-5}
port=15002
grpc_port=16002
UID_BASE=${UID_BASE:-100}
VTTABLET_TEMPLATE=${VTTABLET_TEMPLATE:-'vttablet-pod-template.yaml'}
RDONLY_COUNT=${RDONLY_COUNT:-2}

uid_base=$UID_BASE
for shard in $(echo $SHARDS | tr "," " "); do
  cell_index=0
  for cell in `echo $CELLS | tr ',' ' '`; do
    echo "Creating $keyspace.shard-$shard pods in cell $cell..."
    for uid_index in `seq 0 $(($TABLETS_PER_SHARD-1))`; do
      uid=$[$uid_base + $uid_index + $cell_index]
      printf -v alias '%s-%010d' $cell $uid
      printf -v tablet_subdir 'vt_%010d' $uid

      echo "Creating pod for tablet $alias..."

      # Add xx to beginning or end if there is a dash. K8s does not allow for
      # leading or trailing dashes for labels
      shard_label=`echo $shard | sed s'/[-]$/-xx/' | sed s'/^-/xx-/'`

      tablet_type=replica
      if [ $uid_index -gt $(($TABLETS_PER_SHARD-$RDONLY_COUNT-1)) ]; then
        tablet_type=rdonly
      fi

      # Expand template variables
      sed_script=""
      for var in alias cell uid keyspace shard shard_label port grpc_port tablet_subdir tablet_type backup_flags; do
        sed_script+="s,{{$var}},${!var},g;"
      done

      # Instantiate template and send to kubectl.
      cat $VTTABLET_TEMPLATE | sed -e "$sed_script" | $KUBECTL create -f -
    done
    let cell_index=cell_index+100000000
  done
  let uid_base=uid_base+100
done
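To illustrate the shard_label rewrite above: Kubernetes label values may not
begin or end with a dash, so the default SHARDS='-80,80-' range names are
padded before being used as labels:

    echo '-80' | sed s'/[-]$/-xx/' | sed s'/^-/xx-/'   # -> xx-80
    echo '80-' | sed s'/[-]$/-xx/' | sed s'/^-/xx-/'   # -> 80-xx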