Elasticsearch and Kibana (Docker Swarm)
Introduction
This guide demonstrates how to set up a 3-node Elasticsearch cluster and a Kibana instance on a single VM running Docker Swarm.
Assumptions
- You are running Photon OS 3.0 Minimal
- You are running Docker in Swarm mode
Setup
Insert the following into /etc/sysctl.conf
vi /etc/sysctl.conf
# ...
vm.max_map_count=262144
Apply the sysctl changes
sysctl -p
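You can verify that the new value is active before moving on (a quick sanity check; the expected output assumes the value set above):
sysctl vm.max_map_count
# should print: vm.max_map_count = 262144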
Edit containerd.service so that containers may lock unlimited memory (needed for bootstrap.memory_lock=true)
systemctl edit containerd.service
[Service]
LimitMEMLOCK=infinity
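systemctl edit only writes a drop-in override; it takes effect once the services are restarted. A minimal sketch, assuming Docker runs on top of this containerd instance:
systemctl daemon-reload
systemctl restart containerd docker
# confirm the override is applied
systemctl show containerd | grep LimitMEMLOCK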
Make project directory
mkdir -p /opt/docker/elasticsearch-kibana
cd /opt/docker/elasticsearch-kibana
Create the following files:
.env
create-certs.yaml
instances.yaml
docker-compose.yaml
before_up.sh
up.sh
after_up.sh
down.sh
.env
CERTS_DIR=/usr/share/elasticsearch/config/certificates
VERSION=7.9.1
create-certs.yaml
version: '2.2'
services:
create_certs:
image: yourprivateregistry/elasticsearch:${VERSION}
container_name: create_certs
command: >
bash -c '
yum install -y -q -e 0 unzip;
if [[ ! -f /certs/bundle.zip ]]; then
bin/elasticsearch-certutil cert --silent --pem --in config/certificates/instances.yaml -out /certs/bundle.zip;
unzip /certs/bundle.zip -d /certs;
fi;
chown -R 1000:0 /certs
'
working_dir: /usr/share/elasticsearch
volumes:
- certs:/certs
- .:/usr/share/elasticsearch/config/certificates
networks:
- elastic
volumes:
certs:
driver: local
networks:
elastic:
driver: bridge
instances.yaml
instances:
- name: es01
dns:
- es01
- localhost
ip:
- 127.0.0.1
- name: es02
dns:
- es02
- localhost
ip:
- 127.0.0.1
- name: es03
dns:
- es03
- localhost
ip:
- 127.0.0.1
- name: 'kib01'
dns:
- kib01
- localhost
docker-compose.yaml
(Some properties are kept so that the file can also be run directly with docker-compose)
version: "3.8"
services:
es01:
image: yourprivateregistry/elasticsearch:${VERSION}
environment:
- node.name=es01
- cluster.name=es-docker-cluster
- network.publish_host=_eth0_
- discovery.seed_hosts=es02,es03
- cluster.initial_master_nodes=es01,es02,es03
- bootstrap.memory_lock=true
- "ES_JAVA_OPTS=-Xms1024m -Xmx1024m"
- ELASTIC_PASSWORD_FILE=/run/secrets/elastic_password
- xpack.license.self_generated.type=trial
- xpack.security.enabled=true
- xpack.security.http.ssl.enabled=true
- xpack.security.http.ssl.key=$CERTS_DIR/es01/es01.key
- xpack.security.http.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt
- xpack.security.http.ssl.certificate=$CERTS_DIR/es01/es01.crt
- xpack.security.transport.ssl.enabled=true
- xpack.security.transport.ssl.verification_mode=certificate
- xpack.security.transport.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt
- xpack.security.transport.ssl.certificate=$CERTS_DIR/es01/es01.crt
- xpack.security.transport.ssl.key=$CERTS_DIR/es01/es01.key
secrets:
- source: elastic_password
target: elastic_password
uid: '1000'
gid: '0'
mode: 0400
deploy:
replicas: 1
restart_policy:
condition: any
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- data01:/usr/share/elasticsearch/data
- certs:$CERTS_DIR
ports:
- 9201:9200
networks:
- elastic
healthcheck:
test: curl --cacert $CERTS_DIR/ca/ca.crt -s https://localhost:9200 >/dev/null; if [[ $$? == 52 ]]; then echo 0; else echo 1; fi
interval: 30s
timeout: 10s
retries: 5
es02:
image: yourprivateregistry/elasticsearch:${VERSION}
environment:
- node.name=es02
- cluster.name=es-docker-cluster
- network.publish_host=_eth0_
- discovery.seed_hosts=es01,es03
- cluster.initial_master_nodes=es01,es02,es03
- bootstrap.memory_lock=true
- "ES_JAVA_OPTS=-Xms1024m -Xmx1024m"
- ELASTIC_PASSWORD_FILE=/run/secrets/elastic_password
- xpack.license.self_generated.type=trial
- xpack.security.enabled=true
- xpack.security.http.ssl.enabled=true
- xpack.security.http.ssl.key=$CERTS_DIR/es02/es02.key
- xpack.security.http.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt
- xpack.security.http.ssl.certificate=$CERTS_DIR/es02/es02.crt
- xpack.security.transport.ssl.enabled=true
- xpack.security.transport.ssl.verification_mode=certificate
- xpack.security.transport.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt
- xpack.security.transport.ssl.certificate=$CERTS_DIR/es02/es02.crt
- xpack.security.transport.ssl.key=$CERTS_DIR/es02/es02.key
secrets:
- source: elastic_password
target: elastic_password
uid: '1000'
gid: '0'
mode: 0400
deploy:
replicas: 1
restart_policy:
condition: any
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- data02:/usr/share/elasticsearch/data
- certs:$CERTS_DIR
ports:
- 9202:9200
networks:
- elastic
healthcheck:
test: curl --cacert $CERTS_DIR/ca/ca.crt -s https://localhost:9200 >/dev/null; if [[ $$? == 52 ]]; then echo 0; else echo 1; fi
interval: 30s
timeout: 10s
retries: 5
es03:
image: yourprivateregistry/elasticsearch:${VERSION}
environment:
- node.name=es03
- cluster.name=es-docker-cluster
- network.publish_host=_eth0_
- discovery.seed_hosts=es01,es02
- cluster.initial_master_nodes=es01,es02,es03
- bootstrap.memory_lock=true
- "ES_JAVA_OPTS=-Xms1024m -Xmx1024m"
- ELASTIC_PASSWORD_FILE=/run/secrets/elastic_password
- xpack.license.self_generated.type=trial
- xpack.security.enabled=true
- xpack.security.http.ssl.enabled=true
- xpack.security.http.ssl.key=$CERTS_DIR/es03/es03.key
- xpack.security.http.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt
- xpack.security.http.ssl.certificate=$CERTS_DIR/es03/es03.crt
- xpack.security.transport.ssl.enabled=true
- xpack.security.transport.ssl.verification_mode=certificate
- xpack.security.transport.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt
- xpack.security.transport.ssl.certificate=$CERTS_DIR/es03/es03.crt
- xpack.security.transport.ssl.key=$CERTS_DIR/es03/es03.key
secrets:
- source: elastic_password
target: elastic_password
uid: '1000'
gid: '0'
mode: 0400
deploy:
replicas: 1
restart_policy:
condition: any
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- data03:/usr/share/elasticsearch/data
- certs:$CERTS_DIR
ports:
- 9203:9200
networks:
- elastic
healthcheck:
test: curl --cacert $CERTS_DIR/ca/ca.crt -s https://localhost:9200 >/dev/null; if [[ $$? == 52 ]]; then echo 0; else echo 1; fi
interval: 30s
timeout: 10s
retries: 5
kib01:
image: yourprivateregistry/kibana:${VERSION}
ports:
- 5601:5601
environment:
SERVERNAME: localhost
ELASTICSEARCH_URL: https://es01:9200
ELASTICSEARCH_HOSTS: https://es01:9200
ELASTICSEARCH_USERNAME: elastic
# ELASTICSEARCH_PASSWORD: not_safe_to_set_here
ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES: $CERTS_DIR/ca/ca.crt
SERVER_SSL_ENABLED: "true"
SERVER_SSL_KEY: $CERTS_DIR/kib01/kib01.key
SERVER_SSL_CERTIFICATE: $CERTS_DIR/kib01/kib01.crt
secrets:
- source: elastic_password
target: elastic_password
uid: '1000'
gid: '0'
mode: 0400
deploy:
replicas: 1
restart_policy:
condition: any
volumes:
- kibdata:/usr/share/kibana/config/
- certs:$CERTS_DIR
networks:
- elastic
secrets:
elastic_password:
file: assets/elastic_pass
volumes:
data01:
data02:
data03:
kibdata:
certs:
networks:
elastic:
before_up.sh:
#!/bin/bash
# Create certs
docker-compose -f create-certs.yaml run --rm create_certs
up.sh (your deployment name is assumed to be es, change accordingly):
#!/bin/bash
# Use docker-compose to read in .env to substitute variables before running with docker stack.
docker stack deploy -c <(docker-compose -f docker-compose.yaml config) es
after_up.sh:
#!/bin/bash
deployment_name="es"
service_name="kib01"
path_to_secret="/run/secrets/elastic_password"
# Ensure Kibana keystore
echo "Ensuring Kibana Keystore ..."
docker exec -it $(docker ps --filter "name=${deployment_name}_${service_name}" -q) \
bash -c "if ! test -f '/usr/share/kibana/config/kibana.keystore'; then kibana-keystore create; fi"
# Set Kibana's Elasticsearch connection password from mounted Docker secret
echo "Setting Elasticsearch connection password ..."
docker exec -it $(docker ps --filter "name=${deployment_name}_${service_name}" -q) \
bash -c "cat $path_to_secret | kibana-keystore add elasticsearch.password --stdin -s"
# Restart container
echo "Restarting Kibana container ..."
docker service update --force \
$(docker stack services ${deployment_name} --filter "name=${deployment_name}_${service_name}" -q) > /dev/null
# Clean stopped containers
echo "Cleaning stopped container(s) ..."
docker rm $(docker ps -aq) > /dev/null 2>&1
echo "Done"
down.sh:
#!/bin/bash
docker stack rm es
Set appropriate permissions for bash scripts
chmod 700 before_up.sh up.sh after_up.sh down.sh
Create a file to hold your password
mkdir /opt/docker/elasticsearch-kibana/assets
cd /opt/docker/elasticsearch-kibana/assets
nano elastic_pass
YOUR_PASSWORD
chmod 600 elastic_pass
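If you prefer not to type a password manually, one way to generate a random one into this file is shown below (a suggestion rather than part of the original setup; any sufficiently strong password works):
openssl rand -base64 24 > /opt/docker/elasticsearch-kibana/assets/elastic_pass
chmod 600 /opt/docker/elasticsearch-kibana/assets/elastic_pass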
Operation
cd /opt/docker/elasticsearch-kibana
Run before_up.sh to create the SSL certificates inside the certs Docker volume
./before_up.sh
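If you want to confirm that the certificates were generated, you can inspect the certs volume. The exact volume name depends on the compose project name (by default the directory name), so check docker volume ls first; a sketch assuming the default name and the public alpine image:
docker volume ls | grep certs
docker run --rm -v elasticsearch-kibana_certs:/certs alpine ls -R /certs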
Run up.sh to substitute the .env variable values into docker-compose.yaml (via docker-compose config) and deploy the resulting file as a Docker stack
./up.sh
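You can check that all replicas have come up (the stack name es matches up.sh):
docker stack services es
docker service ps es_es01 es_es02 es_es03 es_kib01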
Run after_up.sh to set Kibana's Elasticsearch connection password from the mounted Docker secret
./after_up.sh
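To confirm the password was stored, you can list the keystore entries inside the Kibana container (the name filter below assumes the defaults used in after_up.sh):
docker exec -it $(docker ps --filter "name=es_kib01" -q) kibana-keystore list
# should list: elasticsearch.password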
You can now visit Kibana's web interface at https://<your_vm_ip>:5601 (192.168.3.157 in this example).
username: elastic
password: YOUR_PASSWORD
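You can also verify the Elasticsearch cluster itself through one of the published ports on the VM (a quick check; -k skips CA verification because the CA file lives inside the certs volume rather than on the host):
curl -k -u elastic:YOUR_PASSWORD "https://localhost:9201/_cat/nodes?v"
# all three nodes (es01, es02, es03) should be listed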
Run down.sh to remove the stack
./down.sh
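Note that removing the stack keeps the named volumes, so indices and certificates survive a redeploy. To wipe everything you could also remove the volumes after the stack is gone (volume names assume the stack name es; double-check with docker volume ls):
docker volume rm es_data01 es_data02 es_data03 es_kibdata es_certs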
Useful Links
Elasticsearch Docker Swarm discovery problems