Elasticsearch cluster with multiple nodes

Purpose

To index content and provide search results for a multi-node and/or round-robin deployment of Nextcloud. This example uses Docker and docker-compose, with a possible Swarm adaptation (compose file tune-up).

Scope

Allow indexing and search for a multi-node deployment:

  • database: Galera cluster with MariaDB
  • files: GlusterFS, local or same-server bricks with a native FUSE mount; an NFS mount is also possible (via the Gluster Ganesha NFS export)
  • network: Docker bridge virtual network for the services cluster.

notes:

  • the Elasticsearch docker-compose YAML below uses production settings

  • for production system settings, see the Elasticsearch documentation (a minimal host-level sketch follows this list)

  • kibana is optional

  • to install Docker and docker-compose:

  • for most OSes, Docker: curl -sSL https://get.docker.com/ | CHANNEL=stable sh

  • for most OSes, docker-compose:

     curl -L https://github.com/docker/compose/releases/download/$(curl -Ls https://www.servercow.de/docker-compose/latest.php)/docker-compose-$(uname -s)-$(uname -m) > /usr/local/bin/docker-compose
     chmod +x /usr/local/bin/docker-compose
    
  • the Dockerfile is modified to install the ingest-attachment plugin, so the contents of certain file formats can be indexed
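
The production system settings mentioned above are mostly host-level kernel limits. A minimal sketch, run as root on every Docker host (262144 is the value Elasticsearch documents as the minimum for production):

 # raise the mmap count limit required by Elasticsearch
 sysctl -w vm.max_map_count=262144
 # persist it across reboots
 echo 'vm.max_map_count=262144' >> /etc/sysctl.conf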


In the folder containing docker-compose.yml, run docker-compose up -d on all nodes to pull, build, and start the containers as daemons.
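
Once the containers are up on all nodes, it is worth checking that they actually formed one cluster. For example, against the published port 9200 of any node:

 # all nodes should show up, and cluster status should be green (or yellow until replicas are set)
 curl 'localhost:9200/_cat/nodes?v'
 curl 'localhost:9200/_cluster/health?pretty'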

You might want each node to hold a full copy of the index for speed, by setting number_of_replicas to the number of cluster nodes minus 1 (one copy is the primary, not a replica). Here ncfiles3 is the index name from the Full text search settings in the Nextcloud admin panel:

 curl -XPUT 'localhost:9200/ncfiles3/_settings?pretty' -H 'Content-Type: application/json' -d'
 {
     "index" : {
         "number_of_replicas" : 4
     }
 }
 '
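
To confirm the replicas were actually allocated and nothing is left unassigned, query the index health and shard allocation the same way:

 curl 'localhost:9200/_cluster/health/ncfiles3?pretty'
 # one primary plus 4 replicas per shard, each on a different node
 curl 'localhost:9200/_cat/shards/ncfiles3?v'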

Access cluster status through Kibana at http://localhost:5601/app/monitoring#/elasticsearch/indices/ncfiles3


(screenshot: Kibana view of the index)
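
With the cluster and index healthy, indexing is triggered from the Nextcloud side as usual. Assuming the fulltextsearch, fulltextsearch_elasticsearch and files_fulltextsearch apps are installed in the ncapp container defined below, something like:

 # first run builds the index; re-run (or use fulltextsearch:live / the cron container) to keep it updated
 docker exec -u www-data ncapp php occ fulltextsearch:index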

./docker-compose.yml

    version: '2.1'
    services:
        elasticsearch:
          build: ./elasticsearch
          container_name: nces
          restart: always
          depends_on:
            mysql-cluster:
              condition: service_healthy
          environment:
             - cluster.name=ncsearch
             - bootstrap.memory_lock=true
             - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
             # the vmX hostnames resolve to the external IPs of the other nodes via the local DNS
             # (172.22.1.254 on the local Docker network; it could be any controlled DNS
             # server or a hosts file)
             - "discovery.zen.ping.unicast.hosts=vm1,vm2,vm3,vm4,vm5" 
             - "network.host=0.0.0.0"
             - "transport.host=0.0.0.0"
             - "transport.bind_host=0.0.0.0"
             # me.public resolves to the current node's external IP that you want the
             # cluster to communicate through; a hosts file could be used instead
             - "transport.publish_host=me.public" 
             - "xpack.security.enabled=false"
             - "transport.tcp.compress=true"
          ulimits:
            memlock:
              soft: -1
              hard: -1
          # location for persistent data, in this case a local SSD mount for speed
          volumes:
            - ../localssd/elasticsearch:/usr/share/elasticsearch/data
          ports: # in case you have nextcloud outside this docker network
            - 9300:9300
            - 9200:9200
          expose:  # in case you have nextcloud inside this docker network
             - "9200"
             - "9300"
          dns:
            - 172.22.1.254
          networks:
            cluster-network:
              ipv4_address: 172.22.1.224
              aliases:
                - elasticsearch

        # just a web interface to monitor status
        kibana:
          image: docker.elastic.co/kibana/kibana:6.1.1
          container_name: kibana
          restart: always
          depends_on:
             - elasticsearch
          environment:
            ELASTICSEARCH_URL: http://172.22.1.224:9200
          ports:
            - 5601:5601
          dns:
            - 172.22.1.254
          networks:
            cluster-network:
              ipv4_address: 172.22.1.223
              aliases:
                - kibana

    networks:
      cluster-network:
        driver: bridge
        enable_ipv6: true
        ipam:
          driver: default
          config:
            - subnet: 172.22.1.0/24
            - subnet: fd4d:6169:6c63:6f77::/64

./elasticsearch/Dockerfile

 FROM docker.elastic.co/elasticsearch/elasticsearch:6.1.1
 RUN /usr/share/elasticsearch/bin/elasticsearch-plugin install --batch ingest-attachment
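
After the image is built and the container is running, the plugin should show up in the standard plugins listing, for example:

 # every node should list ingest-attachment
 curl 'localhost:9200/_cat/plugins?v'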

(screenshot: Kibana view of the cluster)


For reference

the rest of the Nextcloud-related setup, i.e. the remainder of ./docker-compose.yml

 version: '2.1'
 services:
 
     unbound-cluster:
       container_name: unbound
       image: cluster/unbound:1.0
       build: ./data/Dockerfiles/unbound
       command: /usr/sbin/unbound
       depends_on:
         mysql-cluster:
           condition: service_healthy
       ports:
          - '0.0.0.0:53:53/udp'
       volumes:
         - ./data/conf/unbound/unbound.conf:/etc/unbound/unbound.conf:ro
       restart: always
       networks:
         cluster-network:
           ipv4_address: 172.22.1.254
           aliases:
             - unbound
 
     mysql-cluster:
       container_name: db
       image: mariadb:10.2
       command: mysqld --max_allowed_packet=192M --max-connections=3500 --innodb-strict-mode=0 --skip-host-cache --skip-name-resolve --log-warnings=0
       healthcheck:
         test: ["CMD", "mysqladmin", "-unextcloud", "-p3ih3u3ihi4muby8g8un7guhunh3finugu",  "ping", "-h", "localhost"]
         interval: 5s
         timeout: 5s
         retries: 10
       volumes:
         - mysql-vol-1:/var/lib/mysql/
         - ./data/conf/mysql/:/etc/mysql/conf.d/:ro
       environment:
         - MYSQL_ROOT_PASSWORD=3ih3u3ihi4mubyfinugu8g8un7guhunh3
         - MYSQL_DATABASE=nextcloud
         - MYSQL_USER=nextcloud
         - MYSQL_PASSWORD=3ih3u3ihi4muby8g8un7guhunh3finugu
       ports:
         - "0.0.0.0:4567:4567"
         - "0.0.0.0:4568:4568"
         - "0.0.0.0:4444:4444"
       restart: always
       dns:
         - 172.22.1.254
       networks:
         cluster-network:
           ipv4_address: 172.22.1.250
           aliases:
             - mysql
 
     redis-cluster:
       container_name: redis
       image: redis:alpine
       volumes:
         - redis-vol-1:/data/
       restart: always
       dns:
         - 172.22.1.254
       networks:
         cluster-network:
           ipv4_address: 172.22.1.249
           aliases:
             - redis
 
     nextcloudapp:
       container_name: ncapp
       image: nextcloud:12.0.5-fpm
       links:
         - mysql-cluster
       depends_on:
         - mysql-cluster
         - redis-cluster
       environment:
          - MYSQL_HOST=mysql
       expose:
          - "9000"
       volumes:
         - ./ncredis.config.php:/usr/src/nextcloud/config/redis.config.php:rw
         - ./nextcloud/data:/var/www/html/data
         - ./nextcloud/config:/var/www/html/config
         - nextcloudapp6:/var/www/html
       restart: always
       dns:
         - 172.22.1.254
       networks:
         cluster-network:
           ipv4_address: 172.22.1.238
           aliases:
             - nextcloud
 
     nextcloudweb:
       container_name: ncweb
       image: nginx:stable
       depends_on:
          - nextcloudapp
       expose:
          - "81"
       links:
         - nextcloudapp
       volumes:
         - nextcloudapp6:/var/www/html:ro
         - ./nextcloud/config:/var/www/html/config:ro
         - ./ncnginx.conf:/etc/nginx/nginx.conf
       restart: always
       dns:
         - 172.22.1.254
       networks:
         cluster-network:
           ipv4_address: 172.22.1.235
           aliases:
             - nextcloudweb
 
     nextcloudcron:
       container_name: cron
       image: nextcloud:12.0.5-fpm
       restart: always
       depends_on:
         - mysql-cluster
         - redis-cluster
         - nextcloudapp
       volumes_from:
          - nextcloudapp
       user: www-data
       environment:
          - MYSQL_HOST=mysql
       entrypoint: |
         bash -c 'bash -s <<EOF
           trap "break;exit" SIGHUP SIGINT SIGTERM
           while [ ! -f /var/www/html/config/config.php ]; do
             sleep 1
           done
           while true; do
             php -f /var/www/html/cron.php
             sleep 15m
           done
         EOF'
       dns:
         - 172.22.1.254
       networks:
         cluster-network:
           ipv4_address: 172.22.1.227
           aliases:
             - cron
 
     oodata:
       container_name: oodata
       image: onlyoffice/documentserver:5.0.6.14
       environment:
          - ONLYOFFICE_DATA_CONTAINER=true
          - POSTGRESQL_SERVER_HOST=oodb
          - POSTGRESQL_SERVER_PORT=5432
          - POSTGRESQL_SERVER_DB_NAME=onlyoffice
          - POSTGRESQL_SERVER_USER=onlyoffice
          - POSTGRESQL_SERVER_PASS=byfinugu8g8un7guhunh33ih3u3ihi4mu
          - 'RABBITMQ_SERVER_URL=amqp://guest:guest@oorabbit'
          - REDIS_SERVER_HOST=redis-cluster
          - REDIS_SERVER_PORT=6379
       stdin_open: true
       restart: always
       ports:
          - '55580:80'
       volumes:
        - documentdata1:/var/www/onlyoffice/Data
        - documentlog1:/var/log/onlyoffice
        - documentcache1:/var/lib/onlyoffice/documentserver/App_Data/cache/files
        - ./onlyoffice/pub:/documentcache/var/www/onlyoffice/documentserver-example/public/files
        - ./onlyoffice/document_fonts:/usr/share/fonts
       dns:
         - 172.22.1.254
       networks:
         cluster-network:
           ipv4_address: 172.22.1.232
           aliases:
             - oodata
 
     oodoc:
       container_name: doc
       image: onlyoffice/documentserver:5.0.6.14
       depends_on:
          - oodata
          - oodb
          - redis-cluster
          - oorabbit
       environment:
          - ONLYOFFICE_DATA_CONTAINER_HOST=oodata
          - BALANCE=uri depth 3
          - EXCLUDE_PORTS=443
          - HTTP_CHECK=GET /healthcheck
          - EXTRA_SETTINGS=http-check expect string true
       stdin_open: true
       restart: always
       ports:
          - '44480:80'
       expose:
         - '80'
       volumes_from:
          - oodata
       dns:
         - 172.22.1.254
       networks:
         cluster-network:
           ipv4_address: 172.22.1.236
           aliases:
             - doc
 
     oorabbit:
       container_name: oorabbit
       image: rabbitmq
       restart: always
       expose:
         - '5672'
       dns:
         - 172.22.1.254
       networks:
         cluster-network:
           ipv4_address: 172.22.1.230
           aliases:
             - rabbit
 
     oodb:
       container_name: oodb
       image: postgres:9.5
       environment:
         - POSTGRES_DB=onlyoffice
         - POSTGRES_USER=onlyoffice
         - POSTGRES_PASSWORD=byfinugu8g8un7guhunh33ih3u3ihi4mu
       restart: always
       expose:
         - '5432'
       volumes:
         - oodbdata:/var/lib/postgresql
       dns:
         - 172.22.1.254
       networks:
         cluster-network:
           ipv4_address: 172.22.1.229
           aliases:
             - oodb
 
     colab:
       container_name: colab
       image: collabora/code
       restart: always
       expose:
          - '9980'
       volumes:
         - ./colabloolwsd.xml:/etc/loolwsd/loolwsd.xml:ro
       security_opt:
         - seccomp:unconfined
       cap_add:
         - MKNOD
       tty: true
       dns:
         - 172.22.1.254
       networks:
         cluster-network:
           ipv4_address: 172.22.1.228
           aliases:
             - colab
 
 networks:
   cluster-network:
     driver: bridge
     enable_ipv6: true
     ipam:
       driver: default
       config:
         - subnet: 172.22.1.0/24
         - subnet: fd4d:6169:6c63:6f77::/64
 
 volumes:
   mysql-vol-1:
   redis-vol-1:
   nextcloudapp6:
   documentdata1:
   documentlog1:
   documentcache1:
   oodbdata:
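
A quick smoke test for the document-editing services, using the ports published above (the /healthcheck endpoint is OnlyOffice's own, referenced in the oodoc http-check settings; "true" is the expected body):

 # OnlyOffice document server, published on port 44480 above
 curl http://localhost:44480/healthcheck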

./data/Dockerfiles/unbound/Dockerfile

 FROM alpine:3.6
 
 RUN apk add --update --no-cache \
 	curl \
 	unbound \
 	bash \
 	openssl \
 	drill \
 	&& curl -o /etc/unbound/root.hints https://www.internic.net/domain/named.cache \
 	&& chown root:unbound /etc/unbound \
 	&& chmod 775 /etc/unbound
 
 EXPOSE 53/udp 53/tcp
 
 COPY docker-entrypoint.sh /docker-entrypoint.sh
 
 ENTRYPOINT ["/docker-entrypoint.sh"]

./data/Dockerfiles/unbound/docker-entrypoint.sh

 #!/bin/bash
 
 echo "Receiving anchor key..."
 /usr/sbin/unbound-anchor -a /etc/unbound/trusted-key.key
 echo "Receiving root hints..."
 curl -#o /etc/unbound/root.hints https://www.internic.net/domain/named.cache
 
 exec "$@"

./data/conf/unbound/unbound.conf

 server:
   verbosity: 1
   interface: 0.0.0.0
   interface: ::0
   logfile: /dev/stdout
   do-ip4: yes
   do-ip6: yes
   do-udp: yes
   do-tcp: yes
   do-daemonize: no
   access-control: 172.22.1.0/24 allow
   access-control: fd4d:6169:6c63:6f77::/64 allow
   access-control: 172.16.94.0/24 allow
   access-control: 127.0.0.1 allow
   directory: "/etc/unbound"
   username: unbound
   auto-trust-anchor-file: trusted-key.key
   private-address: 10.0.0.0/8
   private-address: 172.16.0.0/12
   private-address: 192.168.0.0/16
   private-address: 169.254.0.0/16
   private-address: fd00::/8
   private-address: fe80::/10
   private-address: fd4d:6169:6c63:6f77::/64
   root-hints: "/etc/unbound/root.hints"
   hide-identity: yes
   hide-version: yes
   max-udp-size: 4096
   msg-buffer-size: 65552
   local-data: "vm1 IN A 172.16.1.6"
   local-data: "vm3 IN A 172.16.1.7"
   local-data: "vm2 IN A 172.16.1.8"
   local-data: "vm4 IN A 172.16.1.9"
   local-data: "vm5 IN A 172.16.1.10"
   local-data: "me.public IN A 172.16.1.6"
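
Since every container uses 172.22.1.254 as its DNS server, it is worth verifying that the unbound container actually serves these records; drill is installed in the image above:

 # both should return A records from the local-data entries
 docker exec unbound drill vm1 @127.0.0.1
 docker exec unbound drill me.public @127.0.0.1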

./data/conf/mysql/my.cnf

 [client]
 default-character-set = utf8mb4
 
 [mysql]
 default-character-set = utf8mb4
 
 [galera]
 # Mandatory settings
 wsrep_on=ON
 wsrep_provider=/usr/lib/libgalera_smm.so
 binlog_format=row
 default_storage_engine=InnoDB
 innodb_autoinc_lock_mode=2
 
 # Allow server to accept connections on all interfaces.
 
 bind-address=0.0.0.0
 wsrep_cluster_name             = "galeranc"            # Same Cluster name for all nodes
 #wsrep_cluster_address          = "gcomm://" # to bootstrap the cluster on the first node
 wsrep_cluster_address          = "gcomm://172.16.1.7,172.16.1.8,172.16.1.9,172.16.1.10"
 wsrep_node_name                = "Node A"                            # Unique node name
 wsrep_node_incoming_address    = 172.16.1.6                            # Our external interface where application comes from
 wsrep_sync_wait                = 1                                   # If you need really full-synchronous replication (Galera 3.6 and newer)
 wsrep_slave_threads            = 8                                  # 4 - 8 per core, not more than wsrep_cert_deps_distance
 
 wsrep_sst_method               = rsync                               # SST method (initial full sync): mysqldump, rsync, rsync_wan, xtrabackup-v2
 wsrep_sst_auth                 = test4:s5ytby80980
 wsrep_sst_receive_address      = 172.16.1.6
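
To confirm the MariaDB nodes actually joined the Galera cluster, check the wsrep status variables on any node (db is the container name above; use the root password from the compose file when prompted):

 # wsrep_cluster_size should equal the number of nodes, wsrep_cluster_status should be Primary
 docker exec -it db mysql -uroot -p -e "SHOW STATUS LIKE 'wsrep_cluster_size'; SHOW STATUS LIKE 'wsrep_cluster_status';"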

./ncredis.config.php

 <?php
 $CONFIG = array (
   'memcache.local' => '\OC\Memcache\Redis',
   'memcache.distributed' => '\OC\Memcache\Redis',
   'memcache.locking' => '\OC\Memcache\Redis',
   'redis' => array(
     'host' => 'redis',
     'port' => 6379,
   ),
 );
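
A quick way to verify that Nextcloud can actually reach this Redis instance for caching and file locking (container names as defined in the compose file above):

 docker exec redis redis-cli ping        # expects PONG
 docker exec ncapp getent hosts redis    # the network alias should resolve inside the app container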

./nextcloud is a GlusterFS FUSE mount, with the ./nextcloud/data and ./nextcloud/config folders on it

./nextcloud/config/config.php

 <?php
 $CONFIG = array (
   'instanceid' => 'ocndhkjj9aj',
   'passwordsalt' => 'V2yuCPhkhkEbj8irXBsIx4kLTu',
   'secret' => 'ERU2Y/yiuRcvBQkGkh9Lk7or0CbxxxxgLS9e',
   'integrity.check.disabled' => true,
   'trusted_domains' => 
   array (
     0 => 'drive.domain.name',
   ),
   'datadirectory' => '/var/www/html/data',
   'apps_paths' => 
   array (
     0 => 
     array (
       'path' => '/var/www/html/apps',
       'url' => '/apps',
       'writable' => false,
     ),
     1 => 
     array (
       'path' => '/var/www/html/custom_apps',
       'url' => '/custom_apps',
       'writable' => true,
     ),
   ),
   'overwrite.cli.url' => 'https://drive.domain.name',
   'dbtype' => 'mysql',
   'version' => '12.0.5.3',
   'dbname' => 'nextcloud',
   'dbhost' => 'mysql',
   'dbport' => '',
   'dbtableprefix' => 'oc_',
   'mysql.utf8mb4' => true,
   'dbuser' => 'nextcloud',
   'dbpassword' => '3ih3u3ihi4muby8g8un7guhunh3finugu',
   'installed' => true,
   'knowledgebaseenabled' => false,
   'logtimezone' => 'America/New_York',
   'cron_log' => true,
   'memcache.locking' => '\\OC\\Memcache\\Redis',
   'memcache.local' => '\\OC\\Memcache\\Redis',
   'memcache.distributed' => '\\OC\\Memcache\\Redis',
   'redis' => 
   array (
     'host' => 'redis',
     'port' => 6379,
   ),
   'trusted_proxies' => 
   array (
     0 => 'fd4d:6169:6c63:6f77::1',
     1 => '172.22.1.0/24',
   ),
   'auth.bruteforce.protection.enabled' => true,
   'mail_smtpmode' => 'smtp',
   'mail_smtpauthtype' => 'PLAIN',
   'mail_from_address' => 'nc',
   'mail_domain' => 'domain.name',
   'mail_smtphost' => '172.22.1.225',
   'app.mail.accounts.default' => 
   array (
     'email' => '%USERID%',
     'imapHost' => 'dovecot',
     'imapPort' => 993,
     'imapUser' => '%USERID%',
     'imapSslMode' => 'ssl',
     'smtpHost' => 'postfix',
     'smtpPort' => 465,
     'smtpUser' => '%USERID%',
     'smtpSslMode' => 'ssl',
   ),
   'mail_smtpauth' => 1,
   'mail_smtpport' => '25',
   'theme' => '',
   'loglevel' => 2,
   'debug' => false,
   'updater.secret' => '$2y$10$8WVrraXic3E4j.skgus877shukhRcUNmm/tx3bWRAIz8b5Q/iau',
   'maintenance' => false,
 );
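
Since this config.php lives on the shared GlusterFS mount, it is identical on every node; a quick sanity check from any of them:

 # should report the installed version and that the instance is not in maintenance mode
 docker exec -u www-data ncapp php occ status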

./ncnginx.conf, set up to sit behind an upstream haproxy which handles HTTPS on its own (or with Cloudflare-like content delivery networks)

 user  www-data;
 worker_processes  1;
 
 
 error_log  /var/log/nginx/error.log warn;
 pid        /var/run/nginx.pid;
 
 
 events {
     worker_connections  1024;
 }
 
 http {
     include       /etc/nginx/mime.types;
     default_type  application/octet-stream;
 	
     log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                       '$status $body_bytes_sent "$http_referer" '
                       '"$http_user_agent" "$http_x_forwarded_for"';
 
     access_log  /var/log/nginx/access.log  main;
 
     sendfile        on;
     #tcp_nopush     on;
 
     keepalive_timeout  65;
 	client_body_buffer_size 10K;
     client_header_buffer_size 1k;
     #large_client_header_buffers 2 1k;
 	large_client_header_buffers 4 32k;
     client_body_timeout 24;
     client_header_timeout 24;
     send_timeout 40;
 
 	map $http_host $this_host {
         "" $host;
         default $http_host;
     }
 
     map $http_x_forwarded_proto $the_scheme {
         default $http_x_forwarded_proto;
         "" $scheme;
     }
 
     map $http_x_forwarded_host $the_host {
        default $http_x_forwarded_host;
        "" $this_host;
     }

 upstream php-handler {
     server 172.22.1.238:9000;
     #server unix:/var/run/php5-fpm.sock;
 }
 
 server {
 	listen 81 default_server proxy_protocol;
 	#listen 443 ssl;
     server_name drive.manhattan.computer;
 
     proxy_ignore_client_abort on;
     fastcgi_ignore_client_abort on;
 	
 	#ssl_certificate /var/www/html/config/cert/cert.pem;
     #ssl_certificate_key /var/www/html/config/cert/cert.key;
 	
     add_header X-Content-Type-Options nosniff;
     add_header X-XSS-Protection "1; mode=block";
     add_header X-Robots-Tag none;
     add_header X-Download-Options noopen;
     add_header X-Permitted-Cross-Domain-Policies none;
     #proxy_buffering off;
 
     # Path to the root of your installation
     root /var/www/html;
  
     # If behind reverse proxy, forwards the correct IP
     set_real_ip_from 172.22.1.0/24;
     real_ip_header X-Forwarded-For;
     real_ip_recursive on;
 
     location = /robots.txt {
         allow all;
         log_not_found off;
         access_log off;
     }
 
     location = /.well-known/carddav {
       return 301 $scheme://$host/remote.php/dav;
     }
     location = /.well-known/caldav {
       return 301 $scheme://$host/remote.php/dav;
     }
 
     # set max upload size
     client_max_body_size 0;
     fastcgi_buffers 64 4K;
 
     # Enable gzip but do not remove ETag headers
     gzip on;
     gzip_vary on;
     gzip_comp_level 4;
     gzip_min_length 256;
     gzip_proxied expired no-cache no-store private no_last_modified no_etag auth;
     gzip_types application/atom+xml application/javascript application/json application/ld+json application/manifest+json application/rss+xml application/vnd.geo+json application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/bmp image/svg+xml image/x-icon text/cache-manifest text/css text/plain text/vcard text/vnd.rim.location.xloc text/vtt text/x-component text/x-cross-domain-policy;
 
     location / {
         rewrite ^ /index.php$uri;
     }
 
     location ~ ^/(?:build|tests|config|lib|3rdparty|templates|data)/ {
         deny all;
     }
     location ~ ^/(?:\.|autotest|occ|issue|indie|db_|console) {
         deny all;
     }
     # static files
     location ^~ /loleaflet {
         proxy_pass http://172.22.1.228:9980;
         proxy_set_header Host $http_host;
        add_header X-XSS-Protection ""; # it adds another one anyway somewhere inside lol
     }
 
     # WOPI discovery URL
     location ^~ /hosting/discovery {
         proxy_pass http://172.22.1.228:9980;
         proxy_set_header Host $http_host;
     }
 
     # main websocket
     location ~ ^/lool/(.*)/ws$ {
         proxy_pass http://172.22.1.228:9980;
         proxy_set_header Upgrade $http_upgrade;
         proxy_set_header Connection "Upgrade";
         proxy_set_header Host $http_host;
         proxy_read_timeout 36000s;
     }
 
     # download, presentation and image upload
     location ~ ^/lool {
         proxy_pass http://172.22.1.228:9980;
         proxy_set_header Host $http_host;
     }
 
     # Admin Console websocket
     location ^~ /lool/adminws {
         proxy_pass http://172.22.1.228:9980;
         proxy_set_header Upgrade $http_upgrade;
         proxy_set_header Connection "Upgrade";
         proxy_set_header Host $http_host;
         proxy_read_timeout 36000s;
     }
 
 	location ~* ^/ds-vpath/ {
 		rewrite /ds-vpath/(.*) /$1  break;
                 proxy_pass http://172.22.1.236;
                 proxy_redirect     off;
 
                 client_max_body_size 100m;
 
                 proxy_http_version 1.1;
                 proxy_set_header Upgrade $http_upgrade;
                 proxy_set_header Connection "upgrade";
                 proxy_set_header Host $http_host;
                 proxy_set_header X-Real-IP $remote_addr;
                 proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
                 proxy_set_header X-Forwarded-Host $the_host/ds-vpath;
                 proxy_set_header X-Forwarded-Proto $the_scheme;
     }
 
     location ~ ^/(?:index|remote|public|cron|core/ajax/update|status|ocs/v[12]|updater/.+|ocs-provider/.+)\.php(?:$|/) {
         fastcgi_split_path_info ^(.+\.php)(/.*)$;
         include fastcgi_params;
         fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
         fastcgi_param PATH_INFO $fastcgi_path_info;
         fastcgi_param HTTPS on;
         #Avoid sending the security headers twice
         fastcgi_param modHeadersAvailable true;
         fastcgi_param front_controller_active true;
         #add_header X-debug-message "A php $document_root $fastcgi_script_name $fastcgi_path_info" always; 
         fastcgi_pass php-handler;
         fastcgi_intercept_errors on;
         fastcgi_request_buffering off;
     }
 
     location ~ ^/(?:updater|ocs-provider)(?:$|/) {
         try_files $uri/ =404;
         index index.php;
     }
 
     # Adding the cache control header for js and css files
     # Make sure it is BELOW the PHP block
     location ~ \.(?:css|js|woff|svg|gif)$ {
         try_files $uri /index.php$uri$is_args$args;
         add_header Cache-Control "public, max-age=15778463";
         add_header X-Content-Type-Options nosniff;
         add_header X-XSS-Protection "1; mode=block";
         add_header X-Robots-Tag none;
         add_header X-Download-Options noopen;
         add_header X-Permitted-Cross-Domain-Policies none;
         # Optional: Don't log access to assets
         access_log off;
     }
 
     location ~ \.(?:png|html|ttf|ico|jpg|jpeg)$ {
         try_files $uri /index.php$uri$is_args$args;
         # Optional: Don't log access to other assets
         access_log off;
     }
 }
 }
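
Whenever this file changes, it can be validated and reloaded in place without recreating the container:

 docker exec ncweb nginx -t          # syntax check
 docker exec ncweb nginx -s reload   # graceful reload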

./colabloolwsd.xml

 <config>
 
     <!-- Note: 'default' attributes are used to document a setting's default value as well as to use as fallback. -->
     <!-- Note: When adding a new entry, a default must be set in WSD in case the entry is missing upon deployment. -->
 
     <tile_cache_path desc="Path to a directory where to keep the tile cache." type="path" relative="false" default="/var/cache/loolwsd"></tile_cache_path>
     <sys_template_path desc="Path to a template tree with shared libraries etc to be used as source for chroot jails for child processes." type="path" relative="true" default="systemplate"></sys_template_path>
     <lo_template_path desc="Path to a LibreOffice installation tree to be copied (linked) into the jails for child processes. Should be on the same file system as systemplate." type="path" relative="false" default="/opt/collaboraoffice5.3"></lo_template_path>
     <child_root_path desc="Path to the directory under which the chroot jails for the child processes will be created. Should be on the same file system as systemplate and lotemplate. Must be an empty directory." type="path" relative="true" default="jails"></child_root_path>
 
     <server_name desc="Hostname:port of the server running loolwsd. If empty, it's derived from the request." type="string" default=""></server_name>
     <file_server_root_path desc="Path to the directory that should be considered root for the file server. This should be the directory containing loleaflet." type="path" relative="true" default="loleaflet/../"></file_server_root_path>
 
     <memproportion desc="The maximum percentage of system memory consumed by all of the LibreOffice Online, after which we start cleaning up idle documents" type="double" default="80.0"></memproportion>
     <num_prespawn_children desc="Number of child processes to keep started in advance and waiting for new clients." type="uint" default="1">1</num_prespawn_children>
     <per_document desc="Document-specific settings, including LO Core settings.">
         <max_concurrency desc="The maximum number of threads to use while processing a document." type="uint" default="4">4</max_concurrency>
         <idle_timeout_secs desc="The maximum number of seconds before unloading an idle document. Defaults to 1 hour." type="uint" default="3600">3600</idle_timeout_secs>
         <!-- Idle save and auto save are checked every 30 seconds -->
         <idlesave_duration_secs desc="The number of idle seconds after which document, if modified, should be saved. Defaults to 30 seconds." type="uint" default="30">30</idlesave_duration_secs>
         <autosave_duration_secs desc="The number of seconds after which document, if modified, should be saved. Defaults to 5 minutes." type="uint" default="300">300</autosave_duration_secs>
         <limit_virt_mem_kb desc="The maximum virtual memory allowed to each document process. 0 for unlimited, 1700 min." type="uint">0</limit_virt_mem_kb>
         <limit_data_mem_kb desc="The maximum memory data segment allowed to each document process. 0 for unlimited." type="uint">0</limit_data_mem_kb>
         <limit_stack_mem_kb desc="The maximum stack size allowed to each document process. 0 for unlimited." type="uint">8000</limit_stack_mem_kb>
         <limit_file_size_mb desc="The maximum file size allowed to each document process to write. 0 for unlimited." type="uint">0</limit_file_size_mb>
         <limit_num_open_files desc="The maximum number of files allowed to each document process to open. 0 for unlimited." type="uint">0</limit_num_open_files>
     </per_document>
 
     <per_view desc="View-specific settings.">
         <out_of_focus_timeout_secs desc="The maximum number of seconds before dimming and stopping updates when the browser tab is no longer in focus. Defaults to 60 seconds." type="uint" default="60">60</out_of_focus_timeout_secs>
         <idle_timeout_secs desc="The maximum number of seconds before dimming and stopping updates when the user is no longer active (even if the browser is in focus). Defaults to 15 minutes." type="uint" default="900">900</idle_timeout_secs>
     </per_view>
 
     <loleaflet_html desc="Allows UI customization by replacing the single endpoint of loleaflet.html" type="string" default="loleaflet.html">loleaflet.html</loleaflet_html>
 
     <logging>
         <color type="bool">true</color>
         <level type="string" desc="Can be 0-8, or none (turns off logging), fatal, critical, error, warning, notice, information, debug, trace" default="warning">warning</level>
         <file enable="false">
             <property name="path" desc="Log file path.">/var/log/loolwsd.log</property>
             <property name="rotation" desc="Log file rotation strategy. See Poco FileChannel.">never</property>
             <property name="archive" desc="Append either timestamp or number to the archived log filename.">timestamp</property>
             <property name="compress" desc="Enable/disable log file compression.">true</property>
             <property name="purgeAge" desc="The maximum age of log files to preserve. See Poco FileChannel.">10 days</property>
             <property name="purgeCount" desc="The maximum number of log archives to preserve. Use 'none' to disable purging. See Poco FileChannel.">10</property>
             <property name="rotateOnOpen" desc="Enable/disable log file rotation on opening.">true</property>
             <property name="flush" desc="Enable/disable flushing after logging each line. May harm performance. Note that without flushing after each line, the log lines from the different processes will not appear in chronological order.">false</property>
         </file>
     </logging>
 
     <loleaflet_logging desc="Logging in the browser console" default="false">false</loleaflet_logging>
 
     <trace desc="Dump commands and notifications for replay. When 'snapshot' is true, the source file is copied to the path first." enable="true">
         <path desc="Output path to hold trace file and docs. Use '%' for timestamp to avoid overwriting." compress="true" snapshot="false">/tmp/looltrace-%.gz</path>
         <filter>
             <message desc="Regex pattern of messages to exclude"></message>
         </filter>
         <outgoing>
             <record desc="Whether or not to record outgoing messages" default="false">false</record>
         </outgoing>
     </trace>
 
     <ssl desc="SSL settings">
         <enable type="bool" default="true">false</enable>
         <termination desc="Connection via proxy where loolwsd acts as working via https, but actually uses http." type="bool" default="true">true</termination>
         <cert_file_path desc="Path to the cert file" relative="false">/etc/loolwsd/cert.pem</cert_file_path>
         <key_file_path desc="Path to the key file" relative="false">/etc/loolwsd/key.pem</key_file_path>
         <ca_file_path desc="Path to the ca file" relative="false">/etc/loolwsd/ca-chain.cert.pem</ca_file_path>
         <cipher_list desc="List of OpenSSL ciphers to accept" default="ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH"></cipher_list>
         <hpkp desc="Enable HTTP Public key pinning" enable="false" report_only="false">
             <max_age desc="HPKP's max-age directive - time in seconds browser should remember the pins" enable="true">1000</max_age>
             <report_uri desc="HPKP's report-uri directive - pin validation failure are reported at this URL" enable="false"></report_uri>
             <pins desc="Base64 encoded SPKI fingerprints of keys to be pinned">
             <pin></pin>
             </pins>
         </hpkp>
     </ssl>
 
     <storage desc="Backend storage">
         <filesystem allow="false" />
         <wopi desc="Allow/deny wopi storage. Mutually exclusive with webdav." allow="true">
             <host desc="Regex pattern of hostname to allow or deny." allow="true">drive\.domain\.name</host>
             <host desc="Regex pattern of hostname to allow or deny." allow="true">colab</host>
             <host desc="Regex pattern of hostname to allow or deny." allow="true">10\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}</host>
             <host desc="Regex pattern of hostname to allow or deny." allow="true">172\.1[6789]\.[0-9]{1,3}\.[0-9]{1,3}</host>
             <host desc="Regex pattern of hostname to allow or deny." allow="true">172\.2[0-9]\.[0-9]{1,3}\.[0-9]{1,3}</host>
             <host desc="Regex pattern of hostname to allow or deny." allow="true">172\.3[01]\.[0-9]{1,3}\.[0-9]{1,3}</host>
             <host desc="Regex pattern of hostname to allow or deny." allow="true">192\.168\.[0-9]{1,3}\.[0-9]{1,3}</host>
             <host desc="Regex pattern of hostname to allow or deny." allow="true">192\.168\.1\.1</host>
             <host desc="Regex pattern of hostname to allow or deny." allow="true">localhost</host>
             <max_file_size desc="Maximum document size in bytes to load. 0 for unlimited." type="uint">0</max_file_size>
         </wopi>
         <webdav desc="Allow/deny webdav storage. Mutually exclusive with wopi." allow="false">
             <host desc="Hostname to allow" allow="false">vm1</host>
         </webdav>
     </storage>
 
     <tile_cache_persistent desc="Should the tiles persist between two editing sessions of the given document?" type="bool" default="true">true</tile_cache_persistent>
 
     <admin_console desc="Web admin console settings.">
         <username desc="The username of the admin console. Must be set.">admin</username>
         <password desc="The password of the admin console. Must be set.">srt286Wg6</password>
     </admin_console>
 
 </config>
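
The nginx config above proxies /hosting/discovery to this container, so a quick check that loolwsd is up and answering (run from a host attached to cluster-network, since port 9980 is only exposed internally):

 # should return the WOPI discovery XML
 curl http://172.22.1.228:9980/hosting/discovery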

notes:

  • the ooXXXXX containers are the OnlyOffice deployment, used to edit docx files;

  • colab is the Collabora deployment, used to edit the other document types;

  • GlusterFS FUSE mount in fstab: vm1:/nextcloud /home/user/pathtoworkdir/nextcloud glusterfs defaults,_netdev,negative-timeout=10,attribute-timeout=30,fopen-keep-cache,direct-io-mode=enable,fetch-attempts=5 0 0

  • GlusterFS version 3.12.5 with volume optimizations:

    server.outstanding-rpc-limit: 128
    cluster.choose-local: on
    cluster.nufa: on
    performance.cache-size: 2GB
    performance.enable-least-priority: off
    performance.cache-refresh-timeout: 10
    network.inode-lru-limit: 90000
    performance.cache-invalidation: on
    performance.stat-prefetch: on
    features.cache-invalidation-timeout: 600
    features.cache-invalidation: on
    server.event-threads: 8
    performance.md-cache-timeout: 600
    performance.client-io-threads: on
    client.event-threads: 8
    cluster.lookup-optimize: on
    performance.nl-cache-timeout: 600
    performance.nl-cache: on
    cluster.lookup-unhashed: auto
    performance.strict-o-direct: on
    cluster.read-hash-mode: 0
    cluster.readdir-optimize: on
    performance.rda-cache-limit: 128MB
    cluster.shd-max-threads: 12
    
  • for OnlyOffice and Collabora not to overlap on file extensions, update the Collabora Nextcloud app file /var/www/html/custom_apps/richdocuments/js/viewer/viewer.js so it does not include docx and the other ...x file types:

    /* globals FileList, OCA.Files.fileActions, oc_debug */
    var odfViewer = {
       isDocuments : false,
       supportedMimes: [
             'application/vnd.oasis.opendocument.text',
             'application/vnd.oasis.opendocument.spreadsheet',
             'application/vnd.oasis.opendocument.graphics',
             'application/vnd.oasis.opendocument.presentation',
             'application/vnd.lotus-wordpro',
             'image/svg+xml',
             'application/vnd.visio',
             'application/vnd.wordperfect',
             'application/msonenote',
             'application/msword',
             'application/rtf',
             'text/rtf',
             'application/vnd.ms-word.document.macroEnabled.12',
             'application/vnd.ms-word.template.macroEnabled.12',
             'application/vnd.ms-excel',
             'application/vnd.ms-excel.sheet.macroEnabled.12',
             'application/vnd.ms-excel.template.macroEnabled.12',
             'application/vnd.ms-excel.addin.macroEnabled.12',
             'application/vnd.ms-excel.sheet.binary.macroEnabled.12',
             'application/vnd.ms-powerpoint',
             'application/vnd.ms-powerpoint.addin.macroEnabled.12',
             'application/vnd.ms-powerpoint.presentation.macroEnabled.12',
             'application/vnd.ms-powerpoint.template.macroEnabled.12',
             'application/vnd.ms-powerpoint.slideshow.macroEnabled.12'
     ],
     ................................
    
  • upstream haproxy-docker.yml

     haproxy:
        container_name: ha
        depends_on:
          - nginx-cluster
        image: haproxy:1.8.3
        volumes:
          - /var/run/docker.sock:/var/run/docker.sock
          - ./haproxy:/usr/local/etc/haproxy:rw
        ports:
          - "443:443"
          - "80:80"
          - "40036:40036" #statistics
        restart: always
    

./haproxy/haproxy.cfg

  global
     nbproc 2
     cpu-map 1 0
     cpu-map 2 1
     log 127.0.0.1 local0
     log 127.0.0.1 local1 notice
     log-send-hostname
     maxconn 4096
     pidfile /var/run/haproxy.pid
     daemon
     stats socket /var/run/haproxy.stats level admin
     ssl-default-bind-options no-sslv3
     ssl-default-bind-ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:AES128-GCM-SHA256:AES128-SHA256:AES128-SHA:AES256-GCM-SHA384:AES256-SHA256:AES256-SHA:DHE-DSS-AES128-SHA:DES-CBC3-SHA
     tune.ssl.default-dh-param 2048
   defaults
     balance roundrobin
     log global
     mode http
     option redispatch
     option httplog
     option dontlognull
     option forwardfor
     option http-server-close
     timeout connect 5000
     timeout client 50000
     timeout server 50000
   listen stats
     bind :40036
     mode http
     stats enable
     timeout connect 10s
     timeout client 1m
     timeout server 1m
     stats hide-version
     stats realm Haproxy\ Statistics
     stats uri /
     stats auth hello:ljdklu787jodd98dp
   frontend default_port_80
     bind :80
     redirect scheme https code 301
   frontend default_port_443
     mode http
     bind :443 ssl crt /usr/local/etc/haproxy/cert/ alpn h2,http/1.1
     http-response set-header Strict-Transport-Security "max-age=16000000; includeSubDomains"
     timeout client 3540000
     default_backend default_bk
   backend default_bk
     mode http
     option forwardfor
     http-response set-header Strict-Transport-Security max-age=31536000;includeSubDomains
     http-request set-header X-Forwarded-Proto https if { ssl_fc }
     http-request set-header X-Forwarded-Proto http if !{ ssl_fc }
     server nextcloudweb1 nextcloudweb1:81 send-proxy check inter 2000 rise 2 fall 3
     .................
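
After editing haproxy.cfg, the configuration can be checked before restarting the container:

 # -c only validates the configuration, it does not start the proxy
 docker exec ha haproxy -c -f /usr/local/etc/haproxy/haproxy.cfg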