commit 2cf6e7f8e5
root 2024-09-03 23:02:49 -04:00
14 changed files with 882 additions and 0 deletions

@@ -0,0 +1,50 @@
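# http3-proxy: two replicas of fijxu's http3-proxy image; haproxy's
# "http3-proxy" backend (see the haproxy config further down) load-balances
# across them, presumably to fetch YouTube thumbnails (/vi/) and video data
# over HTTP/3 on behalf of Invidious.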
services:
http3-proxy:
image: git.nadeko.net/fijxu/http3-proxy:latest
container_name: invidious-http3-proxy
restart: unless-stopped
environment:
DISABLE_WEBP: 1
#volumes:
#- http3-socket:/app/socket:rw
deploy:
replicas: 2
# http3-proxy-test:
# image: git.nadeko.net/fijxu/http3-proxy:latest
# restart: unless-stopped
# environment:
# DISABLE_WEBP: 1
# volumes:
# - http3-proxy-socket:/app/socket:rw
# http3-proxy-nginx:
# image: nginx:latest
# restart: unless-stopped
# volumes:
# - ./nginx.conf:/etc/nginx/nginx.conf:ro
# depends_on:
# - http3-proxy
# ports:
# - "127.0.0.1:10012:3000"
# - "100.64.0.4:10012:3000"
#
# http3-proxy-nginx-unix:
# image: nginx:latest
# restart: unless-stopped
# volumes:
# - ./nginx-unix.conf:/etc/nginx/nginx.conf:ro
# - type: bind
# source: /run/http3-proxy-nginx
# target: /tmp
# depends_on:
# - http3-proxy
networks:
default:
name: invidious
external: true
volumes:
http3-proxy-socket:
name: "http3-proxy-socket"

@@ -0,0 +1,21 @@
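# nginx in front of the http3-proxy replicas, listening on a unix socket
# (the bind mount is set up by the commented-out http3-proxy-nginx-unix
# service in the compose file above). Using a variable in proxy_pass together
# with the Docker resolver (127.0.0.11) makes nginx re-resolve "http3-proxy"
# at request time, so all replicas are reached.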
user www-data;
events {
worker_connections 1024;
}
http {
server {
listen unix:/tmp/socket.sock;
access_log off;
error_log /var/log/nginx/error.log;
location / {
resolver 127.0.0.11;
set $backend "http3-proxy";
proxy_buffering off;
proxy_pass http://$backend:8080;
proxy_http_version 1.1; # to keep alive
proxy_set_header Connection ""; # to keep alive
}
}
}

@@ -0,0 +1,22 @@
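# Same as the unix-socket config above, but listening on TCP :3000 (published
# on 127.0.0.1:10012 by the commented-out http3-proxy-nginx service).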
user www-data;
events {
worker_connections 1024;
}
http {
server {
listen 3000;
listen [::]:3000;
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
location / {
resolver 127.0.0.11;
set $backend "http3-proxy";
proxy_buffering off;
proxy_pass http://$backend:8080;
proxy_http_version 1.1; # to keep alive
proxy_set_header Connection ""; # to keep alive
}
}
}

@@ -0,0 +1,16 @@
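# inv-sig-helper: resolves YouTube player signatures for Invidious; the
# Invidious containers point at it via signature_server: "inv_sig_helper:12999".
# With 4 replicas, the service name resolves to all of them through Docker DNS.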
services:
inv_sig_helper:
image: quay.io/invidious/inv-sig-helper
container_name: invidious-signature-helper
restart: unless-stopped
init: true
command: ["--tcp", "0.0.0.0:12999"]
environment:
- RUST_LOG=info
deploy:
replicas: 4
networks:
default:
name: invidious
external: true

@@ -0,0 +1,15 @@
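# Plain TCP (stream) proxy exposing the inv_sig_helper replicas on :3001;
# apparently an alternative to pointing Invidious directly at inv_sig_helper:12999.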
events {
worker_connections 1024;
}
stream {
upstream sig {
server inv_sig_helper:12999;
}
server {
listen 3001;
listen [::]:3001;
#access_log off;
resolver 127.0.0.11;
proxy_pass sig;
}
}

@@ -0,0 +1,410 @@
# Docker compose file for inv.nadeko.net
services:
valkey:
image: valkey/valkey:7.2-alpine
container_name: invidious-valkey
restart: unless-stopped
volumes:
- invidious-valkey:/data
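# invidious-refresher: a single Invidious instance that, per config-refresher.yml,
# presumably runs the channel/feed refresh jobs which are disabled on the
# public-facing replicas below.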
invidious-refresher:
image: "git.nadeko.net/fijxu/invidious:${TAG}"
container_name: invidious-refresher
restart: unless-stopped
volumes:
- ./config/config-refresher.yml:/etc/invidious/config.yml:ro
- /var/run/postgresql/.s.PGSQL.5432:/var/run/postgresql/.s.PGSQL.5432:rw
environment:
INVIDIOUS_CONFIG_FILE: /etc/invidious/config.yml
depends_on:
- valkey
- invidious-pgbouncer
# INVIDIOUS CLEARNET
invidious:
env_file:
- .env
image: "git.nadeko.net/fijxu/invidious:${TAG}"
container_name: invidious
restart: unless-stopped
deploy:
replicas: 6
volumes:
- /var/run/postgresql/.s.PGSQL.5432:/var/run/postgresql/.s.PGSQL.5432:rw
environment:
INVIDIOUS_CONFIG: |
feed_threads: 0
log_level: Warn
db:
dbname: "${PGSQL_DBNAME}"
user: "${PGSQL_USER}"
password: "${PGSQL_PASS}"
host: invidious-pgbouncer
port: 5432
redis_url: tcp://valkey:6379
donation_url: "https://nadeko.net/donate"
contact_url: "https://nadeko.net/contact"
home_domain: "nadeko.net"
materialious_domain: "materialious.nadeko.net"
full_refresh: false
https_only: true
hmac_key: "${HMAC_KEY}"
domain: "inv.nadeko.net"
use_pubsub_feeds: true
popular_enabled: true
captcha_enabled: true
login_enabled: true
registration_enabled: true
statistics_enabled: true
external_port: 443
default_user_preferences:
annotations: true
disable_proxy: ["downloads"]
annotations_subscribed: true
autoplay: true
captions: ["English", "Spanish", "English (auto-generated)"]
comments: ["youtube", ""]
dark_mode: auto
latest_only: false
listen: false
locale: en-US
watch_history: true
max_results: 60
notifications_only: false
banner: "${BANNER}"
player_style: invidious
quality: hd720
quality_dash: auto
default_home: Popular
feed_menu: ["Popular", "Trending", "Subscriptions", "Playlists"]
automatic_instance_redirect: true
region: CL
sort: published
extend_desc: true
save_player_pos: true
cache_annotations: true
hsts: true
enable_user_notifications: false
modified_source_code_url: "${MODIFIED_SOURCE_CODE}"
force_resolve: ipv4
pool_size: 100
use_innertube_for_captions: true
jobs:
refresh_channels:
enable: false
subscribe_to_feeds:
enable: false
po_token: "${PO_TOKEN}"
visitor_data: "${VISITOR_DATA}"
signature_server: "inv_sig_helper:12999"
depends_on:
- valkey
- invidious-pgbouncer
healthcheck:
test: wget -nv --tries=1 --spider http://127.0.0.1:3000/api/v1/trending || exit 1
interval: 30s
timeout: 5s
retries: 2
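# The Tor and I2P services below are near-copies of the clearnet one; the main
# differences are the domain, https_only/hsts, banner, log level, a few default
# preferences (quality, automatic_instance_redirect) and the extra disabled
# refresh_feeds job.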
# INVIDIOUS TOR
invidious-tor:
env_file:
- .env
image: "git.nadeko.net/fijxu/invidious:${TAG}"
container_name: invidious-tor
restart: unless-stopped
deploy:
replicas: 2
#volumes:
#- /var/run/postgresql/.s.PGSQL.5432:/var/run/postgresql/.s.PGSQL.5432:rw
environment:
INVIDIOUS_CONFIG: |
feed_threads: 0
log_level: Debug
db:
dbname: "${PGSQL_DBNAME}"
user: "${PGSQL_USER}"
password: "${PGSQL_PASS}"
host: invidious-pgbouncer
port: 5432
redis_url: tcp://valkey:6379
donation_url: "https://nadeko.net/donate"
contact_url: "https://nadeko.net/contact"
home_domain: "nadeko.net"
materialious_domain: "materialious.nadeko.net"
full_refresh: false
https_only: false
hmac_key: "${HMAC_KEY}"
domain: "inv.nadekonw7plitnjuawu6ytjsl7jlglk2t6pyq6eftptmiv3dvqndwvyd.onion"
use_pubsub_feeds: true
popular_enabled: true
captcha_enabled: true
login_enabled: true
registration_enabled: true
statistics_enabled: true
external_port: 443
default_user_preferences:
annotations: true
disable_proxy: ["downloads"]
annotations_subscribed: true
autoplay: true
captions: ["English", "Spanish", "English (auto-generated)"]
comments: ["youtube", ""]
dark_mode: auto
latest_only: false
listen: false
locale: en-US
watch_history: true
max_results: 60
notifications_only: false
banner: "${TOR_BANNER}"
player_style: invidious
quality: dash
quality_dash: auto
default_home: Popular
feed_menu: ["Popular", "Trending", "Subscriptions", "Playlists"]
automatic_instance_redirect: false
region: CL
sort: published
extend_desc: true
save_player_pos: true
cache_annotations: true
hsts: false
enable_user_notifications: false
modified_source_code_url: "${MODIFIED_SOURCE_CODE}"
force_resolve: ipv4
pool_size: 100
use_innertube_for_captions: true
jobs:
refresh_channels:
enable: false
refresh_feeds:
enable: false
subscribe_to_feeds:
enable: false
po_token: "${PO_TOKEN}"
visitor_data: "${VISITOR_DATA}"
signature_server: "inv_sig_helper:12999"
depends_on:
- valkey
- invidious-pgbouncer
healthcheck:
test: wget -nv --tries=1 --spider http://127.0.0.1:3000/api/v1/trending || exit 1
interval: 30s
timeout: 5s
retries: 2
# INVIDIOUS I2P
invidious-i2p:
env_file:
- .env
image: "git.nadeko.net/fijxu/invidious:${TAG}"
container_name: invidious-i2p
restart: unless-stopped
deploy:
replicas: 2
volumes:
- /var/run/postgresql/.s.PGSQL.5432:/var/run/postgresql/.s.PGSQL.5432:rw
environment:
INVIDIOUS_CONFIG: |
feed_threads: 0
log_level: "Off"
db:
dbname: "${PGSQL_DBNAME}"
user: "${PGSQL_USER}"
password: "${PGSQL_PASS}"
host: invidious-pgbouncer
port: 5432
redis_url: tcp://valkey:6379
donation_url: "https://nadeko.net/donate"
contact_url: "https://nadeko.net/contact"
home_domain: "nadeko.net"
materialious_domain: "materialious.nadeko.net"
full_refresh: false
https_only: false
hmac_key: "${HMAC_KEY}"
domain: "inv.zzls.i2p"
use_pubsub_feeds: true
popular_enabled: true
captcha_enabled: true
login_enabled: true
registration_enabled: true
statistics_enabled: true
external_port: 443
default_user_preferences:
annotations: true
disable_proxy: ["downloads"]
annotations_subscribed: true
autoplay: true
captions: ["English", "Spanish", "English (auto-generated)"]
comments: ["youtube", ""]
dark_mode: auto
latest_only: false
listen: false
locale: en-US
watch_history: true
max_results: 60
notifications_only: false
banner: "${I2P_BANNER}"
player_style: invidious
quality: dash
quality_dash: auto
default_home: Popular
feed_menu: ["Popular", "Trending", "Subscriptions", "Playlists"]
automatic_instance_redirect: false
region: CL
sort: published
extend_desc: true
save_player_pos: true
cache_annotations: true
hsts: false
enable_user_notifications: false
modified_source_code_url: "${MODIFIED_SOURCE_CODE}"
force_resolve: ipv4
pool_size: 100
use_innertube_for_captions: true
jobs:
refresh_channels:
enable: false
refresh_feeds:
enable: false
subscribe_to_feeds:
enable: false
po_token: "${PO_TOKEN}"
visitor_data: "${VISITOR_DATA}"
signature_server: "inv_sig_helper:12999"
depends_on:
- valkey
- invidious-pgbouncer
healthcheck:
test: wget -nv --tries=1 --spider http://127.0.0.1:3000/api/v1/trending || exit 1
interval: 30s
timeout: 5s
retries: 2
#https://github.com/Sommerwiesel/invidious-nerdvpn/blob/nerdvpn/docker-compose.yml#L438
# INVIDIOUS PGBOUNCER
# Thanks nerdvpn
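# Pools connections from all Invidious replicas to the host PostgreSQL over the
# bind-mounted unix socket (transaction pooling, up to 1000 client connections).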
invidious-pgbouncer:
env_file:
- .env
image: edoburu/pgbouncer
container_name: invidious-pgbouncer
environment:
- DB_HOST=/var/run/postgresql
- DB_PORT=5432
- DB_USER=$PGSQL_USER
- DB_PASSWORD=$PGSQL_PASS
- DB_NAME=$PGSQL_DBNAME
- ADMIN_USERS=pgbouncer
- POOL_MODE=transaction
# - DEFAULT_POOL_SIZE=80
# - MIN_POOL_SIZE=20
# - RESERVE_POOL_SIZE=80
# - RESERVE_POOL_TIMEOUT=5
# - MAX_DB_CONNECTIONS=160
# - MAX_USER_CONNECTIONS=160
- MAX_CLIENT_CONN=1000
- AUTH_TYPE=scram-sha-256
- IGNORE_STARTUP_PARAMETERS=extra_float_digits
volumes:
- ./pgbounceruserlist.txt:/etc/pgbouncer/userlist.txt:rw
- /var/run/postgresql/.s.PGSQL.5432:/var/run/postgresql/.s.PGSQL.5432:rw
healthcheck:
test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB -h /var/run/postgresql"]
interval: 5m
timeout: 15s
retries: 2
# MATERIALIOUS
materialious:
image: wardpearce/materialious:latest
container_name: materialious
restart: unless-stopped
ports:
- 127.0.0.1:10013:80
environment:
VITE_DEFAULT_DEARROW_THUMBNAIL_INSTANCE: "https://dearrow-thumb.ajay.app"
#VITE_DEFAULT_SYNCIOUS_INSTANCE: "https://syncious.nadeko.net"
VITE_DEFAULT_RETURNYTDISLIKES_INSTANCE: "https://returnyoutubedislikeapi.com"
VITE_DEFAULT_PEERJS_PATH: "/"
VITE_DEFAULT_PEERJS_HOST: "peerjs.nadeko.net"
VITE_DEFAULT_SPONSERBLOCK_INSTANCE: "https://sponsor.ajay.app"
VITE_DEFAULT_DEARROW_INSTANCE: "https://sponsor.ajay.app"
VITE_DEFAULT_INVIDIOUS_INSTANCE: "https://inv.nadeko.net"
VITE_DEFAULT_PEERJS_PORT: 443
VITE_DEFAULT_SETTINGS: '{"darkMode": true, "themeColor": "#FFB3FD", "autoPlay": true, "alwaysLoop": false, "proxyVideos": false, "listenByDefault": false, "savePlaybackPosition": true, "dashEnabled": true, "theatreModeByDefault": false, "autoplayNextByDefault": true, "returnYtDislikes": true, "searchSuggestions": true, "previewVideoOnHover": true, "sponsorBlock": true, "sponsorBlockCategories": "sponsor,interaction,selfpromo", "deArrowEnabled": false, "playerMiniPlayer": true, "syncious": false}'
# MATERIALIOUS TOR
materialious-tor:
image: wardpearce/materialious:latest
container_name: materialious-tor
restart: unless-stopped
ports:
- 127.0.0.1:10070:80
environment:
VITE_DEFAULT_DEARROW_THUMBNAIL_INSTANCE: "https://dearrow-thumb.ajay.app"
#VITE_DEFAULT_SYNCIOUS_INSTANCE: "https://syncious.nadeko.net"
VITE_DEFAULT_RETURNYTDISLIKES_INSTANCE: "https://returnyoutubedislikeapi.com"
VITE_DEFAULT_PEERJS_PATH: "/"
VITE_DEFAULT_PEERJS_HOST: "peerjs.nadeko.net"
VITE_DEFAULT_SPONSERBLOCK_INSTANCE: "https://sponsor.ajay.app"
VITE_DEFAULT_DEARROW_INSTANCE: "https://sponsor.ajay.app"
VITE_DEFAULT_INVIDIOUS_INSTANCE: "http://inv.nadekonw7plitnjuawu6ytjsl7jlglk2t6pyq6eftptmiv3dvqndwvyd.onion"
VITE_DEFAULT_PEERJS_PORT: 443
VITE_DEFAULT_SETTINGS: '{"darkMode": true, "themeColor": "#FFB3FD", "autoPlay": true, "alwaysLoop": false, "proxyVideos": false, "listenByDefault": false, "savePlaybackPosition": true, "dashEnabled": true, "theatreModeByDefault": false, "autoplayNextByDefault": true, "returnYtDislikes": true, "searchSuggestions": true, "previewVideoOnHover": true, "sponsorBlock": true, "sponsorBlockCategories": "sponsor,interaction,selfpromo", "deArrowEnabled": false, "playerMiniPlayer": true, "syncious": false}'
# API EXTENDED IS BROKEN
# api_extended:
# env_file:
# - .env
# image: wardpearce/invidious_api_extended:latest
# restart: unless-stopped
# ports:
# - 127.0.0.1:10014:80
# volumes:
# - /var/run/postgresql/.s.PGSQL.5432:/var/run/postgresql/.s.PGSQL.5432:rw
# environment:
# api_extended_postgre: "${PGSQL_DB}"
# api_extended_allowed_origins: '["https://materialious.nadeko.net/"]'
# api_extended_debug: false
# api_extended_invidious_instance: "https://invidious.nadeko.net"
# api_extended_production_instance: "https://syncious.nadeko.net"
peerjs:
image: peerjs/peerjs-server:latest
container_name: invidious-peerjs
restart: unless-stopped
ports:
- 127.0.0.1:10015:9000
# No depends_on needed: haproxy resolves the backends itself at runtime (docker resolver)
haproxy:
image: haproxy:lts-alpine
container_name: invidious-haproxy
ports:
# Invidious
- "127.0.0.1:10011:8001"
# TOR
- "127.0.0.1:10098:8002"
# I2P
- "127.0.0.1:10099:8003"
# HTTP3-PROXY
- "127.0.0.1:10012:8004"
# METRICS
- "8404:8404"
# STATS
- "127.0.0.1:8405:8405"
volumes:
- ./haproxy:/usr/local/etc/haproxy
- /run/invidious-haproxy:/tmp:rw
#- http3-proxy-socket:/http3-socket:rw
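# /run/invidious-haproxy on the host is mounted as /tmp inside the container,
# which is where the haproxy config binds its unix sockets (invidious.sock,
# invidious-tor.sock, invidious-i2p.sock, http3-proxy.sock).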
networks:
default:
name: invidious
volumes:
invidious-valkey:

@@ -0,0 +1,111 @@
global
# Cap maxconn explicitly: haproxy otherwise derives it from the system fd limit, which can be huge and make haproxy OOM
maxconn 32768
# 777 perms for all sockets
unix-bind mode 777
stats socket ipv4@0.0.0.0:8405 level admin
resolvers docker
# To be able to use multiple replicas of invidious
# using domains
nameserver dns1 127.0.0.11:53
defaults
mode http
timeout client 10s
timeout connect 10s
timeout server 15s
timeout http-request 10s
frontend prometheus
bind *:8404
mode http
http-request use-service prometheus-exporter if { path /metrics }
no log
frontend stats
mode http
bind *:8405
stats enable
stats uri /stats
stats refresh 1s
stats admin if TRUE
cache invidious-thumbnail-cache
# 96MB
total-max-size 96
# 32kB
max-object-size 32768
# 30min
max-age 1800
process-vary on
# cache api-cache
# # 32MB
# total-max-size 32
# # 4kB
# max-object-size 32000
# # 5min
# max-age 300
# process-vary on
frontend invidious
# http-request set-var(txn.api) path
bind /tmp/invidious.sock
bind *:8001
#filter cache invidious-api-cache
#http-request cache-use invidious-api-cache if { path_beg /api/v1 }
#http-response cache-store invidious-api-cache
default_backend invidious
frontend invidious-tor
bind /tmp/invidious-tor.sock
bind *:8002
default_backend invidious-tor
frontend invidious-i2p
bind /tmp/invidious-i2p.sock
bind *:8003
default_backend invidious-i2p
frontend http3-proxy
http-request set-var(txn.vipath) path
# acl vi path_beg /vi/
bind /tmp/http3-proxy.sock
bind *:8004
#http-response set-header Cache-Control public
#http-response set-header Expires -1
#http-response set-header Pragma cache
# acl youtube_thumbnails path_beg /vi/
#
default_backend http3-proxy
backend invidious
balance leastconn
#http-request cache-use api-cache if { path_beg /api/v1/channels/ }
#http-response cache-store api-cache
# acl inv_api var(txn.api) -m beg /api/
# http-response set-header X-Cache-Status HIT if !{ srv_id -m found } inv_api
# http-response set-header X-Cache-Status MISS if { srv_id -m found } inv_api
server-template invidious 8 invidious:3000 check resolvers docker init-addr libc,none
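# server-template pre-creates 8 server slots and fills them from Docker DNS
# ("invidious" resolves to every replica), so the 6 replicas defined in the
# compose file are balanced without listing them here; init-addr libc,none lets
# haproxy start even if the name does not resolve yet.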
backend invidious-tor
server-template invidious-tor 2 invidious-tor:3000 check resolvers docker init-addr libc,none
backend invidious-i2p
server-template invidious-i2p 2 invidious-i2p:3000 check resolvers docker init-addr libc,none
backend http3-proxy
# Unix socket for http3-proxy for later
#server http3-proxy-unix-socket /http3-proxy/http-proxy.sock
# balance source
http-request cache-use invidious-thumbnail-cache if { path_beg /vi/ }
http-response cache-store invidious-thumbnail-cache
# https://discourse.haproxy.org/t/http-response-set-header-with-condition-not-working/3108
acl youtube_thumbnails var(txn.vipath) -m beg /vi/
http-response set-header X-Cache-Status HIT if !{ srv_id -m found } youtube_thumbnails
http-response set-header X-Cache-Status MISS if { srv_id -m found } youtube_thumbnails
server http3-proxy-1 http3-proxy:8080 check resolvers docker init-addr libc,none
server http3-proxy-2 http3-proxy:8080 check resolvers docker init-addr libc,none
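# Rough way to check the thumbnail cache, assuming the outer web server routes
# /vi/ to the http3-proxy frontend (example video id is arbitrary):
#   curl -sI https://inv.nadeko.net/vi/dQw4w9WgXcQ/mqdefault.jpg | grep X-Cache-Status
# The header should go from MISS to HIT on the second request.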

@@ -0,0 +1,23 @@
user www-data;
events {
worker_connections 1024;
}
http {
upstream inv-i2p {
server invidious-i2p:3008;
}
server {
listen 3000;
listen [::]:3000;
access_log off;
location / {
resolver 127.0.0.11;
proxy_buffering off;
proxy_request_buffering off;
proxy_cache off;
proxy_pass http://inv-i2p;
proxy_http_version 1.1; # to keep alive
proxy_set_header Connection ""; # to keep alive
}
}
}

@@ -0,0 +1,23 @@
user www-data;
events {
worker_connections 1024;
}
http {
upstream inv-tor {
server invidious-tor:3009;
}
server {
listen 3000;
listen [::]:3000;
access_log off;
location / {
resolver 127.0.0.11;
proxy_buffering off;
proxy_request_buffering off;
proxy_cache off;
proxy_pass http://inv-tor;
proxy_http_version 1.1; # to keep alive
proxy_set_header Connection ""; # to keep alive
}
}
}

@@ -0,0 +1,23 @@
user www-data;
events {
worker_connections 1024;
}
http {
upstream inv {
server invidious:3000;
server invidious-buh:3000;
}
server {
listen unix:/tmp/socket.sock;
access_log off;
location / {
resolver 127.0.0.11;
proxy_buffering off;
proxy_request_buffering off;
proxy_cache off;
proxy_pass http://inv;
proxy_http_version 1.1; # to keep alive
proxy_set_header Connection ""; # to keep alive
}
}
}

@@ -0,0 +1,24 @@
user www-data;
events {
worker_connections 1024;
}
http {
upstream inv {
server invidious:3000;
server invidious-buh:3000;
}
server {
listen 3000;
listen [::]:3000;
access_log off;
location / {
resolver 127.0.0.11;
proxy_buffering off;
proxy_request_buffering off;
proxy_cache off;
proxy_pass http://inv;
proxy_http_version 1.1; # to keep alive
proxy_set_header Connection ""; # to keep alive
}
}
}

@@ -0,0 +1,35 @@
services:
peertube:
image: chocobozzz/peertube:production-bookworm
# build:
# context: .
# args:
# VERSION: production
env_file:
- .env
ports:
- "0.0.0.0:1935:1935" # Comment if you don't want to use the live feature
- "127.0.0.1:10016:9000" # Uncomment if you use another webserver/proxy or test PeerTube in local, otherwise not suitable for production
volumes:
# Remove the following line if you want to use another webserver/proxy or test PeerTube locally
- assets:/app/client/dist
- ./docker-volume/data:/data
- ./docker-volume/config:/config
- /var/run/postgresql/.s.PGSQL.5432:/var/run/postgresql/.s.PGSQL.5432:rw
# group_add:
# - 989
# devices:
# - /dev/dri:/dev/dri
depends_on:
- redis
restart: "always"
redis:
image: valkey/valkey:7.2-alpine
volumes:
- ./docker-volume/redis:/data
restart: "always"
volumes:
assets:
certbot-www:

rimgo/docker-compose.yml

@@ -0,0 +1,23 @@
services:
rimgo:
image: quay.io/pussthecatorg/rimgo:latest
container_name: rimgo
ports:
- "10001:3000" # Replace with "3000:3000" if you don't use a reverse proxy
environment:
- "PORT=3000"
- "ADDRESS=0.0.0.0"
- "IMGUR_CLIENT_ID=546c25a59c58ad7"
env_file: .env
restart: unless-stopped
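# The label below keeps watchtower from automatically pulling a newer rimgo image.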
labels:
- "com.centurylinklabs.watchtower.no-pull=true"
healthcheck:
# rimgo has no /api/v1/trending (that is an Invidious endpoint); check the root page instead
test: wget -nv --tries=1 --spider http://127.0.0.1:3000/ || exit 1
interval: 30s
timeout: 5s
retries: 2
networks:
default:
name: rimgo

@@ -0,0 +1,86 @@
# This compose file is compatible with Docker Compose itself; it might need
# some adjustments to run properly with docker stack.
services:
synapse:
build:
context: .
dockerfile: Dockerfile
image: docker.io/matrixdotorg/synapse:latest
# Since synapse does not retry connecting to the database, restart on failure
restart: unless-stopped
# See the readme for full documentation of the environment settings
# NOTE: You must edit homeserver.yaml to use postgres; it defaults to sqlite
environment:
- SYNAPSE_CONFIG_PATH=/data/homeserver.yaml
- SYNAPSE_SERVER_NAME=nadeko.net
- SYNAPSE_REPORT_STATS=no
volumes:
# You may either store all the files in a local folder
- ./synapse_data:/data:rw
- ./synapse_data/homeserver.yaml:/data/homeserver.yaml:ro
- /var/run/postgresql/:/run/postgresql:rw
# .. or you may split this between different storage points
# - ./files:/data
# - /path/to/ssd:/data/uploads
# - /path/to/large_hdd:/data/media
# In order to expose Synapse, remove one of the following; you might, for
# instance, expose the TLS port directly:
ports:
- 10022:8008/tcp
- 9183:9183/tcp
healthcheck:
test: ["CMD", "curl", "-fSs", "http://localhost:8008/health"]
interval: 15s
timeout: 5s
retries: 3
start_period: 5s
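# sliding-sync: Matrix sliding-sync (MSC3575) proxy, needed by newer clients
# such as Element X; it reaches Synapse over the internal network and
# PostgreSQL over the host socket.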
sliding-sync:
image: ghcr.io/matrix-org/sliding-sync:latest
restart: unless-stopped
volumes:
- /var/run/postgresql/:/run/postgresql:rw
env_file:
- path: ./sliding-sync/.env
required: true
environment:
- SYNCV3_SERVER=http://synapse:8008
- SYNCV3_BINDADDR=:8008
- SYNCV3_LOG_LEVEL=debug
ports:
- 10024:8008
depends_on:
- synapse
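# mautrix bridges for Telegram and WhatsApp; each keeps its state in its
# ./mautrix-*_data directory and uses the host PostgreSQL socket.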
mautrix-telegram:
image: dock.mau.dev/mautrix/telegram:latest
restart: unless-stopped
volumes:
- ./mautrix-telegram_data:/data:z
- /var/run/postgresql/:/run/postgresql:rw
depends_on:
- synapse
mautrix-whatsapp:
image: dock.mau.dev/mautrix/whatsapp:latest
restart: unless-stopped
volumes:
- ./mautrix-whatsapp_data:/data:z
- /var/run/postgresql/:/run/postgresql:rw
depends_on:
- synapse
element:
image: vectorim/element-web:latest
restart: unless-stopped
volumes:
- ./element-web/config.json:/app/config.json:ro
ports:
- 10023:80/tcp
# TODO: Use pgbouncer
networks:
default:
name: synapse