neo/docker-compose.utils.yml

version: '2.2'
services:
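  # The VIRTUAL_HOST / VIRTUAL_PORT / LETSENCRYPT_HOST variables used throughout are the
  # convention of an nginx-proxy + acme-companion style reverse proxy, which is assumed to
  # run elsewhere and share the external `proxy` network.
  # SearXNG metasearch instance, reachable through that proxy at search.faulty.nl.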
  searx:
    restart: always
    image: searxng/searxng
    volumes:
      - /srv/searx/config:/etc/searxng
    environment:
      - LETSENCRYPT_HOST=search.faulty.nl
      - VIRTUAL_HOST=search.faulty.nl
      - BASE_URL=https://search.faulty.nl
    networks:
      - proxy
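  # Flexo caches Arch Linux packages; the proxy forwards requests to port 7878 in the container.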
  flexo: # arch repo mirror
    image: nroi/flexo:latest
    restart: unless-stopped
    environment:
      - LETSENCRYPT_HOST=arch.neo.faulty.nl
      - VIRTUAL_HOST=arch.neo.faulty.nl
      - VIRTUAL_PORT=7878
    volumes:
      - /srv/flexo/config:/etc/flexo
    networks:
      - proxy
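  # Syncthing file synchronisation: 22000/tcp+udp carry sync traffic and 21027/udp is local
  # discovery, published directly on the host; the web UI (8384) is only exposed via the proxy.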
  syncthing:
    image: lscr.io/linuxserver/syncthing:latest
    container_name: syncthing
    environment:
      - PUID=0
      - PGID=0
      - TZ=Europe/Amsterdam
      - LETSENCRYPT_HOST=st.faulty.nl
      - VIRTUAL_HOST=st.faulty.nl
      - VIRTUAL_PORT=8384
    volumes:
      - /srv/syncthing/config:/config
      - /srv/syncthing/data:/local
      - /mnt/stash:/remote
    ports:
      - 22000:22000/tcp
      - 22000:22000/udp
      - 21027:21027/udp
    restart: unless-stopped
    networks:
      - proxy
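  # Custom backup job (./custom/backups): mounts /srv and /docker read-only as sources and
  # writes to a local and a remote backup target; IMMEDIATE=1 presumably triggers a run on start.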
  backups:
    build:
      context: ./custom/backups
    user: root
    environment:
      - PYTHONUNBUFFERED=1
      - IMMEDIATE=1
    volumes:
      - /srv:/data/services:ro
      - /docker:/data/docker:ro
      - /srv/backups/data:/local:rw
      - /mnt/backups/neo:/remote:rw
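  # LibreSpeed self-hosted speed test, served at the root path of neo.faulty.nl.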
  librespeed:
    image: adolfintel/speedtest:latest
    environment:
      - TITLE=FaultySpeed
      - MODE=standalone
      - LETSENCRYPT_HOST=neo.faulty.nl
      - VIRTUAL_HOST=neo.faulty.nl
      - VIRTUAL_PATH=/
      - VIRTUAL_DEST=/
    networks:
      - proxy
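  # transfer.sh temporary file sharing; uploads are stored locally under /tmp in the container
  # and purged / size- and rate-limited according to the flags below.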
  transfersh:
    image: dutchcoders/transfer.sh:latest
    command: --provider local --basedir /tmp/ --purge-interval 360 --max-upload-size 524288 --rate-limit 20
    environment:
      - LETSENCRYPT_HOST=share.neo.ixvd.net
      - VIRTUAL_HOST=share.neo.ixvd.net
      - VIRTUAL_PORT=8080
    networks:
      - proxy
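  # Dendrite Matrix homeserver (monolith image), backed by the dendrite-pg Postgres service
  # over the internal network.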
  dendrite:
    image: matrixdotorg/dendrite-monolith:latest
    command: ["--config=/etc/dendrite/dendrite.yml"]
    environment:
      - LETSENCRYPT_HOST=2d.ixvd.net
      - VIRTUAL_HOST=2d.ixvd.net
      - VIRTUAL_PORT=8008
    volumes:
      - /srv/dendrite/config:/etc/dendrite
      - /srv/dendrite/data:/var/dendrite/media
    depends_on:
      - dendrite-pg
    networks:
      - proxy
      - internal
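  # PostgreSQL 14 instance for Dendrite, reachable only on the internal network.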
  dendrite-pg:
    image: postgres:14
    environment:
      - POSTGRES_USER=dendrite
      - POSTGRES_PASSWORD=dendrite
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U dendrite"]
      interval: 5s
      timeout: 5s
      retries: 5
    volumes:
      - /srv/dendrite/other/database:/var/lib/postgresql/data
    networks:
      - internal
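  # Grafana dashboards; joins the metrics network so it can reach Prometheus and the exporters.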
  grafana:
    image: grafana/grafana-oss:latest
    environment:
      - LETSENCRYPT_HOST=grafana.neo.faulty.nl
      - VIRTUAL_HOST=grafana.neo.faulty.nl
      - VIRTUAL_PORT=3000
      - GF_SERVER_ROOT_URL=https://grafana.neo.faulty.nl
    volumes:
      - /srv/grafana:/var/lib/grafana
    networks:
      - proxy
      - metrics
      - default
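  # Custom Prometheus build (./custom/prometheus); docker.host resolves to the Docker host,
  # presumably so the host-level node-exporter below can be scraped.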
  prometheus:
    build:
      context: ./custom/prometheus
    networks:
      - metrics
    extra_hosts:
      - "docker.host:host-gateway"
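  # Blackbox exporter, used by Prometheus to probe endpoints over the metrics network.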
  prom-blackbox:
    image: prom/blackbox-exporter:latest
    networks:
      - metrics
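  # node-exporter uses the host network and PID namespaces plus a read-only bind of / so it
  # can report host-level metrics, hence the note below.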
  ## THIS CONTAINER IS AVAILABLE ON THE HOST ##
  node-exporter:
    image: quay.io/prometheus/node-exporter:latest
    container_name: node-exporter
    command:
      - '--path.rootfs=/host'
    network_mode: host
    pid: host
    restart: unless-stopped
    volumes:
      - '/:/host:ro,rslave'
    cap_add:
      - SYS_TIME
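  # Kasm Workspaces (browser-streamed desktops); the container serves its own HTTPS on 8443,
  # hence VIRTUAL_PROTO=https for the proxy.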
  kasm:
    image: lscr.io/linuxserver/kasm:latest
    container_name: kasm
    privileged: true
    environment:
      - KASM_PORT=8443
      - TZ=Europe/London
      - VIRTUAL_HOST=kasm.neo.faulty.nl
      - LETSENCRYPT_HOST=kasm.neo.faulty.nl
      - VIRTUAL_PORT=8443
      - VIRTUAL_PROTO=https
    volumes:
      - /srv/kasm/data:/opt
      - /srv/kasm/other/profiles:/profiles # optional
      - /dev/input:/dev/input # optional
      - /run/udev/data:/run/udev/data # optional
    restart: unless-stopped
    networks:
      - proxy
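  # Invidious YouTube front-end (custom build); the healthcheck fetches a known video's
  # comments through the local API to verify the instance is working.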
  invidious:
    build:
      context: custom/invidious
    restart: unless-stopped
    environment:
      LETSENCRYPT_HOST: "yt.neo.ixvd.net"
      VIRTUAL_HOST: "yt.neo.ixvd.net"
      VIRTUAL_PORT: 3000
    healthcheck:
      test: wget -nv --tries=1 --spider http://127.0.0.1:3000/api/v1/comments/jNQXAC9IVRw || exit 1
      interval: 30s
      timeout: 5s
      retries: 2
    logging:
      options:
        max-size: "1G"
        max-file: "4"
    depends_on:
      - invidious-db
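  # PostgreSQL for Invidious (custom build, presumably adding the schema init scripts);
  # the healthcheck runs pg_isready with the credentials from its environment.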
  invidious-db:
    build:
      context: custom/invidious-db
    restart: unless-stopped
    volumes:
      - /srv/invidious/other/db/data:/var/lib/postgresql/data
    environment:
      POSTGRES_DB: invidious
      POSTGRES_USER: invidious
      POSTGRES_PASSWORD: invidious
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"]
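# Only the metrics network is defined here; proxy and internal are presumably created by the
# base / reverse-proxy compose file this one is combined with.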
networks:
  metrics: