-
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdocker-compose.yml
More file actions
200 lines (184 loc) · 8.53 KB
/
docker-compose.yml
File metadata and controls
200 lines (184 loc) · 8.53 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
# Arrmate — docker-compose.yml
#
# BUILD FROM SOURCE (development / self-hosted build)
# For the pre-built Docker Hub image, use docker-compose.prod.yml instead.
#
# USAGE:
# cp .env.example .env && nano .env
# docker compose up -d
#
# GPU ACCELERATION FOR OLLAMA:
# Set COMPOSE_FILE in your .env to merge the right override:
# NVIDIA: COMPOSE_FILE=docker-compose.yml:docker-compose.ollama-nvidia.yml
# AMD: COMPOSE_FILE=docker-compose.yml:docker-compose.ollama-amd.yml
#
# EXTERNAL OLLAMA:
# If Ollama runs on a different machine, set OLLAMA_BASE_URL in .env
# to that machine's address (e.g. http://192.168.1.x:11434) and
# comment out the ollama service below to avoid a duplicate instance.
services:
  # ─────────────────────────────────────────────
  # Arrmate — data permission fix (runs once at startup)
  #
  # Ensures /data files are owned by the arrmate user before the app starts.
  # This covers upgrades from pre-v2.0.0 images that ran as root and left
  # data files (users.db, services.json, etc.) with root ownership.
  # ─────────────────────────────────────────────
  arrmate-init:
    build:
      context: .
      dockerfile: Dockerfile
    volumes:
      - arrmate-data:/data
    # Must run as root so it can chown files created by pre-v2.0.0 images.
    entrypoint: ["/bin/sh", "-c", "chown -R arrmate:arrmate /data && echo 'arrmate: /data ownership verified'"]
    user: root
    # One-shot job: exits after fixing ownership. Keep "no" quoted —
    # an unquoted no is parsed as boolean false by YAML 1.1 loaders.
    restart: "no"
    networks:
      - arrmate-net
# ─────────────────────────────────────────────
# Arrmate (build from source)
# ─────────────────────────────────────────────
arrmate:
build:
context: .
dockerfile: Dockerfile
container_name: arrmate
restart: unless-stopped
ports:
- "${API_PORT:-8000}:8000"
environment:
# Application
- API_HOST=0.0.0.0
- API_PORT=${API_PORT:-8000}
- LOG_LEVEL=${LOG_LEVEL:-INFO}
# LLM provider (ollama | openai | anthropic)
- LLM_PROVIDER=${LLM_PROVIDER:-ollama}
# Ollama
- OLLAMA_BASE_URL=${OLLAMA_BASE_URL:-http://ollama:11434}
- OLLAMA_MODEL=${OLLAMA_MODEL:-qwen2.5:7b}
# OpenAI (leave blank to disable)
- OPENAI_API_KEY=${OPENAI_API_KEY:-}
- OPENAI_MODEL=${OPENAI_MODEL:-gpt-4o}
# Anthropic (leave blank to disable)
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
- ANTHROPIC_MODEL=${ANTHROPIC_MODEL:-claude-3-5-sonnet-20241022}
# ── Media Services ──────────────────────────────────────────
# Commented out in .env = empty string here = not shown in UI
- SONARR_URL=${SONARR_URL:-}
- SONARR_API_KEY=${SONARR_API_KEY:-}
- RADARR_URL=${RADARR_URL:-}
- RADARR_API_KEY=${RADARR_API_KEY:-}
- LIDARR_URL=${LIDARR_URL:-}
- LIDARR_API_KEY=${LIDARR_API_KEY:-}
- BAZARR_URL=${BAZARR_URL:-}
- BAZARR_API_KEY=${BAZARR_API_KEY:-}
- AUDIOBOOKSHELF_URL=${AUDIOBOOKSHELF_URL:-}
- AUDIOBOOKSHELF_API_KEY=${AUDIOBOOKSHELF_API_KEY:-}
- LAZYLIBRARIAN_URL=${LAZYLIBRARIAN_URL:-}
- LAZYLIBRARIAN_API_KEY=${LAZYLIBRARIAN_API_KEY:-}
- READMEABOOK_URL=${READMEABOOK_URL:-}
- READMEABOOK_API_KEY=${READMEABOOK_API_KEY:-}
- READARR_URL=${READARR_URL:-}
- READARR_API_KEY=${READARR_API_KEY:-}
- PLEX_URL=${PLEX_URL:-}
- PLEX_TOKEN=${PLEX_TOKEN:-}
# ── Authentication (optional) ─────────────────────────────
- SECRET_KEY=${SECRET_KEY:-}
- AUTH_DATA_DIR=/data
# ── Download Managers (optional) ─────────────────────────
- SABNZBD_URL=${SABNZBD_URL:-}
- SABNZBD_API_KEY=${SABNZBD_API_KEY:-}
- NZBGET_URL=${NZBGET_URL:-}
- NZBGET_USERNAME=${NZBGET_USERNAME:-}
- NZBGET_PASSWORD=${NZBGET_PASSWORD:-}
- QBITTORRENT_URL=${QBITTORRENT_URL:-}
- QBITTORRENT_USERNAME=${QBITTORRENT_USERNAME:-}
- QBITTORRENT_PASSWORD=${QBITTORRENT_PASSWORD:-}
- TRANSMISSION_URL=${TRANSMISSION_URL:-}
- TRANSMISSION_USERNAME=${TRANSMISSION_USERNAME:-}
- TRANSMISSION_PASSWORD=${TRANSMISSION_PASSWORD:-}
# ── Transcoding (optional) ────────────────────────────────
# CRF: 18=high quality (large), 28=default, 32=smaller/lower quality
- TRANSCODE_CRF=${TRANSCODE_CRF:-28}
# Preset: ultrafast/fast/medium/slow/veryslow
- TRANSCODE_PRESET=${TRANSCODE_PRESET:-medium}
volumes:
- arrmate-data:/data
# ── H.265 Transcoding (optional) ─────────────────────────────────────────
# Mount your media directories at the SAME paths that Sonarr/Radarr report
# so ffmpeg can access the files. Adjust to match your setup:
# - /your/movies:/movies
# - /your/tv:/tv
networks:
- arrmate-net
# If using Traefik, also add the Traefik network:
# - traefik
depends_on:
arrmate-init:
condition: service_completed_successfully
ollama:
condition: service_started
healthcheck:
test: ["CMD", "python", "-c", "import httpx; httpx.get('http://localhost:8000/health')"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
# ── Traefik labels (uncomment if using Traefik) ──────────────
# Also set TRAEFIK_DOMAIN, TRAEFIK_ENTRYPOINT, TRAEFIK_CERTRESOLVER,
# and TRAEFIK_NETWORK in your .env
# labels:
# - "traefik.enable=true"
# - "traefik.http.routers.arrmate.rule=Host(`${TRAEFIK_DOMAIN}`)"
# - "traefik.http.routers.arrmate.entrypoints=${TRAEFIK_ENTRYPOINT:-websecure}"
# - "traefik.http.routers.arrmate.tls.certresolver=${TRAEFIK_CERTRESOLVER:-letsencrypt}"
# - "traefik.http.services.arrmate.loadbalancer.server.port=8000"
# - "traefik.docker.network=${TRAEFIK_NETWORK:-traefik}"
# ─────────────────────────────────────────────
# Ollama — Local LLM
# ─────────────────────────────────────────────
# Comment this entire service out if you are using:
# - An external Ollama on another machine
# - OpenAI or Anthropic as your LLM provider
#
# GPU acceleration is handled by override files:
# NVIDIA: docker-compose.ollama-nvidia.yml
# AMD: docker-compose.ollama-amd.yml
# Set COMPOSE_FILE in .env to include the override.
ollama:
image: ollama/ollama:latest
container_name: arrmate-ollama
restart: unless-stopped
# Port 11434 is reachable within arrmate-net via http://ollama:11434.
# No host binding needed — uncomment only if external access is required.
# ports:
# - "11434:11434"
volumes:
- ollama-data:/root/.ollama
networks:
- arrmate-net
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:11434/api/tags"]
interval: 30s
timeout: 10s
retries: 3
start_period: 60s
# ─────────────────────────────────────────────────────────────────
# Networks
# ─────────────────────────────────────────────────────────────────
networks:
  arrmate-net:
    name: arrmate-net
  # Traefik network — only needed if TRAEFIK_NETWORK is set.
  # Must match the network your Traefik instance is on.
  # traefik:
  #   external: true
  #   name: ${TRAEFIK_NETWORK:-traefik}

# ─────────────────────────────────────────────────────────────────
# Volumes
# ─────────────────────────────────────────────────────────────────
volumes:
  # App state written to /data (users.db, services.json, etc.)
  arrmate-data:
    driver: local
  # Downloaded Ollama models (/root/.ollama inside the container)
  ollama-data:
    driver: local