diff --git a/README.md b/README.md
index 9c2766d..0eb0c7d 100644
--- a/README.md
+++ b/README.md
@@ -58,6 +58,14 @@ docker compose --profile gpu-nvidia up
 > If you have not used your Nvidia GPU with Docker before, please follow the
 > [Ollama Docker instructions](https://github.com/ollama/ollama/blob/main/docs/docker.md).
 
+### For AMD GPU users on Linux
+
+```
+git clone https://github.com/n8n-io/self-hosted-ai-starter-kit.git
+cd self-hosted-ai-starter-kit
+docker compose --profile gpu-amd up
+```
+
 #### For Mac / Apple Silicon users
 
 If you’re using a Mac with an M1 or newer processor, you can't expose your GPU
diff --git a/docker-compose.yml b/docker-compose.yml
index 6aae070..a1da9eb 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -115,6 +115,14 @@ services:
               count: 1
               capabilities: [gpu]
 
+  ollama-gpu-amd:
+    profiles: ["gpu-amd"]
+    <<: *service-ollama
+    image: ollama/ollama:rocm
+    devices:
+      - "/dev/kfd"
+      - "/dev/dri"
+
   ollama-pull-llama-cpu:
     profiles: ["cpu"]
     <<: *init-ollama
@@ -126,3 +134,10 @@ services:
     <<: *init-ollama
     depends_on:
       - ollama-gpu
+
+  ollama-pull-llama-gpu-amd:
+    profiles: ["gpu-amd"]
+    <<: *init-ollama
+    image: ollama/ollama:rocm
+    depends_on:
+      - ollama-gpu-amd