# docker-compose-intel.yml
version: "3.8"

services:
  # Ollama served through Intel's IPEX-LLM image so inference runs on the Intel GPU via SYCL.
  ollama:
    image: intelanalytics/ipex-llm-inference-cpp-xpu:latest
    # init-ollama sets up the bundled ollama binary in the working dir;
    # mkdir -p so a container restart doesn't fail on an existing directory.
    command: bash -c "mkdir -p /llm/ollama && cd /llm/ollama && /usr/local/bin/init-ollama && ./ollama serve"
    ports:
      - "11434:11434"
    volumes:
      # Persist downloaded models on the host.
      - ./ollama:/root/.ollama
    devices:
      # Pass the Intel GPU device nodes through to the container.
      - /dev/dri/card0:/dev/dri/card0
      - /dev/dri/renderD128:/dev/dri/renderD128
    environment:
      - OLLAMA_HOST=0.0.0.0
      # SYCL / Level Zero settings used with IPEX-LLM on Intel GPUs.
      - USE_XETLA=OFF
      - SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
      - SYCL_CACHE_PERSISTENT=1
    networks:
      - llm-network
    healthcheck:
      # Ollama answers a plain HTTP GET on / once the server is up.
      test: ["CMD-SHELL", "curl -f http://localhost:11434"]
      interval: 5s
      timeout: 2s
      retries: 5

  openwebui:
    image: ghcr.io/open-webui/open-webui:main
    ports:
      - "8080:8080"
    depends_on:
      # Wait for the Ollama healthcheck before starting the UI.
      ollama:
        condition: service_healthy
    environment:
      - WEBUI_URL=http://localhost:8080
      - WEBUI_AUTH=False
      # The UI reaches Ollama over the shared network by service name.
      - OLLAMA_BASE_URL=http://ollama:11434
    volumes:
      # Persist Open WebUI state (users, chats) on the host.
      - ./open-webui:/app/backend/data
    networks:
      - llm-network

networks:
  llm-network:
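
# Usage sketch, assuming Docker Compose v2, this file saved as
# docker-compose-intel.yml, and the GPU device nodes listed above present on
# the host (check with `ls /dev/dri`; names like renderD128 can vary):
#
#   docker compose -f docker-compose-intel.yml up -d
#   # Pull a model through the running container; the binary path follows from
#   # the command above, and "llama3" is just an example model name:
#   docker compose -f docker-compose-intel.yml exec ollama /llm/ollama/ollama pull llama3
#
# Then open http://localhost:8080 for Open WebUI. Models can also be pulled
# from the Open WebUI admin settings instead of the CLI.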