# ===========================================================================
# Example Docker Compose file with additional Ollama service for LLM tools
# ===========================================================================
#
# Purpose:
# --------
#
# This file is an example Docker Compose configuration for self-hosting
# Sure with Ollama on your local machine or on a cloud VPS.
#
# The configuration below is a "standard" setup that works out of the box,
# but if you're running this outside of a local network, it is recommended
# that you override the default environment variables (passwords, secrets)
# for extra security.
#
# Setup:
# ------
#
# To run this, you should read the setup guide:
#
# https://github.com/we-promise/sure/blob/main/docs/hosting/docker.md
#
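# For example, the stack defined here can typically be brought up with:
#
#   docker compose -f compose.example.ai.yml up -d
#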
# Troubleshooting:
# ----------------
#
# If you run into problems, you should open a Discussion here:
#
# https://github.com/we-promise/sure/discussions/categories/general
#
x-db-env: &db_env
  POSTGRES_USER: ${POSTGRES_USER:-sure_user}
  POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-sure_password}
  POSTGRES_DB: ${POSTGRES_DB:-sure_production}
x-rails-env: &rails_env
  <<: *db_env
  SECRET_KEY_BASE: ${SECRET_KEY_BASE:-a7523c3d0ae56415046ad8abae168d71074a79534a7062258f8d1d51ac2f76d3c3bc86d86b6b0b307df30d9a6a90a2066a3fa9e67c5e6f374dbd7dd4e0778e13}
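  # Do not reuse the published default key above outside a local network; you
  # can generate your own value with, for example:
  #
  #   openssl rand -hex 64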
  SELF_HOSTED: "true"
  RAILS_FORCE_SSL: "false"
  RAILS_ASSUME_SSL: "false"
  DB_HOST: db
  DB_PORT: 5432
  REDIS_URL: redis://redis:6379/1
  AI_DEBUG_MODE: "true" # Useful for debugging; set to "false" in production
  # Ollama exposes OpenAI API-compatible endpoints, so the OpenAI variables
  # below simply point at the Ollama service; the token can be any value.
  OPENAI_ACCESS_TOKEN: token-can-be-any-value-for-ollama
  OPENAI_MODEL: llama3.1:8b # Note: use a model that supports tool calling
  OPENAI_URI_BASE: http://ollama:11434/v1
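  # Once the stack is running, the OpenAI-compatible endpoint can be
  # sanity-checked from the host with, for example:
  #
  #   curl http://localhost:11434/v1/models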
  # NOTE: switching these variables to the real OpenAI API will incur costs
  # whenever you use AI-related features in the app (chat, rules). Make sure
  # you have set appropriate spend limits on your account before doing so.
  # OPENAI_ACCESS_TOKEN: ${OPENAI_ACCESS_TOKEN}
services:
  # Note: You still have to download models manually using the ollama CLI or
  # via Open WebUI.
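  # For example, once the ollama service below is up, the model referenced by
  # OPENAI_MODEL above can be pulled with:
  #
  #   docker compose exec ollama ollama pull llama3.1:8b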
  ollama:
    volumes:
      - ollama:/root/.ollama
    container_name: ollama
    hostname: ollama
    restart: unless-stopped
    image: docker.io/ollama/ollama:latest
    ports:
      - "11434:11434"
    environment:
      - OLLAMA_KEEP_ALIVE=1h
      # Note: Ollama's OLLAMA_MODELS variable sets the model storage path, not
      # a list of models to pre-load, so models have to be pulled manually as
      # described above; swap llama3.1:8b for your preferred tool-enabled model.
    networks:
      - sure_net
    # Recommended: Enable GPU support (requires the NVIDIA Container Toolkit
    # on the host)
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: all
    #           capabilities: [ gpu ]
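    # If GPU support is enabled, the container's view of the GPU can usually
    # be checked with:
    #
    #   docker compose exec ollama nvidia-smi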
  ollama-webui:
    image: ghcr.io/open-webui/open-webui
    container_name: ollama-webui
    volumes:
      - ollama-webui:/app/backend/data
    depends_on:
      - ollama
    ports:
      - "8080:8080"
    environment: # https://docs.openwebui.com/getting-started/env-configuration#default_models
      - OLLAMA_BASE_URLS=http://host.docker.internal:11434 # comma-separated Ollama hosts
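      # Since both containers share sure_net, http://ollama:11434 should also
      # work here instead of going through the host gateway.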
      - ENV=dev
      - WEBUI_AUTH=False
      - WEBUI_NAME=AI
      - WEBUI_URL=http://localhost:8080
      - WEBUI_SECRET_KEY=t0p-s3cr3t
      - NO_PROXY=host.docker.internal
    extra_hosts:
      - host.docker.internal:host-gateway
    restart: unless-stopped
    networks:
      - sure_net
  web:
    image: ghcr.io/we-promise/sure:latest
    volumes:
      - app-storage:/rails/storage
    ports:
      - "3000:3000"
    restart: unless-stopped
    environment:
      <<: *rails_env
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
    networks:
      - sure_net
  worker:
    image: ghcr.io/we-promise/sure:latest
    command: bundle exec sidekiq
    volumes:
      - app-storage:/rails/storage
    restart: unless-stopped
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
    environment:
      <<: *rails_env
    networks:
      - sure_net
  db:
    image: postgres:16
    restart: unless-stopped
    volumes:
      - postgres-data:/var/lib/postgresql/data
    environment:
      <<: *db_env
    healthcheck:
      test: [ "CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB" ]
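      # "$$" escapes "$" in Compose, so these variables are expanded by the
      # shell inside the container rather than at compose-file parse time.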
      interval: 5s
      timeout: 5s
      retries: 5
    networks:
      - sure_net
  redis:
    image: redis:latest
    restart: unless-stopped
    volumes:
      - redis-data:/data
    healthcheck:
      test: [ "CMD", "redis-cli", "ping" ]
      interval: 5s
      timeout: 5s
      retries: 5
    networks:
      - sure_net
volumes:
  app-storage:
  postgres-data:
  redis-data:
  ollama:
  ollama-webui:

networks:
  sure_net:
    driver: bridge