This repository was archived by the owner on Apr 6, 2026. It is now read-only.
forked from DreamLab-AI/nostr-rust-forum
-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathdocker-compose.prod.yml
More file actions
181 lines (174 loc) · 5.58 KB
/
docker-compose.prod.yml
File metadata and controls
181 lines (174 loc) · 5.58 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
# =============================================================================
# Nostr-BBS — Docker Compose Production Deployment
# =============================================================================
# Production-ready multi-container setup with GCS storage hints.
# Designed for cloud deployment with managed storage backends.
# =============================================================================
# NOTE: `version` is obsolete in Compose v2 (ignored with a warning);
# kept only for compatibility with legacy docker-compose v1 installs.
version: '3.8'

services:
  # Main application container: website + strfry Nostr relay in one image.
  Nostr-BBS:
    build:
      context: .
      dockerfile: Dockerfile
      args:
        - NODE_ENV=production
    container_name: Nostr-BBS-nostr-prod
    ports:
      - "80:80"
      - "443:443"
    volumes:
      # For GCS-backed storage, mount via gcsfuse.
      # Option 1: Local named volume (default).
      - strfry-data:/app/strfry-db
      # Option 2: GCS FUSE bind mount (uncomment and configure).
      # - type: bind
      #   source: /mnt/gcs/strfry-db
      #   target: /app/strfry-db
      - ./relay/whitelist.json:/etc/strfry/whitelist.json:ro
      # TLS certificates (mount from secrets or certbot).
      - ./certs:/etc/nginx/certs:ro
    environment:
      - TZ=UTC
      - NODE_ENV=production
      # GCS configuration hints (empty defaults keep local runs working).
      - GCS_BUCKET=${GCS_RELAY_BUCKET:-}
      - GCS_PROJECT_ID=${GCS_PROJECT_ID:-}
    healthcheck:
      test: ["CMD", "/usr/local/bin/healthcheck.sh"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 15s
    restart: always
    networks:
      - nostr-bbs-internal
      - nostr-bbs-public
    # NOTE(review): the `deploy` section is honored only by Docker Swarm
    # (or `docker compose --compatibility`); the plain `docker-compose up`
    # command used in the instructions below ignores it and relies on the
    # `restart: always` policy above instead.
    deploy:
      resources:
        limits:
          cpus: '2.0'
          memory: 1G
        reservations:
          cpus: '0.5'
          memory: 256M
      restart_policy:
        condition: on-failure
        delay: 5s
        max_attempts: 3
        window: 120s
  # Reverse Proxy / Load Balancer (optional, for multi-instance).
  # Uncomment for production with SSL termination.
  # NOTE: this proxy binds host ports 80/443, which the Nostr-BBS service
  # above already publishes — remove (or remap) the Nostr-BBS `ports:`
  # entries before enabling it, or the second bind will fail.
  # nginx-proxy:
  #   image: nginx:alpine
  #   container_name: nostr-bbs-proxy
  #   ports:
  #     - "80:80"
  #     - "443:443"
  #   volumes:
  #     - ./nginx/nginx.prod.conf:/etc/nginx/nginx.conf:ro
  #     - ./certs:/etc/nginx/certs:ro
  #   depends_on:
  #     - Nostr-BBS
  #   restart: always
  #   networks:
  #     - nostr-bbs-public
# Cloudflared tunnel for production
cloudflared:
image: cloudflare/cloudflared:latest
container_name: nostr-bbs-tunnel-prod
command: tunnel --no-autoupdate run
environment:
- TUNNEL_TOKEN=${CLOUDFLARED_TOKEN}
depends_on:
Nostr-BBS:
condition: service_healthy
restart: always
networks:
- nostr-bbs-internal
deploy:
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 5
window: 120s
# Named volumes for persistent relay (strfry LMDB) data.
volumes:
  strfry-data:
    driver: local
    # For GCS-backed volumes on GKE, use the following instead:
    # driver: gcsfuse
    # driver_opts:
    #   bucket: ${GCS_RELAY_BUCKET}
    #   location: ${GCS_REGION:-us-central1}
networks:
  # Internal network for service-to-service communication.
  # `internal: true` removes external connectivity: containers attached
  # ONLY to this network have no outbound internet access.
  nostr-bbs-internal:
    driver: bridge
    internal: true
  # Public network for external access and outbound egress.
  nostr-bbs-public:
    driver: bridge
# =============================================================================
# Production Deployment Instructions:
# =============================================================================
#
# Prerequisites:
# 1. Create GCS bucket for data persistence:
# gsutil mb -l us-central1 gs://${GCS_RELAY_BUCKET}
#
# 2. Create service account with Storage Admin role:
#    gcloud iam service-accounts create nostr-bbs-sa
#    gcloud projects add-iam-policy-binding PROJECT_ID \
#      --member="serviceAccount:nostr-bbs-sa@PROJECT_ID.iam.gserviceaccount.com" \
#      --role="roles/storage.admin"
#    (NOTE: roles/storage.admin on the whole project is broad; granting
#    roles/storage.objectAdmin on just the bucket is usually sufficient.)
#
# 3. Download service account key:
# gcloud iam service-accounts keys create ./secrets/gcs-credentials.json \
# --iam-account=nostr-bbs-sa@PROJECT_ID.iam.gserviceaccount.com
#
# 4. Create .env file:
# CLOUDFLARED_TOKEN=your-tunnel-token
# GCS_PROJECT_ID=your-project-id
# GCS_RELAY_BUCKET=nostr-bbs-strfry-data
# GCS_REGION=us-central1
#
# 5. Create secrets directory:
# mkdir -p ./secrets ./certs
#
# 6. Deploy:
# docker-compose -f docker-compose.prod.yml up -d --build
#
# 7. View logs:
# docker-compose -f docker-compose.prod.yml logs -f
#
# 8. Backup (to GCS):
# gsutil -m rsync -r /mnt/gcs/strfry-db gs://${GCS_RELAY_BUCKET}-backup/
#
# =============================================================================
# GCS FUSE Setup (for local volume with GCS backing):
# =============================================================================
#
# 1. Install gcsfuse on host:
# curl -fsSL https://github.com/GoogleCloudPlatform/gcsfuse/releases/download/v2.0.0/gcsfuse_2.0.0_amd64.deb -o gcsfuse.deb
# sudo dpkg -i gcsfuse.deb
#
# 2. Authenticate:
# gcloud auth application-default login
#
# 3. Mount bucket:
# sudo mkdir -p /mnt/gcs/strfry-db
# gcsfuse --implicit-dirs ${GCS_RELAY_BUCKET} /mnt/gcs/strfry-db
#
# 4. Update docker-compose to use bind mounts instead of named volumes
#
# =============================================================================
# Kubernetes / GKE Deployment:
# =============================================================================
#
# For GKE deployment, convert this compose file to Kubernetes manifests:
# kompose convert -f docker-compose.prod.yml
#
# Or use the GCS Fuse CSI driver for persistent volumes:
# https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/cloud-storage-fuse-csi-driver
#
# =============================================================================