# ============================================================
# CodeTrans — Environment Configuration
# ============================================================

# Backend port
BACKEND_PORT=5001

# ============================================================
# Inference Provider
# ============================================================
# "remote" — Cloud or enterprise OpenAI-compatible API (e.g. CodeLlama via gateway)
# "ollama" — Local Ollama running natively on the host machine (recommended for Mac)
INFERENCE_PROVIDER=remote

# ============================================================
# Option A: Remote OpenAI-compatible API (INFERENCE_PROVIDER=remote)
# ============================================================
# INFERENCE_API_ENDPOINT: Base URL of your inference service (no /v1 suffix)
#   - GenAI Gateway:  https://genai-gateway.example.com
#   - APISIX Gateway: https://apisix-gateway.example.com/CodeLlama-34b-Instruct-hf
INFERENCE_API_ENDPOINT=https://your-api-endpoint.com/deployment
INFERENCE_API_TOKEN=your-pre-generated-token-here
INFERENCE_MODEL_NAME=codellama/CodeLlama-34b-Instruct-hf
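#
# Quick sanity check (a sketch, not part of the app): the "no /v1 suffix" note
# above suggests the backend appends the OpenAI-style /v1 path itself, so a
# request like the one below should answer if the endpoint and token are valid.
# Your gateway's exact route may differ; adjust the path if needed.
#   curl -s "$INFERENCE_API_ENDPOINT/v1/chat/completions" \
#     -H "Authorization: Bearer $INFERENCE_API_TOKEN" \
#     -H "Content-Type: application/json" \
#     -d '{"model": "codellama/CodeLlama-34b-Instruct-hf", "messages": [{"role": "user", "content": "ping"}], "max_tokens": 8}'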

# ============================================================
# Option B: Ollama — native host inference (INFERENCE_PROVIDER=ollama)
# ============================================================
#
# IMPORTANT — Why Ollama runs on the host, NOT in Docker:
# On macOS (Apple Silicon / M-series), Docker containers cannot access the GPU,
# so a containerized Ollama falls back to CPU-only inference, which is
# dramatically slower. Install Ollama natively so it can use Apple's Metal
# backend for hardware-accelerated inference.
#
# Setup:
#   1. Install Ollama: https://ollama.com/download
#   2. Pull your model (see options below)
#   3. Ollama starts automatically; confirm it is running:
#        curl http://localhost:11434/api/tags
#   4. Set the variables below in your .env
#
# The backend container reaches host-side Ollama via the special DNS name
# `host.docker.internal` which Docker Desktop resolves to the Mac host.
# (On Linux with Docker Engine this requires the extra_hosts entry in
# docker-compose.yaml, which is already configured.)
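#
# To confirm the container can actually reach host-side Ollama, a check like
# the one below should list your pulled models (assumptions: the compose
# service is named "backend" and curl is available in the image; substitute
# the service name from your docker-compose.yaml):
#   docker compose exec backend curl -s http://host.docker.internal:11434/api/tags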
#
# --- Production / high-quality translation ---
# INFERENCE_PROVIDER=ollama
# INFERENCE_API_ENDPOINT=http://host.docker.internal:11434
# INFERENCE_MODEL_NAME=codellama:34b
# ollama pull codellama:34b   # ~20 GB, best quality
#
# --- Testing / SLM performance benchmarking ---
# INFERENCE_PROVIDER=ollama
# INFERENCE_API_ENDPOINT=http://host.docker.internal:11434
# INFERENCE_MODEL_NAME=codellama:7b
# ollama pull codellama:7b   # ~4 GB, fast — use this to gauge SLM performance
#
# --- Other recommended code models ---
# ollama pull deepseek-coder:6.7b   # ~4 GB, strong at code tasks
# ollama pull qwen2.5-coder:7b      # ~4 GB, excellent multilingual code
# ollama pull codellama:13b         # ~8 GB, good balance of speed vs quality
#
# Note: INFERENCE_API_TOKEN is not required when using Ollama.
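#
# Once a model is pulled, you can smoke-test it directly against Ollama's
# native API from the host before wiring it into the app (the model name here
# is only an example; use whichever one you pulled):
#   curl -s http://localhost:11434/api/generate \
#     -d '{"model": "codellama:7b", "prompt": "Write a Python add(a, b) function.", "stream": false}'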

# ============================================================
# LLM Settings
# ============================================================
# LLM_TEMPERATURE: sampling temperature; lower values give more deterministic,
# conservative translations.
# LLM_MAX_TOKENS: upper bound on tokens generated per response.
LLM_TEMPERATURE=0.2
LLM_MAX_TOKENS=4096
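#
# For reference, these typically map straight onto the corresponding fields of
# the OpenAI-style chat-completion request (a sketch of the assumed payload,
# not the app's literal code):
#   {"model": "<INFERENCE_MODEL_NAME>", "temperature": 0.2, "max_tokens": 4096,
#    "messages": [{"role": "user", "content": "<translation prompt>"}]}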

# ============================================================
# Code Translation Settings
# ============================================================
# MAX_CODE_LENGTH: maximum length of a single code submission accepted for translation.
# MAX_FILE_SIZE: maximum upload size (10485760 bytes = 10 MiB).
MAX_CODE_LENGTH=8000
MAX_FILE_SIZE=10485760

# ============================================================
# CORS Configuration
# ============================================================
# JSON-style list of frontend origins allowed to call the backend
# (5173 = Vite dev server, 3000 = common alternative dev port).
CORS_ALLOW_ORIGINS=["http://localhost:5173", "http://localhost:3000"]
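# If you deploy the frontend elsewhere, append its origin to the list
# (the domain below is only a placeholder), e.g.:
#   CORS_ALLOW_ORIGINS=["http://localhost:5173", "https://codetrans.example.com"]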

# ============================================================
# Local URL Endpoint
# ============================================================
# Only needed if your remote API endpoint is a private domain mapped in /etc/hosts.
# Otherwise leave as "not-needed".
LOCAL_URL_ENDPOINT=not-needed

# ============================================================
# SSL Verification
# ============================================================
# Set to false only for development with self-signed certificates.
VERIFY_SSL=true