# quantumrag-local.yaml.example — 59 lines (51 loc) · 1.47 KB
---
# QuantumRAG local-only configuration (Ollama)
# Runs entirely on the local machine — no API keys required.
#
# Prerequisites:
#   1. brew install ollama   (or https://ollama.ai)
#   2. ollama serve
#   3. ollama pull llama3.2 && ollama pull nomic-embed-text
#
# Usage:
#   cp quantumrag-local.yaml.example quantumrag-local.yaml
#   quantumrag ingest ./docs --config quantumrag-local.yaml
#   quantumrag query "요약해줘" --config quantumrag-local.yaml

project_name: "local-knowledge-base"
language: "ko"
domain: "general"

models:
  embedding:
    provider: "ollama"
    model: "nomic-embed-text"
    dimensions: 768
    # base_url: "http://localhost:11434"  # default; change for a remote server

  # Generation models by query-complexity tier.
  generation:
    simple:
      provider: "ollama"
      model: "llama3.2"  # 3B — fast and lightweight
      # base_url: "http://localhost:11434"
    medium:
      provider: "ollama"
      model: "llama3.2"
      # base_url: "http://localhost:11434"
    complex:
      provider: "ollama"
      model: "llama3.1"  # 8B — more accurate analysis
      # base_url: "http://localhost:11434"

  # Hypothetical-question generation at ingest time.
  hype:
    provider: "ollama"
    model: "llama3.2"
    questions_per_chunk: 3
    # base_url: "http://localhost:11434"

  reranker:
    provider: "flashrank"  # free local reranker

retrieval:
  top_k: 5
  rerank: true
  compression: true

# Answer-generation settings (distinct from models.generation above).
generation:
  streaming: true
  max_tokens: 2048
  temperature: 0.1

storage:
  data_dir: "./quantumrag_data"