-
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathlore_utils.py
More file actions
87 lines (72 loc) · 3.25 KB
/
lore_utils.py
File metadata and controls
87 lines (72 loc) · 3.25 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
import json
import logging
import os
import urllib.request
from enum import Enum
_CONFIG_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "config.json")
class MessageSource(Enum):
    """Origin of an incoming message, used to route handling logic."""
    # BUG FIX: the original values had trailing commas, which made each of
    # the first three members a one-element tuple (e.g. (0,)) instead of an
    # int. Plain ints restore the intended values.
    DISCORD_TEXT = 0
    DISCORD_TEXT_AND_IMAGE = 1
    DISCORD_VOICE = 2
    LOCAL = 3
def get_key_from_json_config_file(key_name: str, default: str) -> str | None:
try:
with open(_CONFIG_PATH, 'r') as file:
data = json.load(file).get(key_name)
if not data:
return default
return data # Get the key value by key name
except FileNotFoundError:
print(f"Error: config.json not found at {_CONFIG_PATH}")
except json.JSONDecodeError:
print(f"Error: config.json is not valid JSON.")
except Exception as e:
print(f"Error reading config: {e}")
return default
# ---- Tunables read from config.json (each falls back to the default below) ----

# System prompt that defines the assistant's persona.
SYSTEM_DESCRIPTION = get_key_from_json_config_file("role_description",
"You are an AI conversationalist named Lore Compendium, you respond to the user's messages with sophisticated, sardonic, and witty remarks like an English butler.")
# Larger/slower Ollama model for reasoning-heavy requests.
THINKING_OLLAMA_MODEL = get_key_from_json_config_file("thinking_ollama_model", "gpt-oss")
# Smaller/faster Ollama model for quick replies.
FAST_OLLAMA_MODEL = get_key_from_json_config_file("fast_ollama_model", "llama3.2")
# Model used to produce vector embeddings for retrieval.
EMBEDDING_MODEL = get_key_from_json_config_file("embedding_model", "mxbai-embed-large")
RERANK_MODEL = get_key_from_json_config_file("rerank_model", "") # empty = reranking disabled
# File extensions the document-ingestion pipeline accepts.
SUPPORTED_EXTENSIONS = ('.docx', '.pdf', '.xlsx', '.csv', '.txt', '.md')
DOC_FOLDER = "./input"  # folder scanned for documents to index
CHROMA_DB_PATH = "./chroma_store"  # on-disk ChromaDB location
CHROMA_COLLECTION_NAME = "word_docs_rag"  # ChromaDB collection name
def get_config() -> dict:
    """Return the parsed contents of config.json, or {} if it cannot be read."""
    result: dict = {}
    try:
        with open(_CONFIG_PATH, "r") as handle:
            result = json.load(handle)
    except Exception:
        # Best effort: any failure (missing file, bad JSON) yields an empty dict.
        pass
    return result
def save_config(updates: dict) -> None:
    """Merge *updates* into config.json, preserving all other keys."""
    # Later keys win, so values from *updates* override the stored ones.
    merged = {**get_config(), **updates}
    with open(_CONFIG_PATH, "w") as handle:
        json.dump(merged, handle, indent=2)
def setup_logging(level: int = logging.INFO) -> None:
    """Configure the root logger. Call once from the bot entry point."""
    log_format = "%(asctime)s [%(levelname)s] %(name)s: %(message)s"
    date_format = "%Y-%m-%d %H:%M:%S"
    logging.basicConfig(level=level, format=log_format, datefmt=date_format)
def check_ollama_health() -> list[str]:
    """
    Checks that Ollama is reachable and that all configured models are installed.
    Returns a list of human-readable error strings; empty means everything is OK.
    """
    errors = []
    try:
        with urllib.request.urlopen("http://localhost:11434/api/tags", timeout=5) as resp:
            data = json.loads(resp.read())
        available = {m["name"] for m in data.get("models", [])}
        required = [THINKING_OLLAMA_MODEL, FAST_OLLAMA_MODEL, EMBEDDING_MODEL]
        # FIX: the reranker was never health-checked. Verify it too when it is
        # configured (empty string means reranking is disabled — nothing to check).
        if RERANK_MODEL:
            required.append(RERANK_MODEL)
        for model in required:
            # Installed models are listed with a tag suffix (e.g. "llama3.2:latest"),
            # so accept either an exact match or a "name:tag" prefix match.
            if not any(a == model or a.startswith(model + ":") for a in available):
                errors.append(f" Model '{model}' is not installed. Run: ollama pull {model}")
    except OSError:
        # urllib.error.URLError subclasses OSError, so this covers refused
        # connections, timeouts, and DNS failures alike.
        errors.append(" Cannot connect to Ollama at http://localhost:11434")
        errors.append(" Make sure Ollama is installed and running.")
    return errors