Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
41 changes: 26 additions & 15 deletions ai-service/app/api/v1/chat.py
Original file line number Diff line number Diff line change
@@ -1,27 +1,32 @@
import re
import logging

from fastapi import APIRouter, HTTPException
from app.services.memory_service import memory_service
from app.models.chat import ChatRequest, ChatResponse
from app.services.brain.graph import brain
from langchain_core.messages import HumanMessage, AIMessage

router = APIRouter()
logger = logging.getLogger(__name__)

@router.post("", response_model=ChatResponse)
async def chat(request: ChatRequest):
# Convert history dicts to LangChain messages
history = []
for msg in request.history or []:
if msg['role'] == 'user':
history.append(HumanMessage(content=msg['content']))
else:
history.append(AIMessage(content=msg['content']))

# Add current message
history.append(HumanMessage(content=request.message))

# Run Graph
try:
initial_state = {"messages": history, "emotion": "neutral"}
config = {"configurable": {"thread_id": request.session_id or "default"}}
conversation_id = request.conversation_id

if not conversation_id:
new_id = await memory_service.create_conversation()
conversation_id = str(new_id) if new_id else "default"

initial_state = {
"messages": [HumanMessage(content=request.message)],
"emotion": "neutral",
"conversation_id": conversation_id,
}

config = {"configurable": {"thread_id": conversation_id}}
result = brain.invoke(initial_state, config=config)

# Extract response
Expand All @@ -37,20 +42,26 @@ async def chat(request: ChatRequest):
"name": tc.get("name"),
"args": tc.get("args", {})
})

# Clean tags
text = last_msg
if text.startswith("["):
import re
match = re.match(r'^\[(.*?)\]', text)
if match:
text = text[match.end():].strip()

return ChatResponse(
text=text,
emotion=emotion,
conversation_id=conversation_id,
tools_used=tools_used if tools_used else None
)

except Exception as e:
logger.error(f"Chat error: {e}")

return ChatResponse(
text=f"Brain Freeze: {str(e)}",
text=f"Brain Freeze: {str(e)}",
emotion="confused",
conversation_id=request.conversation_id or "default",
)
1 change: 0 additions & 1 deletion ai-service/app/api/v1/memory.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,6 @@ async def upload_memory(file: UploadFile = File(...)):

# 3. Store Vectors
count = 0
await memory_service.ensure_collection()

for chunk in chunks:
await memory_service.store(
Expand Down
7 changes: 4 additions & 3 deletions ai-service/app/models/chat.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,12 @@
from pydantic import BaseModel
from typing import Optional

class ChatRequest(BaseModel):
    # Incoming payload for the chat endpoint.
    # The user's message for the current turn.
    message: str
    # Prior turns as {"role": ..., "content": ...} dicts supplied by the client.
    # NOTE(review): looks superseded by server-side history keyed on
    # conversation_id — confirm whether any caller still sends this.
    history: list[dict] | None = None
    # NOTE(review): appears superseded by conversation_id as well — confirm
    # before removing; kept for backward compatibility.
    session_id: str = "default"
    # Identifier of an existing conversation; when absent the server creates one.
    conversation_id: Optional[str] = None

class ChatResponse(BaseModel):
    """Response payload returned by the chat endpoint."""

    # Reply text produced by the brain graph.
    text: str
    # Assistant emotion label; defaults to "neutral" when none is supplied.
    # NOTE(review): the source showed a duplicate bare `emotion: str`
    # annotation immediately overridden by this defaulted one; the dead
    # duplicate is removed here (behavior unchanged).
    emotion: str = "neutral"
    # Conversation this reply belongs to; None when no conversation context.
    conversation_id: Optional[str] = None
    # Tool calls made while answering, as {"name", "args"} dicts; None if none.
    tools_used: list[dict] | None = None
44 changes: 44 additions & 0 deletions ai-service/app/models/database.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
from __future__ import annotations
from pydantic import BaseModel, Field
from datetime import datetime
from uuid import UUID
from typing import Optional

# Table Messages

class Message(BaseModel):
    """A persisted chat message row (messages table)."""

    id: UUID
    conversation_id: UUID
    role: str
    content: str
    # Emotion label attached to the message; stored as "neutral" by default.
    emotion: str = Field(default="neutral")

class CreateMesssage(BaseModel):
    """Payload for inserting a new message row.

    NOTE(review): the class name carries a typo ("Messsage"); it is kept so
    existing imports keep working, and a correctly spelled alias is provided
    below — prefer the alias in new code.
    """

    conversation_id: UUID
    role: str
    content: str
    # Emotion label for the message being created.
    emotion: str = "neutral"


# Correctly spelled, backward-compatible alias for CreateMesssage.
CreateMessage = CreateMesssage

# Table Conversation

class Conversation(BaseModel):
    """A conversation row (conversations table)."""

    id: UUID
    # Optional human-readable title; absent until one is set.
    title: Optional[str] = Field(default=None)
    created_at: datetime
    updated_at: datetime

class CreateConversation(BaseModel):
    """Payload for creating a conversation; title falls back to a stock label."""

    title: Optional[str] = Field(default="New Conversation")

# Table Memories

class Memory(BaseModel):
    """A stored long-term memory row (memories table)."""

    id: UUID
    content: str
    # Arbitrary key/value annotations; each instance gets its own fresh dict.
    metadata: dict = Field(default_factory=dict)
    created_at: datetime

class CreateMemory(BaseModel):
    """Payload for storing a new long-term memory."""

    content: str
    # Arbitrary key/value annotations; defaults to an empty, per-instance dict.
    metadata: dict = Field(default_factory=dict)

74 changes: 67 additions & 7 deletions ai-service/app/services/brain/nodes/generate.py
Original file line number Diff line number Diff line change
@@ -1,30 +1,90 @@
import asyncio
import concurrent.futures
import logging

from uuid import UUID
from app.services.brain.state import BrainState
from app.services.llm import llm_service
from app.services.prompter import prompter
from app.services.memory_service import memory_service
from langchain_core.messages import AIMessage, HumanMessage

# Node to generate response based on persona, conversation history and detected emotion (conversation history not being tested yet)
# Cap on how many stored history messages get_history loads per turn;
# 9999 effectively disables windowing.
session_history_window = 9999

def generate_response(state: BrainState) -> dict:
    """Synchronous wrapper that drives the async generate() to completion.

    Runs asyncio.run on a dedicated worker thread — presumably so this node
    can be called from a thread that already hosts a running event loop
    (where a direct asyncio.run would raise); confirm against the caller.
    """
    with concurrent.futures.ThreadPoolExecutor() as executor:
        return executor.submit(asyncio.run, generate(state)).result()


# Node to generate response based on persona, conversation history and detected emotion (conversation history not being tested yet)
async def generate(state: BrainState) -> dict:
    """Core generation node: builds an LLM prompt from persona, retrieved
    memories and stored conversation history, calls the LLM, then persists
    the new user/assistant exchange.

    NOTE(review): this span was captured from a rendered diff; several lines
    below look like superseded old code left interleaved with the new code
    (flagged inline) — verify against the merged file.
    """

    # BrainState contains conversation history and detected emotion
    messages = state["messages"]
    detected_emotion = state.get("emotion", "neutral")
    raw_id = state.get("conversation_id") or ""

    # A real conversation id is required so history lookup and persistence
    # below can be keyed; "default" is treated as missing.
    if not raw_id or raw_id == "default":
        raise ValueError("BrainState missing valid conversation_id")

    conversation_id = UUID(raw_id)

    # Reformat LangChain messages to LLM-style {"role", "content"} dicts.
    # NOTE(review): messages_format built in this loop is discarded by the
    # full reassignment further down — the duplicate appends look like diff
    # residue from an older version; confirm and drop the dead work.
    messages_format = []
    current_message = []
    for msg in messages:
        if isinstance(msg, HumanMessage):
            messages_format.append({"role": "user", "content": msg.content})
            current_message.append({"role": "user", "content": msg.content})
        elif isinstance(msg, AIMessage):
            messages_format.append({"role": "assistant", "content": msg.content})
            current_message.append({"role": "assistant", "content": msg.content})
        elif isinstance(msg, dict):
            messages_format.append(msg)
            current_message.append(msg)

    # Latest message content doubles as the memory-retrieval query below.
    if current_message:
        user_message = current_message[-1]["content"]
    else:
        user_message = ""

    # Load stored history and semantically related memories concurrently.
    history_model, memories = await asyncio.gather(
        memory_service.get_history(conversation_id, session_history_window),
        memory_service.search(query=user_message, limit=3),
    )

    history = history_model

    # System prompt built from the persona prompter.
    system_message = prompter.build("", context=None)[0]
    # NOTE(review): this insert is overwritten by the prompt rebuild below —
    # presumably more diff residue; confirm.
    messages_format.insert(0, system_message)

    # Fold retrieved memories into the system prompt
    # ("Ingatan sebelumnya" = "previous memories").
    if memories:
        memory_block = "\n".join(f"-{message}" for message in memories)
        system_message = {
            "role" : "system",
            "content": (system_message["content"] + f"Ingatan sebelumnya: \n {memory_block}")
        }


    # Final prompt: system message + persisted history + current turn(s).
    messages_format = [system_message] + history + current_message

    # Generate response from LLM
    response = llm_service.generate(messages_format)
    emotion = response.get("emotion", "neutral")

    # Persist the exchange: a row per side in the messages table plus a
    # combined vector-store memory tagged with the conversation id.
    await asyncio.gather(
        memory_service.add_interaction(
            conversation_id=conversation_id,
            user_text=user_message,
            assistant_text=response["text"],
            user_emotion=detected_emotion,
            assistant_emotion=emotion
        ),

        memory_service.store(
            text=f"User: {user_message} \n AURA: {response['text']}",
            metadata={"conversation_id": str(conversation_id)},
        ),
    )

    # NOTE(review): response["emotion"] raises KeyError if the key is absent,
    # while `emotion` above already holds the defaulted value — consider
    # returning `emotion` instead.
    return {"messages": [AIMessage(content=response["text"])], "emotion": response["emotion"]}
3 changes: 2 additions & 1 deletion ai-service/app/services/brain/state.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,4 +6,5 @@
# BrainState for conversation history and emotion tracking
class BrainState(TypedDict):
    """Shared LangGraph state passed between brain nodes.

    NOTE(review): the source showed `emotion: str` declared twice (identical
    lines — clearly diff residue); the duplicate is removed here, which is
    semantically a no-op.
    """

    # Conversation so far; the Annotated operator.add reducer concatenates
    # message lists returned by successive nodes.
    messages: Annotated[List[BaseMessage], operator.add]
    # Emotion label for the current turn.
    emotion: str
    # UUID string of the conversation this state belongs to.
    conversation_id: str
Loading