-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: app.py
More file actions
404 lines (349 loc) · 12.3 KB
/
app.py
File metadata and controls
404 lines (349 loc) · 12.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
from datetime import datetime, timedelta, timezone
import asyncio
from contextlib import asynccontextmanager, suppress
from typing import Dict, List, Literal, TypedDict
import json
import base64
import os
from dotenv import load_dotenv
from openai import AsyncOpenAI, APIError, RateLimitError, APIConnectionError
from starlette.applications import Starlette
from starlette.endpoints import WebSocketEndpoint
from starlette.requests import Request
from starlette.responses import JSONResponse, Response
from starlette.routing import Route, WebSocketRoute
from starlette.websockets import WebSocket
# First loading environment variables from .env
load_dotenv()
# Defining OpenAI API Key and raising error if key is missing
# (fail fast at import time rather than on the first request).
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
if not OPENAI_API_KEY:
    raise RuntimeError("Please set OPENAI_API_KEY in environment or .env")
# Initialising OpenAI async client with the API key
openai_client = AsyncOpenAI(api_key=OPENAI_API_KEY)
# Keep in-memory sessions before GC intervals (Hours)
SESSION_TTL_HOURS = 12
# Structures for single chat message
class Message(TypedDict):
    """A single chat turn stored in a session's in-memory history."""

    # Who produced the turn; only these two roles are stored.
    role: Literal["user", "assistant"]
    # Plain-text body of the message.
    content: str
    # ISO-8601 UTC timestamp string (callers use datetime.isoformat()).
    timestamp: str
# Structure for in memory session data
class SessionData(TypedDict):
    """Per-session in-memory state, evicted once older than SESSION_TTL_HOURS."""

    # UTC creation time; compared against the cutoff by the cleanup loop.
    created_at: datetime
    # Ordered conversation history for this session.
    messages: List[Message]
# In-memory store for sessions: session_id -> SessionData
# One session per browser load (frontend generates sessionIds)
# NOTE(review): plain dict, so state is per-process — assumes a single
# worker process; confirm the deployment does not run multiple workers.
sessions: Dict[str, SessionData] = {}
# To create or get each session by Ids
def get_or_create_session(session_id: str) -> SessionData:
    """Return the session for *session_id*, creating a fresh one on first use."""
    try:
        return sessions[session_id]
    except KeyError:
        fresh = SessionData(created_at=datetime.now(timezone.utc), messages=[])
        sessions[session_id] = fresh
        return fresh
async def cleanup_old_sessions_loop():
    """
    Periodically drop in-memory sessions older than SESSION_TTL_HOURS.
    Nothing touches the file system; all history is ephemeral.
    """
    while True:
        try:
            # A coarse TTL only needs one sweep per hour.
            await asyncio.sleep(3600)
            cutoff = datetime.now(timezone.utc) - timedelta(hours=SESSION_TTL_HOURS)
            expired = [sid for sid, data in sessions.items() if data["created_at"] < cutoff]
            for sid in expired:
                sessions.pop(sid, None)
        except asyncio.CancelledError:
            # Task cancelled during shutdown; exit quietly.
            break
        except Exception as e:
            # Log unexpected failures but keep the loop alive.
            print(f"[cleanup] Error during cleanup: {e}")
# Helpers for OpenAI
async def generate_answer(session: SessionData, user_content: str) -> str:
    """
    Ask the chat model for a reply given the session history plus the new
    user message. History is in-memory only and scoped to one sessionId.
    """
    history = [
        {"role": m["role"], "content": m["content"]} for m in session["messages"]
    ]
    chat_messages = (
        [{"role": "system", "content": "You are a concise assistant."}]
        + history
        + [{"role": "user", "content": user_content}]
    )
    resp = await openai_client.chat.completions.create(
        model="gpt-4o-mini",
        messages=chat_messages,
    )
    # The SDK may report None content; normalise to an empty string.
    return resp.choices[0].message.content or ""
async def generate_speech_bytes(text: str) -> bytes:
    """
    Synthesize *text* to MP3 via OpenAI TTS and return the raw bytes.
    The audio is streamed over WebSocket and never persisted to disk.
    """
    response = await openai_client.audio.speech.create(
        model="gpt-4o-mini-tts",
        voice="alloy",
        input=text,
        response_format="mp3",
    )
    # Newer SDK versions wrap the body in a binary response object whose
    # .read() yields the bytes; otherwise assume it is already bytes-like.
    return response.read() if hasattr(response, "read") else response
# Helper for WebSocket
async def send_json(ws: WebSocket, payload: dict):
    """Serialize *payload* to JSON and send it as a single text frame."""
    text = json.dumps(payload)
    await ws.send_text(text)
# WS (WebSocket) Endpoint
class ChatWebSocket(WebSocketEndpoint):
    """Chat endpoint implementing the prompt -> LLM -> TTS pipeline.

    All frames are JSON text. The client sends
    ``{"type": "prompt", "sessionId", "requestId", "content"}`` and the
    server answers with ``status`` / ``audio`` / ``error`` frames keyed by
    the same ``requestId``.
    """

    encoding = "text"

    async def on_connect(self, websocket: WebSocket) -> None:
        """Accept the connection and bind it to the sessionId query param."""
        await websocket.accept()
        session_id = websocket.query_params.get("sessionId")
        if not session_id:
            # Consistency fix: use the shared send_json helper rather than
            # hand-rolling json.dumps + send_text as the original did.
            await self._send_error(websocket, "Missing sessionId in query parameters.")
            await websocket.close()
            return
        websocket.scope["session_id"] = session_id
        print(f"[ws] Connected session {session_id}")

    async def on_disconnect(self, websocket: WebSocket, close_code: int) -> None:
        """Log the disconnect; history stays in memory until TTL cleanup."""
        session_id = websocket.scope.get("session_id")
        print(f"[ws] Disconnected session {session_id}, code={close_code}")

    async def on_receive(self, websocket: WebSocket, data) -> None:
        """
        Handle a single prompt message from the client.
        Expected payload:
        {
            "type": "prompt",
            "sessionId": "...",
            "requestId": "...",
            "content": "User text..."
        }
        """
        try:
            payload = json.loads(data)
        except json.JSONDecodeError:
            await self._send_error(websocket, "Invalid JSON payload.")
            return
        if payload.get("type") != "prompt":
            await self._send_error(websocket, "Unsupported message type.")
            return
        prompt = await self._extract_prompt(websocket, payload)
        if prompt is None:
            return  # an error frame has already been sent
        session_id, request_id, content = prompt
        session = get_or_create_session(session_id)
        # Record the user turn in the in-memory history before generating.
        session["messages"].append(
            Message(
                role="user",
                content=content,
                timestamp=datetime.now(timezone.utc).isoformat(),
            )
        )
        await self._send_status(websocket, request_id, "llm_start")
        await self._run_pipeline(websocket, session, request_id, content)

    async def _extract_prompt(self, websocket: WebSocket, payload: dict):
        """Validate a prompt payload.

        Returns (session_id, request_id, content) on success, or None after
        sending the matching error frame to the client.
        """
        # Fall back to the sessionId bound at connect time if none in payload.
        session_id = payload.get("sessionId") or websocket.scope.get("session_id")
        request_id = payload.get("requestId")
        content = str(payload.get("content", "")).strip()
        if not isinstance(request_id, str) or not request_id:
            # No requestId to echo back in this case.
            await self._send_error(websocket, "requestId must be a non-empty string.")
            return None
        if not session_id:
            await self._send_error(websocket, "Missing sessionId.", request_id)
            return None
        # The payload's sessionId must match the one bound to this socket.
        ws_session_id = websocket.scope.get("session_id")
        if ws_session_id and ws_session_id != session_id:
            await self._send_error(
                websocket, "sessionId mismatch for this connection.", request_id
            )
            return None
        if not content:
            await self._send_error(
                websocket, "content must be a non-empty string.", request_id
            )
            return None
        if len(content) > 2000:
            await self._send_error(
                websocket, "content is too long (max 2000 characters).", request_id
            )
            return None
        return session_id, request_id, content

    async def _run_pipeline(
        self, websocket: WebSocket, session: SessionData, request_id: str, content: str
    ) -> None:
        """Run LLM then TTS for one prompt, streaming status/audio frames."""
        try:
            # Step1 -> Generate assistant text; timeout so a hung upstream
            # call cannot stall the connection indefinitely.
            assistant_text = await asyncio.wait_for(
                generate_answer(session, content),
                timeout=20,
            )
            # Add assistant message to history.
            session["messages"].append(
                Message(
                    role="assistant",
                    content=assistant_text,
                    timestamp=datetime.now(timezone.utc).isoformat(),
                )
            )
            # Step2/3 -> Synthesize speech, again bounded by a timeout.
            await self._send_status(websocket, request_id, "tts_start")
            audio_bytes = await asyncio.wait_for(
                generate_speech_bytes(assistant_text),
                timeout=30,
            )
            await self._send_status(websocket, request_id, "tts_done")
            # Step4 -> Ship the MP3 as base64 inside a JSON text frame.
            await send_json(
                websocket,
                {
                    "type": "audio",
                    "requestId": request_id,
                    "mimeType": "audio/mpeg",
                    "data": base64.b64encode(audio_bytes).decode("ascii"),
                },
            )
        # NOTE: RateLimitError is a subclass of APIError, so it must be
        # caught first — preserve this handler order.
        except RateLimitError:
            await self._send_error(
                websocket, "Rate limited by OpenAI, please retry shortly.", request_id
            )
        except APIConnectionError:
            await self._send_error(
                websocket, "Trouble reaching OpenAI, please try again.", request_id
            )
        except APIError as e:
            print(f"[ws] OpenAI APIError: {e}")
            await self._send_error(
                websocket, "OpenAI API error while generating response.", request_id
            )
        except asyncio.TimeoutError:
            await self._send_error(
                websocket, "Timed out while contacting OpenAI.", request_id
            )
        except Exception as e:
            print(f"[ws] Error handling prompt: {e}")
            await self._send_error(
                websocket, "Internal error while generating response.", request_id
            )

    async def _send_error(
        self, websocket: WebSocket, message: str, request_id: str | None = None
    ) -> None:
        """Send an error frame, echoing requestId when one is known.

        Key order matches the original wire format: type, [requestId], message.
        """
        frame: dict = {"type": "error"}
        if request_id is not None:
            frame["requestId"] = request_id
        frame["message"] = message
        await send_json(websocket, frame)

    async def _send_status(
        self, websocket: WebSocket, request_id: str, phase: str
    ) -> None:
        """Send a pipeline status frame for the given phase."""
        await send_json(
            websocket,
            {
                "type": "status",
                "requestId": request_id,
                "phase": phase,
            },
        )
# To check the health of BE using HTTP or Tests
async def health(request: Request) -> Response:
    """Liveness probe; always reports ok."""
    return JSONResponse(content={"status": "ok"})
# Routes
# /health -> simple HTTP liveness check; /ws -> chat WebSocket endpoint.
routes = [
    Route("/health", health, methods=["GET"]),
    WebSocketRoute("/ws", ChatWebSocket),
]
@asynccontextmanager
async def lifespan(app: Starlette):
    """Run the session-cleanup task for the whole lifetime of the app."""
    # Startup: launch the background sweep of expired in-memory sessions.
    janitor = asyncio.create_task(cleanup_old_sessions_loop())
    print("[app] Started, cleanup loop running.")
    try:
        yield
    finally:
        # Shutdown: cancel the sweep and swallow the expected cancellation.
        janitor.cancel()
        try:
            await janitor
        except asyncio.CancelledError:
            pass
        print("[app] Shutdown complete.")
# ASGI application entry point: wires the routes and lifespan together.
app = Starlette(routes=routes, lifespan=lifespan)