-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapp.py
More file actions
74 lines (59 loc) · 2.53 KB
/
app.py
File metadata and controls
74 lines (59 loc) · 2.53 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
from flask import Flask, request, jsonify, send_from_directory
import google.generativeai as genai
import os

app = Flask(__name__, static_folder='static')

# Configure the Gemini API client.
# SECURITY: the key must come from the environment only. The previous code
# shipped a hard-coded fallback API key in source — that key is compromised
# and must be revoked; never re-add a literal secret here.
api_key = os.environ.get('GEMINI_API_KEY')
if not api_key:
    raise RuntimeError(
        'GEMINI_API_KEY environment variable is not set. '
        'Export it before starting the server.'
    )
genai.configure(api_key=api_key)

# Single model instance reused for every /chat request.
model = genai.GenerativeModel('gemini-3-pro-preview')
# System prompt prepended to every model request (see chat()): defines the
# assistant persona and its response rules (mirror the user's language, format
# code in fenced blocks, keep conversation context). This is a runtime string
# sent to the model — its exact wording is behavior, not documentation.
SYSTEM_PROMPT = """You are a versatile, friendly, and intelligent AI assistant.
Guidelines:
1. Respond in the SAME LANGUAGE the user uses (Vietnamese → Vietnamese, English → English, etc.)
2. For code questions: Provide code in proper blocks with language tags (```python, ```javascript) and explanations
3. For general questions: Be natural, concise, and helpful
4. For creative requests: Make them engaging and imaginative
5. Maintain conversation context - refer to previous messages when relevant
6. Always be positive, safe, and encourage interaction
7. Think step-by-step for complex problems
Created by Gia Khải"""
@app.route('/')
def serve_index():
    """Deliver the static single-page chat UI (static/index.html)."""
    static_dir = app.static_folder
    return send_from_directory(static_dir, 'index.html')
@app.route('/chat', methods=['POST'])
def chat():
    """Handle one chat turn.

    Expects JSON: {"message": str, "history": str (optional)}.
    Returns JSON: {"reply": str} on success, or {"error": str} with
    status 400 (bad input) / 500 (generation failure).
    """
    try:
        # silent=True: malformed JSON yields None and hits the 400 branch
        # below instead of raising a BadRequest from inside the try.
        data = request.get_json(silent=True)
        if not data:
            return jsonify({'error': 'Invalid request'}), 400

        # Reject missing, blank, or non-string messages with a 400 rather
        # than letting .strip() blow up on a non-string payload (was a 500).
        user_message = data.get('message', '')
        if not isinstance(user_message, str) or not user_message.strip():
            return jsonify({'error': 'Message is required'}), 400
        user_message = user_message.strip()
        conversation_history = data.get('history', '')

        # Build the full prompt: system rules, then prior turns (a
        # client-supplied transcript string), then the current request.
        full_prompt = SYSTEM_PROMPT
        if conversation_history:
            full_prompt += f"\n\n--- Conversation History ---\n{conversation_history}\n"
        full_prompt += f"\n--- Current User Request ---\n{user_message}"

        # Generate the reply with fixed sampling settings.
        response = model.generate_content(
            full_prompt,
            generation_config={
                'temperature': 0.7,
                'top_p': 0.9,
                'top_k': 40,
                'max_output_tokens': 2048,
            }
        )
        # NOTE: response.text raises when the model returned no usable
        # candidate (e.g. safety-blocked) — handled by the except below.
        return jsonify({'reply': response.text})
    except Exception as e:
        # Log full details server-side, but return a generic message:
        # raw exception text must not leak internals to the client.
        print(f"Error: {e}")
        return jsonify({'error': 'Failed to process request'}), 500
if __name__ == '__main__':
    # Bind to all interfaces so the app is reachable from other machines.
    # Debug mode is opt-in via FLASK_DEBUG: hard-coding debug=True while
    # listening on 0.0.0.0 exposes the Werkzeug debugger, which allows
    # arbitrary code execution to anyone who can reach the port.
    debug_mode = os.environ.get('FLASK_DEBUG', '').lower() in ('1', 'true', 'yes')
    print("🚀 Starting AI Assistant...")
    print("📡 Server running on http://localhost:5000")
    app.run(host='0.0.0.0', port=5000, debug=debug_mode)