-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathFinBot.py
More file actions
191 lines (165 loc) · 7.79 KB
/
FinBot.py
File metadata and controls
191 lines (165 loc) · 7.79 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
import os
import re
import tempfile
from typing import Any, Optional

import pymupdf  # PyMuPDF
import streamlit as st
from dotenv import load_dotenv
from huggingface_hub import login
from langchain.chains import ConversationalRetrievalChain
from langchain.docstore.document import Document
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
# HuggingFaceEndpoint moved to the langchain_huggingface package in recent releases.
from langchain_huggingface import HuggingFaceEndpoint
from PIL import Image
load_dotenv()
class MyApp:
    """Per-session RAG state.

    Lazily builds and caches a ConversationalRetrievalChain over an uploaded
    PDF, tracks the chat history, and remembers the page index of the most
    recently cited source document.
    """

    def __init__(self, hugging_face_api: Optional[str] = None) -> None:
        # Token used to authenticate against the Hugging Face inference endpoint.
        self.hugging_face_api: Optional[str] = hugging_face_api
        self.chain = None             # built lazily on first __call__
        self.chat_history: list = []  # list of (question, answer) pairs
        self.N: int = 0               # page index of the last cited source
        self.count: int = 0           # 0 until the chain has been built once

    def __call__(self, file_path: str) -> Any:
        """Return the retrieval chain, building it once on first use."""
        if self.count == 0:
            self.chain = self.build_chain(file_path)
            self.count += 1
        return self.chain

    def process_file_in_chunks(self, file: str, chunk_size: int = 5) -> list:
        """Split the PDF at *file* into Documents of *chunk_size* pages each.

        Each Document's metadata["page"] records the index of its first page
        so answers can be traced back to a source page.
        """
        chunks = []
        # FIX: open the PDF in a context manager so the file handle is
        # closed when chunking finishes (the original leaked it).
        with pymupdf.open(file) as doc:
            total_pages = len(doc)
            for start in range(0, total_pages, chunk_size):
                # Join page texts in one pass instead of += concatenation.
                text = "".join(
                    doc[page].get_text()
                    for page in range(start, min(start + chunk_size, total_pages))
                )
                chunks.append(Document(page_content=text, metadata={"page": start}))
        return chunks

    def build_chain(self, file_path: str):
        """Embed the document chunks, index them in FAISS, and wire up a
        ConversationalRetrievalChain backed by a Hugging Face endpoint LLM."""
        chunks = self.process_file_in_chunks(file_path)
        embeddings = HuggingFaceEmbeddings(
            model_name="sentence-transformers/all-MiniLM-l6-v2"
        )
        pdfsearch = FAISS.from_documents(chunks, embeddings)
        # HuggingFaceEndpoint is the interface recommended by recent langchain
        # releases; gemma-2b-it is the instruction-tuned variant suited to Q&A.
        llm = HuggingFaceEndpoint(
            repo_id="google/gemma-2b-it",
            huggingfacehub_api_token=self.hugging_face_api,
            task="text-generation",
            # Optional generation knobs:
            # model_kwargs={"max_new_tokens": 512, "temperature": 0.7, "top_p": 0.95},
        )
        return ConversationalRetrievalChain.from_llm(
            llm,
            retriever=pdfsearch.as_retriever(search_kwargs={"k": 1}),
            return_source_documents=True,  # needed to surface the cited page
        )
def get_response(app, query, file_path):
    """Run *query* through the app's retrieval chain and return the answer.

    Side effects: appends (query, answer) to app.chat_history and sets app.N
    to the page index of the first cited source document (0 when no page
    metadata is present). Returns None when no file path was supplied.
    """
    if not file_path:
        st.error("Upload a PDF")
        return None

    chain = app(file_path)
    result = chain(
        {"question": query, "chat_history": app.chat_history},
        return_only_outputs=True,
    )

    answer = result["answer"]
    app.chat_history.append((query, answer))

    sources = result["source_documents"]
    has_page = bool(sources) and 'page' in sources[0].metadata
    app.N = sources[0].metadata['page'] if has_page else 0
    return answer
def render_file(file_path, page_num):
    """Render page *page_num* of the PDF at *file_path* as a PIL RGB image.

    The page index is clamped to the document's valid range so an
    out-of-range citation never raises IndexError. Rendered at 150 dpi.
    """
    # FIX: close the document via a context manager (the original leaked
    # the open file handle on every render).
    with pymupdf.open(file_path) as doc:
        page_num = max(0, min(page_num, len(doc) - 1))
        # Pixmap owns its pixel buffer, so it stays valid after doc closes.
        pix = doc[page_num].get_pixmap(dpi=150)
    return Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
def purge_chat():
    """Reset the conversation: clear the session MyApp's cached chain and
    chat history, and drop the processed-document session keys."""
    if 'app_instance' in st.session_state:
        instance = st.session_state.app_instance
        instance.chat_history = []
        instance.count = 0
        instance.chain = None
    for key in ('chain_built', 'tmp_file_path'):
        if key in st.session_state:
            del st.session_state[key]
# --- Page chrome -----------------------------------------------------------
st.set_page_config(page_title="FinBot")
st.title("💬 FinBot")
st.caption("🚀 LLM + RAG + Langchain powered!")

# Background image + transparent chat-input bar, injected as raw CSS.
page_bg_img = '''
<style>
.stApp{
background-image: url("https://images.unsplash.com/photo-1483791424735-e9ad0209eea2?q=80&w=1887&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D");
background-size: cover;
}
[data-testid="stBottom"] > div {
background: transparent;
}
</style>
'''
st.markdown(page_bg_img,unsafe_allow_html=True)

# --- Auth: the HF token must come from the environment (.env is loaded above).
huggingfacehub_api = os.getenv("HUGGINGFACEHUB_API_TOKEN")
if huggingfacehub_api is None:
    st.error("Hugging Face API token not found. Please set the HUGGINGFACEHUB_API_TOKEN environment variable.")
    st.stop()
login(huggingfacehub_api)

# One MyApp per browser session, persisted across Streamlit reruns.
if 'app_instance' not in st.session_state:
    st.session_state.app_instance = MyApp(huggingfacehub_api)
app = st.session_state.app_instance

with st.sidebar:
    uploaded_file = st.file_uploader("Upload a PDF", type=["pdf"])
    if uploaded_file is not None:
        # A new file name means a new document: purge old state and spill the
        # upload to a temp file (delete=False so it survives this rerun).
        if 'last_uploaded_file_name' not in st.session_state or \
           st.session_state['last_uploaded_file_name'] != uploaded_file.name:
            purge_chat()
            with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as tmp_file:
                tmp_file.write(uploaded_file.read())
                tmp_file_path = tmp_file.name
            st.session_state['tmp_file_path'] = tmp_file_path
            st.session_state['last_uploaded_file_name'] = uploaded_file.name
            st.success(f"File '{uploaded_file.name}' uploaded successfully!")
        else:
            st.info("Same file already uploaded.")
    if st.button("Process Document"):
        if 'tmp_file_path' not in st.session_state:
            st.warning("Please upload a document first.")
        elif 'chain_built' not in st.session_state or not st.session_state['chain_built']:
            # Build embeddings + chain once; 'chain_built' marks success.
            with st.spinner("Processing the document and generating embeddings..."):
                try:
                    app(st.session_state['tmp_file_path'])
                    st.session_state['chain_built'] = True
                    st.success("Document processed and embeddings generated successfully!")
                    st.image(render_file(st.session_state['tmp_file_path'], 0), caption="Page 1")
                except Exception as e:
                    st.error(f"An error occurred during processing: {e}")
        else:
            st.info("Document already processed. You can ask questions now.")
            st.image(render_file(st.session_state['tmp_file_path'], 0), caption="Page 1")
    if st.button("Clear Chat"):
        purge_chat()
        st.success("Chat history cleared and document chain reset.")
        if 'tmp_file_path' in st.session_state:
            st.image(render_file(st.session_state['tmp_file_path'], 0), caption="Page 1")

# --- Main Q&A flow ---------------------------------------------------------
query = st.text_input("Enter your query here:")
if query:
    if 'tmp_file_path' not in st.session_state or 'chain_built' not in st.session_state or not st.session_state['chain_built']:
        st.warning("Please upload and process a PDF document first.")
    else:
        with st.spinner("Generating response..."):
            response = get_response(app, query, st.session_state['tmp_file_path'])
        if response:
            # Some HF models echo the prompt; keep only the text after
            # "Helpful Answer:" when that marker is present.
            pattern = r"Helpful Answer: (.+)"
            match = re.search(pattern, response)
            helpful_answer = match.group(1) if match else response
            st.write(f"**Response:** {helpful_answer}")
            # app.N was set by get_response to the cited source page.
            if app.N is not None and 'tmp_file_path' in st.session_state:
                st.image(render_file(st.session_state['tmp_file_path'], app.N), caption=f"Source Page: {app.N + 1}")

st.subheader("Chat History")
for q, a in app.chat_history:
    st.write(f"**You:** {q}")
    st.write(f"**FinBot:** {a}")