model.py
import os
import warnings
from typing import Any, Iterator, List, Optional

import dotenv
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from langchain_huggingface import HuggingFaceEmbeddings
from openai import OpenAI

warnings.filterwarnings("ignore")

# Load OPENROUTER_API_KEY (and any other settings) from a local .env file.
dotenv.load_dotenv()

MODEL_NAME = "deepseek/deepseek-chat-v3-0324"


class RagLLM(LLM):
    """Custom LangChain LLM that proxies chat completions through OpenRouter."""

    client: Optional[OpenAI] = None

    def __init__(self):
        super().__init__()
        # OpenRouter exposes an OpenAI-compatible API, so the stock OpenAI
        # client works unchanged once pointed at the OpenRouter base URL.
        self.client = OpenAI(
            base_url="https://openrouter.ai/api/v1",
            api_key=os.getenv("OPENROUTER_API_KEY"),
        )

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Send the prompt as a single user message and return the full reply."""
        response = self.client.chat.completions.create(
            model=MODEL_NAME,
            messages=[
                {"role": "user", "content": prompt},
            ],
            max_tokens=1024,
            temperature=kwargs.get("temperature", 0.1),
        )
        return response.choices[0].message.content

    # Note: LangChain's LLM base class calls _stream(prompt, stop, run_manager,
    # **kwargs); the signature below follows that contract (the original
    # declared the public stream() signature here, which never receives
    # LanguageModelInput or a RunnableConfig from the framework).
    def _stream(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[GenerationChunk]:
        """Yield the reply incrementally as GenerationChunk objects."""
        response = self.client.chat.completions.create(
            model=MODEL_NAME,
            messages=[
                {"role": "user", "content": prompt},
            ],
            max_tokens=1024,
            stream=True,
            temperature=kwargs.get("temperature", 0.1),
        )
        for chunk in response:
            if chunk.choices[0].delta.content:
                yield GenerationChunk(text=chunk.choices[0].delta.content)

    @property
    def _llm_type(self) -> str:
        return f"rag_llm_{MODEL_NAME}"


class RagEmbedding:
    """Thin wrapper around a HuggingFace sentence-embedding model."""

    def __init__(self, model_name="BAAI/bge-m3", device="cpu"):
        self.embedding = HuggingFaceEmbeddings(
            model_name=model_name,
            model_kwargs={"device": device},
        )

    def get_embedding_fun(self):
        return self.embedding
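

# --- Usage sketch (illustrative addition, not part of the original module) ---
# A minimal demo of how these classes might be wired together. It assumes
# OPENROUTER_API_KEY is set in the environment or a local .env file, and that
# the BGE-M3 weights can be downloaded on first use; the prompts are made up.
if __name__ == "__main__":
    llm = RagLLM()

    # Blocking call: invoke() returns the full completion at once.
    print(llm.invoke("What is retrieval-augmented generation?"))

    # Streaming call: stream() yields text chunks as they arrive.
    for chunk in llm.stream("Summarize RAG in one sentence."):
        print(chunk, end="", flush=True)
    print()

    # Embed a query; embed_query() returns a list of floats.
    embedding_fn = RagEmbedding().get_embedding_fun()
    vector = embedding_fn.embed_query("hello world")
    print(f"embedding dimension: {len(vector)}")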