diff --git a/backend/backend/application/context/chat_ai_context.py b/backend/backend/application/context/chat_ai_context.py
index 8506dca..be966f5 100644
--- a/backend/backend/application/context/chat_ai_context.py
+++ b/backend/backend/application/context/chat_ai_context.py
@@ -326,9 +326,18 @@ def _process_completed(self, *args, **kwargs):
# On successful completion, Closing the event thread
raise StopIteration
+ def _process_chat_intent(self, *args, **kwargs):
+ send_socket_message(
+ sid=kwargs["sid"],
+ channel_id=kwargs["channel_id"],
+ chat_id=kwargs["chat_id"],
+ chat_message_id=kwargs["chat_message_id"],
+ chat_intent_name=kwargs.get("chat_intent"),
+ )
+
def process_event(self, *args, **kwargs):
supported_events = ["thought_chain", "prompt_response",
- "summary", "chat_name", "completed"]
+ "summary", "chat_name", "completed", "chat_intent"]
event_type = kwargs.get("event_type")
if event_type not in supported_events:
raise ValueError(f"Unsupported event type: {event_type}")
diff --git a/backend/backend/application/context/chat_message_context.py b/backend/backend/application/context/chat_message_context.py
index aeb41d9..ddd11de 100644
--- a/backend/backend/application/context/chat_message_context.py
+++ b/backend/backend/application/context/chat_message_context.py
@@ -144,37 +144,30 @@ def persist_prompt(
llm_model_architect: str,
llm_model_developer: str,
generated_chat_res_id: str = None,
- chat_intent_id: str = None,
chat_id: str = None,
user=None,
) -> ChatMessage:
"""
Create a new prompt within a Chat. If chat_id is None, create a new Chat.
Return the chat_message_id of the newly created ChatMessage.
+ chat_intent is left null here; the AI service auto-detects it and the
+ backend persists it on the message when the response arrives.
"""
if not prompt.strip():
raise InvalidChatPrompt()
- chat_intent = None
transformation_type = 'TRANSFORM' if discussion_type == 'GENERATE' else 'DISCUSSION'
- if chat_intent_id:
- try:
- chat_intent = ChatIntent.objects.get(chat_intent_id=chat_intent_id)
- except ChatIntent.DoesNotExist:
- chat_intent = None
if not chat_id:
chat = Chat.objects.create(
project=self.project_instance,
chat_name="Untitled Chat",
- chat_intent=chat_intent,
llm_model_architect=llm_model_architect,
llm_model_developer=llm_model_developer,
user=user,
)
else:
chat = self._get_chat_or_raise(chat_id=chat_id, must_be_active=True)
- chat.chat_intent = chat_intent
chat.llm_model_architect = llm_model_architect
chat.llm_model_developer = llm_model_developer
chat.discussion_type = discussion_type
@@ -185,7 +178,6 @@ def persist_prompt(
chat_message = ChatMessage.objects.create(
chat=chat,
prompt=prompt,
- chat_intent=chat_intent,
llm_model_architect=llm_model_architect,
llm_model_developer=llm_model_developer,
discussion_type= discussion_type,
diff --git a/backend/backend/application/context/llm_context.py b/backend/backend/application/context/llm_context.py
index b9c563e..2dfc09d 100644
--- a/backend/backend/application/context/llm_context.py
+++ b/backend/backend/application/context/llm_context.py
@@ -1,6 +1,6 @@
import json
import logging
-from typing import Any
+from typing import Any, Optional
import eventlet
import redis
@@ -52,12 +52,47 @@ def create_redis_xgroup(self, channel_id, group_id):
else:
raise
+ def _resolve_chat_intent(self, chat_message_id: str, data: dict, content: Any) -> Optional[str]:
+ """
+ Pull the AI-detected chat_intent out of the inbound payload and persist it
+ to ChatMessage.chat_intent (and the parent Chat) the first time we see it.
+ The name is cached and reused even if the DB write fails (failure is only logged).
+ """
+ if not hasattr(self, "_chat_intent_by_msg"):
+ self._chat_intent_by_msg = {}
+
+ cached = self._chat_intent_by_msg.get(chat_message_id)
+ if cached:
+ return cached
+
+ intent_name = data.get("chat_intent")
+ if not intent_name and isinstance(content, dict):
+ intent_name = content.get("chat_intent")
+ if not intent_name:
+ return None
+
+ try:
+ from backend.core.models.chat_intent import ChatIntent
+ from backend.core.models.chat_message import ChatMessage
+ chat_intent = ChatIntent.objects.get(name=intent_name)
+ chat_message = ChatMessage.objects.get(chat_message_id=chat_message_id)
+ chat_message.chat_intent = chat_intent
+ chat_message.save(update_fields=["chat_intent"])
+ # Also update the parent Chat so Chat.chat_intent keeps tracking the
+ # latest message's intent (consumed by the PastConversations badge).
+ chat_message.chat.chat_intent = chat_intent
+ chat_message.chat.save(update_fields=["chat_intent"])
+ except Exception as e:
+ logging.error(f"Failed to persist chat_intent={intent_name}: {e}")
+
+ self._chat_intent_by_msg[chat_message_id] = intent_name
+ return intent_name
+
def process_message(
self,
sid: str,
channel_id: str,
chat_id: str,
- chat_intent: str,
payload: dict[str, Any],
discussion_status: str
):
@@ -75,6 +110,7 @@ def process_message(
2: "summary",
3: "chat_name",
4: "completed",
+ 5: "chat_intent",
99: "stop",
}
@@ -83,6 +119,8 @@ def process_message(
chat_message_id = data["chat_message_id"]
content = data["content"]
+ chat_intent = self._resolve_chat_intent(chat_message_id, data, content)
+
if event_type == "chat_name":
self.chat_name = data["content"]
self.persist_response(
@@ -120,7 +158,7 @@ def _validate_message(self, group_id, channel_id):
)
return messages
- def _handle_redis_message(self, sid, channel_id, chat_id, chat_intent, group_id, messages, discussion_status: str):
+ def _handle_redis_message(self, sid, channel_id, chat_id, group_id, messages, discussion_status: str):
for _, msg_list in messages:
for message_id, payload in msg_list:
logging.info(f" === Message ID: {message_id} ===")
@@ -129,7 +167,6 @@ def _handle_redis_message(self, sid, channel_id, chat_id, chat_intent, group_id,
sid=sid,
channel_id=channel_id,
chat_id=chat_id,
- chat_intent=chat_intent,
payload=payload,
discussion_status=discussion_status
)
@@ -138,7 +175,7 @@ def _handle_redis_message(self, sid, channel_id, chat_id, chat_intent, group_id,
self.redis_client.xack(channel_id, group_id, message_id)
def __stream_listener(
- self, sid: str, channel_id: str, chat_id: str, chat_message_id: str, chat_intent: str, group_id: str, discussion_status: str
+ self, sid: str, channel_id: str, chat_id: str, chat_message_id: str, group_id: str, discussion_status: str
):
while True:
@@ -148,7 +185,7 @@ def __stream_listener(
if not messages:
continue
- self._handle_redis_message(sid, channel_id, chat_id, chat_intent, group_id, messages, discussion_status)
+ self._handle_redis_message(sid, channel_id, chat_id, group_id, messages, discussion_status)
except redis.exceptions.RedisError as e:
logging.error(f"[REDIS ERROR] {e}")
@@ -196,15 +233,15 @@ def __stream_listener(
)
break
- def listen_to_redis_stream(self, sid: str, channel_id: str, chat_id: str, chat_message_id: str, chat_intent: str, discussion_status: str):
+ def listen_to_redis_stream(self, sid: str, channel_id: str, chat_id: str, chat_message_id: str, discussion_status: str):
"""Listens to the Redis stream from llm server and processes the messages."""
group_id = f"group_{chat_id}_{chat_message_id}"
self.create_redis_xgroup(channel_id, group_id)
- self.__stream_listener(sid, channel_id, chat_id, chat_message_id, chat_intent, group_id, discussion_status)
+ self.__stream_listener(sid, channel_id, chat_id, chat_message_id, group_id, discussion_status)
- def stream_prompt_response(self, sid: str, channel_id: str, chat_id: str, chat_message_id: str, chat_intent: str, discussion_status: str):
+ def stream_prompt_response(self, sid: str, channel_id: str, chat_id: str, chat_message_id: str, discussion_status: str):
"""Starts a background thread to listen redis pubsub channel from AI server"""
- args = (sid, channel_id, chat_id, chat_message_id, chat_intent, discussion_status)
+ args = (sid, channel_id, chat_id, chat_message_id, discussion_status)
try:
sio.start_background_task(self.listen_to_redis_stream, *args)
except Exception as e:
@@ -237,12 +274,10 @@ def process_prompt(self, sid: str, channel_id: str, chat_id: str, chat_message_i
"GENERATE": ChatMessageStatus.GENERATE,
}
if is_retry:
- chat_intent = ChatMessageStatus.TRANSFORM_RETRY
prompt = (
f"Faulty yaml:{chat_message.technical_content} \n Error:{chat_message.transformation_error_message}"
)
else:
- chat_intent = chat_message.chat_intent.name
prompt = chat_message.prompt
if discussion_status in DISCUSSION_STATUS_MAP:
@@ -311,7 +346,6 @@ def process_prompt(self, sid: str, channel_id: str, chat_id: str, chat_message_i
"db_map": db_metadata,
"visitran_model": visitran_models,
"chat_name": chat_name,
- "chat_intent": chat_intent,
"db_type": self.project_instance.database_type,
"llm_model_architect": chat_message.llm_model_architect,
"llm_model_developer": chat_message.llm_model_developer,
@@ -335,7 +369,6 @@ def process_prompt(self, sid: str, channel_id: str, chat_id: str, chat_message_i
channel_id=channel_id,
chat_id=chat_id,
chat_message_id=chat_message_id,
- chat_intent=chat_intent,
discussion_status=chat_message.discussion_type,
)
@@ -347,10 +380,9 @@ def process_prompt(self, sid: str, channel_id: str, chat_id: str, chat_message_i
channel_id=channel_id,
chat_id=chat_id,
chat_message_id=chat_message_id,
- chat_intent=chat_intent,
discussion_status=chat_message.discussion_type,
)
- logging.info(f"process_prompt: chat_intent={chat_intent}, sid={sid}, channel_id={channel_id}")
+ logging.info(f"process_prompt: sid={sid}, channel_id={channel_id}")
chat_message = self._get_chat_message(chat_id=chat_id, chat_message_id=chat_message_id)
return chat_message
diff --git a/backend/backend/core/routers/chat/serializers.py b/backend/backend/core/routers/chat/serializers.py
index 2e8f057..36dfe68 100644
--- a/backend/backend/core/routers/chat/serializers.py
+++ b/backend/backend/core/routers/chat/serializers.py
@@ -5,6 +5,9 @@
class ChatSerializer(serializers.ModelSerializer):
user = UserMinimalSerializer(read_only=True)
+ chat_intent_name = serializers.CharField(
+ source='chat_intent.display_name', read_only=True, default=None
+ )
class Meta:
model = Chat
@@ -13,6 +16,7 @@ class Meta:
'project_id',
'chat_name',
'chat_intent',
+ 'chat_intent_name',
'created_at',
'modified_at',
'is_deleted',
diff --git a/backend/backend/core/routers/chat/views.py b/backend/backend/core/routers/chat/views.py
index 9bd8334..5f283d4 100644
--- a/backend/backend/core/routers/chat/views.py
+++ b/backend/backend/core/routers/chat/views.py
@@ -128,7 +128,6 @@ def persist_prompt(self, request: Request, project_id: str, *args, **kwargs) ->
data = request.data
chat_id = data.get("chat_id")
prompt = data.get("prompt")
- chat_intent_id = data.get("chat_intent_id")
llm_model_architect = data.get("llm_model_architect")
llm_model_developer = data.get("llm_model_developer")
discussion_type = data.get('discussion_status')
@@ -139,26 +138,19 @@ def persist_prompt(self, request: Request, project_id: str, *args, **kwargs) ->
if discussion_type == "GENERATE":
generated_chat_res_id = data.get('final_discussion_id')
- # Check token balance before processing the request
+ # Check token balance before processing the request.
+ # Intent is auto-detected by the AI service, so we don't know it yet —
+ # check against the worst-case (TRANSFORM) cost to avoid letting through
+ # a request the org can't afford.
try:
project = ProjectDetails.objects.get(project_uuid=project_id)
organization = project.organization
- # Determine chat intent name for token calculation
- chat_intent_name = "INFO" # Default
- if chat_intent_id:
- from backend.core.models.chat_intent import ChatIntent
- try:
- chat_intent = ChatIntent.objects.get(chat_intent_id=chat_intent_id)
- chat_intent_name = chat_intent.name
- except ChatIntent.DoesNotExist:
- pass
-
self.fetch_token_balance(
llm_model_architect=llm_model_architect,
llm_model_developer=llm_model_developer,
organization=organization,
- chat_intent_name=chat_intent_name
+ chat_intent_name="TRANSFORM"
)
except ProjectDetails.DoesNotExist:
@@ -172,7 +164,6 @@ def persist_prompt(self, request: Request, project_id: str, *args, **kwargs) ->
chat_message = chat_message_context.persist_prompt(
prompt=prompt,
chat_id=chat_id,
- chat_intent_id=chat_intent_id,
llm_model_architect=llm_model_architect,
llm_model_developer=llm_model_developer,
discussion_type=discussion_type,
diff --git a/backend/backend/core/routers/chat_message/serializers/chat_message_serializer.py b/backend/backend/core/routers/chat_message/serializers/chat_message_serializer.py
index ec9e4a8..0fb345a 100644
--- a/backend/backend/core/routers/chat_message/serializers/chat_message_serializer.py
+++ b/backend/backend/core/routers/chat_message/serializers/chat_message_serializer.py
@@ -11,6 +11,7 @@ class Meta:
class ChatMessageSerializer(serializers.ModelSerializer):
user = UserMinimalSerializer(read_only=True)
+ chat_intent_name = serializers.CharField(source='chat_intent.name', read_only=True, default=None)
class Meta:
model = ChatMessage
@@ -29,6 +30,7 @@ class Meta:
'transformation_status',
'transformation_error_message',
'chat_intent',
+ 'chat_intent_name',
'llm_model_architect',
'llm_model_developer',
'created_at',
diff --git a/backend/backend/core/web_socket.py b/backend/backend/core/web_socket.py
index 8fe2e26..f5c5fbc 100644
--- a/backend/backend/core/web_socket.py
+++ b/backend/backend/core/web_socket.py
@@ -263,6 +263,7 @@ def send_socket_message(sid, channel_id, **kwargs):
"is_retry_transform",
"discussion_status",
"token_usage_data", # Add token usage data
+ "chat_intent_name",
]
unsupported_args = [arg for arg in kwargs.keys() if arg not in allowed_args]
diff --git a/frontend/src/ide/chat-ai/ActionButtons.jsx b/frontend/src/ide/chat-ai/ActionButtons.jsx
index 01b1dc5..bb96dba 100644
--- a/frontend/src/ide/chat-ai/ActionButtons.jsx
+++ b/frontend/src/ide/chat-ai/ActionButtons.jsx
@@ -25,7 +25,6 @@ const INFO_APPROVED =
const ActionButtons = memo(function ActionButtons({
chatMessageId,
savePrompt,
- selectedChatIntent,
isLatestTransform,
uiAction,
message,
@@ -101,12 +100,12 @@ const ActionButtons = memo(function ActionButtons({
setTimeout(() => setIsOperationInProgress(false), 3000);
if (value === "GENERATE") {
- savePrompt?.(label, selectedChatIntent, false, value, chatMessageId);
+ savePrompt?.(label, false, value, chatMessageId);
return;
}
- savePrompt?.(label, selectedChatIntent, false, value);
+ savePrompt?.(label, false, value);
},
- [savePrompt, selectedChatIntent, chatMessageId, isOperationInProgress]
+ [savePrompt, chatMessageId, isOperationInProgress]
);
const onApplyClick = useCallback(() => {
@@ -321,7 +320,6 @@ ActionButtons.displayName = "ActionButtons";
ActionButtons.propTypes = {
chatMessageId: PropTypes.string.isRequired,
savePrompt: PropTypes.func,
- selectedChatIntent: PropTypes.string,
isLatestTransform: PropTypes.bool.isRequired,
uiAction: PropTypes.object,
message: PropTypes.object,
diff --git a/frontend/src/ide/chat-ai/Body.jsx b/frontend/src/ide/chat-ai/Body.jsx
index 9ce04bd..dd7d792 100644
--- a/frontend/src/ide/chat-ai/Body.jsx
+++ b/frontend/src/ide/chat-ai/Body.jsx
@@ -47,8 +47,6 @@ const Body = function Body({
onSendButtonClick,
}) {
const [isGetChatMessages, setIsGetChatMessages] = useState(false);
- const [chatIntents, setChatIntents] = useState([]);
- const [selectedChatIntent, setSelectedChatIntent] = useState(null);
const [llmModels, setLlmModels] = useState([]);
const [selectedLlmModel, setSelectedLlmModel] = useState(null);
const [selectedCoderLlmModel, setSelectedCoderLlmModel] = useState(null);
@@ -75,8 +73,7 @@ const Body = function Body({
const explorerData = useExplorerStore((state) => state.explorerData);
const dbExplorerData = useExplorerStore((state) => state.dbExplorerData);
- const { postChatPrompt, getChatIntents, getChatLlmModels } =
- useChatAIService();
+ const { postChatPrompt, getChatLlmModels } = useChatAIService();
const { notify } = useNotificationService();
useEffect(() => {
@@ -100,19 +97,6 @@ const Body = function Body({
}
}, [pendingChannel, isConnected, createChannel]);
- useEffect(() => {
- if (!projectId || chatIntents?.length > 0 || !isChatDrawerOpen) return;
-
- getChatIntents()
- .then((data) => {
- setChatIntents(data);
- })
- .catch((error) => {
- console.error(error);
- notify({ error });
- });
- }, [projectId, isChatDrawerOpen]);
-
useEffect(() => {
if (!projectId || llmModels?.length > 0 || !isChatDrawerOpen) return;
@@ -234,55 +218,9 @@ const Body = function Body({
setIsGetChatMessages(false);
}, []);
- // Auto-select intent based on onboarding step mode
- useEffect(() => {
- if (!isOnboardingMode || !currentOnboardingStep || !chatIntents.length)
- return;
-
- const step = currentOnboardingStep;
- let targetIntentName;
-
- // Map onboarding mode to intent name
- switch (step.mode) {
- case "transform":
- targetIntentName = "TRANSFORM";
- break;
- case "sql":
- targetIntentName = "SQL";
- break;
- case "chat":
- targetIntentName = "INFO";
- break;
- default:
- return;
- }
-
- // Find the intent with the matching name
- const targetIntent = chatIntents.find(
- (intent) => intent?.name === targetIntentName
- );
-
- if (targetIntent && selectedChatIntent !== targetIntent.chat_intent_id) {
- setSelectedChatIntent(targetIntent.chat_intent_id);
-
- // Add a visual animation hint
- setTimeout(() => {
- // You could add a toast notification here if needed
- // notify({ message: `Switched to ${targetIntentName} mode for this step` });
- }, 100);
- }
- }, [
- isOnboardingMode,
- currentOnboardingStep,
- chatIntents,
- selectedChatIntent,
- setSelectedChatIntent,
- ]);
-
const savePrompt = useCallback(
(
prompt,
- selectedChatIntent,
isNewChat = false,
discussionStatus = null,
chatMessageId = null
@@ -290,9 +228,8 @@ const Body = function Body({
postChatPrompt({
prompt,
llm_model_architect: selectedLlmModel,
- llm_model_developer: selectedCoderLlmModel,
+ llm_model_developer: selectedCoderLlmModel || selectedLlmModel,
chatId: selectedChatId,
- chatIntentId: selectedChatIntent,
discussionStatus,
chatMessageId,
})
@@ -376,9 +313,6 @@ const Body = function Body({
isGetChatMessages={isGetChatMessages}
resetChatMessageIdentifier={resetChatMessageIdentifier}
isPromptRunning={isPromptRunning}
- chatIntents={chatIntents}
- selectedChatIntent={selectedChatIntent}
- setSelectedChatIntent={setSelectedChatIntent}
llmModels={llmModels}
selectedLlmModel={selectedLlmModel}
setSelectedLlmModel={setSelectedLlmModel}
@@ -421,9 +355,6 @@ const Body = function Body({
savePrompt={savePrompt}
triggerGetChatMessagesApi={triggerGetChatMessagesApi}
isPromptRunning={isPromptRunning}
- chatIntents={chatIntents}
- selectedChatIntent={selectedChatIntent}
- setSelectedChatIntent={setSelectedChatIntent}
llmModels={llmModels}
selectedLlmModel={selectedLlmModel}
setSelectedLlmModel={setSelectedLlmModel}
diff --git a/frontend/src/ide/chat-ai/ChatAI.jsx b/frontend/src/ide/chat-ai/ChatAI.jsx
index ff2c654..e651b02 100644
--- a/frontend/src/ide/chat-ai/ChatAI.jsx
+++ b/frontend/src/ide/chat-ai/ChatAI.jsx
@@ -175,6 +175,10 @@ const ChatAI = memo(
existing.token_usage_data = msg?.token_usage_data;
}
+ if (msg?.chat_intent_name) {
+ existing.chat_intent_name = msg.chat_intent_name;
+ }
+
updatedMessages[idx] = existing;
}
uuidsToRemove.push(msg?.uuid);
diff --git a/frontend/src/ide/chat-ai/Conversation.jsx b/frontend/src/ide/chat-ai/Conversation.jsx
index 5cc70f2..3f48fb1 100644
--- a/frontend/src/ide/chat-ai/Conversation.jsx
+++ b/frontend/src/ide/chat-ai/Conversation.jsx
@@ -14,7 +14,6 @@ import ModelGenerationProgress from "./ModelGenerationProgress";
const Conversation = memo(function Conversation({
savePrompt,
message,
- chatIntents,
isPromptRunning,
isLastConversation,
selectedChatId,
@@ -22,7 +21,6 @@ const Conversation = memo(function Conversation({
triggerRetryTransform,
handleSqlRun,
isLatestTransform,
- selectedChatIntent,
}) {
const userDetails = useUserStore((state) => state.userDetails);
const [detectedAction, setDetectedAction] = useState(null);
@@ -36,9 +34,9 @@ const Conversation = memo(function Conversation({
const handleTroubleshoot = useCallback(
(errorMessage) => {
const prompt = `There was an error encountered. We have the detailed error message below. Please see how we can fix this:\n\n${errorMessage}`;
- savePrompt(prompt, selectedChatIntent);
+ savePrompt(prompt);
},
- [savePrompt, selectedChatIntent]
+ [savePrompt]
);
// Create UI action object based on detected action
@@ -80,14 +78,18 @@ const Conversation = memo(function Conversation({
}, [message?.transformation_status]);
/** --------------------------------------------------------------------
- * Derive the intent once; re-computes only when its deps change.
+ * Derive the intent from the message itself (auto-detected by AI server,
+ * surfaced via chat_intent_name on the serialized chat message).
* ------------------------------------------------------------------- */
const intent = useMemo(
() =>
- chatIntents.find(
- ({ chat_intent_id }) => chat_intent_id === message?.chat_intent
- ),
- [chatIntents, message?.chat_intent]
+ message?.chat_intent_name
+ ? {
+ chat_intent_id: message?.chat_intent,
+ name: message.chat_intent_name,
+ }
+ : null,
+ [message?.chat_intent, message?.chat_intent_name]
);
// Memoize errorDetails to prevent unnecessary re-renders of PromptInfo
@@ -161,7 +163,6 @@ const Conversation = memo(function Conversation({
handleSqlRun={handleSqlRun}
isLatestTransform={isLatestTransform}
savePrompt={savePrompt}
- selectedChatIntent={selectedChatIntent}
uiAction={uiAction}
/>
)}
@@ -188,7 +189,6 @@ const Conversation = memo(function Conversation({
Conversation.propTypes = {
savePrompt: PropTypes.func.isRequired,
message: PropTypes.object.isRequired,
- chatIntents: PropTypes.array.isRequired,
isPromptRunning: PropTypes.bool.isRequired,
isLastConversation: PropTypes.bool.isRequired,
selectedChatId: PropTypes.string.isRequired,
@@ -196,7 +196,6 @@ Conversation.propTypes = {
triggerRetryTransform: PropTypes.bool.isRequired,
handleSqlRun: PropTypes.func.isRequired,
isLatestTransform: PropTypes.bool.isRequired,
- selectedChatIntent: PropTypes.string,
};
Conversation.displayName = "Conversation";
diff --git a/frontend/src/ide/chat-ai/ExistingChat.jsx b/frontend/src/ide/chat-ai/ExistingChat.jsx
index eafeaf2..683dab3 100644
--- a/frontend/src/ide/chat-ai/ExistingChat.jsx
+++ b/frontend/src/ide/chat-ai/ExistingChat.jsx
@@ -38,9 +38,6 @@ const ExistingChat = memo(function ExistingChat({
isGetChatMessages,
resetChatMessageIdentifier,
isPromptRunning,
- chatIntents,
- selectedChatIntent,
- setSelectedChatIntent,
llmModels,
selectedLlmModel,
setSelectedLlmModel,
@@ -236,15 +233,11 @@ const ExistingChat = memo(function ExistingChat({
};
const lastTransformIndex = useMemo(() => {
- const intentsMap = chatIntents.reduce((acc, ci) => {
- acc[ci?.chat_intent_id] = ci?.name;
- return acc;
- }, {});
for (let i = chatMessages.length - 1; i >= 0; i--) {
- if (intentsMap[chatMessages[i]?.chat_intent] === "TRANSFORM") return i;
+ if (chatMessages[i]?.chat_intent_name === "TRANSFORM") return i;
}
return -1;
- }, [chatMessages, chatIntents]);
+ }, [chatMessages]);
// Check if response is actively streaming (thought chain done, response started)
const isResponseStreaming = useMemo(() => {
@@ -395,7 +388,6 @@ const ExistingChat = memo(function ExistingChat({