@@ -18,26 +18,25 @@
 from langchain_core.messages import HumanMessage, SystemMessage
 from langchain_core.messages.ai import AIMessageChunk
 from langchain_core.tools.base import BaseTool
-from langchain_openai import ChatOpenAI
 from langgraph.checkpoint.base import BaseCheckpointSaver
 from langgraph.graph import MessagesState, StateGraph
 from langgraph.graph.graph import CompiledGraph
 from langgraph.prebuilt import ToolNode, create_react_agent, tools_condition
 from langgraph.prebuilt.chat_agent_executor import AgentState
-from openai import BadRequestError
-from typing_extensions import TypedDict
-
-from ai_chatbots import tools
-from ai_chatbots.api import get_search_tool_metadata
-from ai_chatbots.models import TutorBotOutput
-from ai_chatbots.tools import get_video_transcript_chunk, search_content_files
 from open_learning_ai_tutor.message_tutor import message_tutor
 from open_learning_ai_tutor.tools import tutor_tools
 from open_learning_ai_tutor.utils import (
     json_to_intent_list,
     json_to_messages,
     tutor_output_to_json,
 )
+from openai import BadRequestError
+from typing_extensions import TypedDict
+
+from ai_chatbots import tools
+from ai_chatbots.api import get_search_tool_metadata
+from ai_chatbots.models import TutorBotOutput
+from ai_chatbots.tools import get_video_transcript_chunk, search_content_files
 
 log = logging.getLogger(__name__)
 
@@ -473,28 +472,6 @@ def __init__(  # noqa: PLR0913
             edx_module_id, block_siblings
         )
 
-    def get_llm(self, **kwargs) -> BaseChatModel:
-        """
-        Return the LLM instance for the chatbot.
-        Set it up to use a proxy, with required proxy kwargs, if applicable.
-        """
-        llm = ChatOpenAI(
-            model=f"{self.proxy_prefix}{self.model}",
-            **(
-                self.proxy.get_api_kwargs(
-                    base_url_key="base_url", api_key_key="openai_api_key"
-                )
-                if self.proxy
-                else {}
-            ),
-            **(self.proxy.get_additional_kwargs(self) if self.proxy else {}),
-            **kwargs,
-        )
-        # Set the temperature if it's supported by the model
-        if self.temperature and self.model not in settings.AI_UNSUPPORTED_TEMP_MODELS:
-            llm.temperature = self.temperature
-        return llm
-
     async def get_tool_metadata(self) -> str:
        """Return the metadata for the tool"""
        return json.dumps(
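Note: the deleted `get_llm` has no replacement in this file, so the tutor bot presumably inherits a shared LLM factory from its base chatbot class. For reference, here is a minimal standalone sketch of the proxy-aware construction the removed method performed, assuming the same `proxy` interface (`get_api_kwargs`, `get_additional_kwargs`) and the `settings.AI_UNSUPPORTED_TEMP_MODELS` setting seen above; the `build_llm` wrapper itself is hypothetical, not part of this codebase:

from django.conf import settings
from langchain_core.language_models import BaseChatModel
from langchain_openai import ChatOpenAI


def build_llm(bot, **kwargs) -> BaseChatModel:
    """Proxy-aware ChatOpenAI factory mirroring the removed TutorBot.get_llm."""
    proxy = bot.proxy
    llm = ChatOpenAI(
        model=f"{bot.proxy_prefix}{bot.model}",
        # Route requests through the proxy (base URL + API key) when configured.
        **(
            proxy.get_api_kwargs(base_url_key="base_url", api_key_key="openai_api_key")
            if proxy
            else {}
        ),
        # Merge any proxy-specific extra kwargs for this bot.
        **(proxy.get_additional_kwargs(bot) if proxy else {}),
        **kwargs,
    )
    # Only set temperature on models that support it.
    if bot.temperature and bot.model not in settings.AI_UNSUPPORTED_TEMP_MODELS:
        llm.temperature = bot.temperature
    return llm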
@@ -534,18 +511,18 @@ async def get_completion(
         response = ""
 
         try:
-            new_history, new_intent_history, new_assessment_history, metadata = (
-                message_tutor(
-                    self.problem,
-                    self.problem_set,
-                    self.llm,
-                    [HumanMessage(content=message)],
-                    chat_history,
-                    assessment_history,
-                    intent_history,
-                    tools=tutor_tools,
-                )
+            new_history, new_intent_history, new_assessment_history = message_tutor(
+                self.problem,
+                self.problem_set,
+                self.llm,
+                [HumanMessage(content=message)],
+                chat_history,
+                assessment_history,
+                intent_history,
+                tools=tutor_tools,
             )
+
+            metadata = {"edx_module_id": self.edx_module_id, "tutor_model": self.model}
             json_output = tutor_output_to_json(
                 new_history, new_intent_history, new_assessment_history, metadata
             )
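With this change `message_tutor` returns a three-tuple of histories and no longer produces metadata; the caller assembles it before serialization. A minimal usage sketch of the new convention, with placeholder inputs (in TutorBot the real values come from the bot instance and the chat session; the string/list types of the placeholders are assumptions):

from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI
from open_learning_ai_tutor.message_tutor import message_tutor
from open_learning_ai_tutor.tools import tutor_tools
from open_learning_ai_tutor.utils import tutor_output_to_json

# Placeholder inputs -- in TutorBot these come from the bot instance/session.
problem, problem_set = "<problem>", "<problem set>"
chat_history, assessment_history, intent_history = [], [], []
llm = ChatOpenAI(model="gpt-4o")  # illustrative; any LangChain chat model

# message_tutor now returns three histories and no metadata.
new_history, new_intent_history, new_assessment_history = message_tutor(
    problem,
    problem_set,
    llm,
    [HumanMessage(content="How should I start?")],
    chat_history,
    assessment_history,
    intent_history,
    tools=tutor_tools,
)

# Metadata is built by the caller and attached at serialization time.
metadata = {"edx_module_id": "block-v1:...", "tutor_model": "gpt-4o"}
json_output = tutor_output_to_json(
    new_history, new_intent_history, new_assessment_history, metadata
)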