diff --git a/backend/src/agents/generalist_agent.py b/backend/src/agents/generalist_agent.py
index afa72e5f..db6fa286 100644
--- a/backend/src/agents/generalist_agent.py
+++ b/backend/src/agents/generalist_agent.py
@@ -17,39 +17,6 @@
 )
 class GeneralistAgent(ChatAgent):
     async def invoke(self, utterance) -> str:
-        try:
-            answer_to_user = await answer_user_question(utterance, self.llm, self.model)
-            answer_result = json.loads(answer_to_user)
-            final_answer = json.loads(answer_result["response"]).get("answer", "")
-            if not final_answer:
-                response = {"content": "Error in answer format.", "ignore_validation": "false"}
-                return json.dumps(response, indent=4)
-            logger.info(f"Answer found successfully {final_answer}")
-            response = {"content": final_answer, "ignore_validation": "false"}
-            return json.dumps(response, indent=4)
-
-        except Exception as e:
-            logger.error(f"Error in web_general_search_core: {e}")
-            return "An error occurred while processing the search query."
-
-
-async def answer_user_question(search_query, llm, model) -> str:
-    try:
-        summariser_prompt = engine.load_prompt("generalist-answer", question=search_query)
-        response = await llm.chat(model, summariser_prompt, "")
-        return json.dumps(
-            {
-                "status": "success",
-                "response": response,
-                "error": None,
-            }
-        )
-    except Exception as e:
-        logger.error(f"Error during create search term: {e}")
-        return json.dumps(
-            {
-                "status": "error",
-                "response": None,
-                "error": str(e),
-            }
-        )
+        summariser_prompt = engine.load_prompt("generalist-answer", question=utterance)
+        response = await self.llm.chat(self.model, summariser_prompt, "")
+        return json.dumps({"content": response, "ignore_validation": "false"}, indent=4)
diff --git a/backend/src/agents/materiality_agent.py b/backend/src/agents/materiality_agent.py
index 8d4c7195..2b277e81 100644
--- a/backend/src/agents/materiality_agent.py
+++ b/backend/src/agents/materiality_agent.py
@@ -27,14 +27,15 @@ def create_llm_files(filenames: list[str]) -> list[LLMFile]:
 class MaterialityAgent(ChatAgent):
     async def invoke(self, utterance: str) -> str:
         materiality_files = await self.select_material_files(utterance)
-        if not materiality_files:
-            return f"Materiality Agent cannot find suitable reference documents to answer the question: {utterance}"
-        answer = await self.llm.chat_with_file(
-            self.model,
-            system_prompt=engine.load_prompt("answer-materiality-question"),
-            user_prompt=utterance,
-            files=create_llm_files(materiality_files)
-        )
+        if materiality_files:
+            answer = await self.llm.chat_with_file(
+                self.model,
+                system_prompt=engine.load_prompt("answer-materiality-question"),
+                user_prompt=utterance,
+                files=create_llm_files(materiality_files)
+            )
+        else:
+            answer = f"Materiality Agent cannot find suitable reference documents to answer the question: {utterance}"
         return json.dumps({"content": answer, "ignore_validation": False})
 
     async def list_material_topics_for_company(self, company_name: str) -> dict[str, str]:
diff --git a/backend/src/directors/report_director.py b/backend/src/directors/report_director.py
index 776c5e02..5ada9a87 100644
--- a/backend/src/directors/report_director.py
+++ b/backend/src/directors/report_director.py
@@ -47,8 +47,8 @@ def create_report_chat_message(filename: str, company_name: str, topics: dict[st
     if topics:
         topics_with_markdown = [f"{key}\n{value}" for key, value in topics.items()]
         report_chat_message += f"""
-
-The following materiality topics were identified for {company_name} which the report focuses on:
+
+The following materiality topics were identified for {company_name}:
 
 {"\n\n".join(topics_with_markdown)}"""
     return report_chat_message