Skip to content

Commit

Permalink
Changed to report instead of the actual file
Browse files Browse the repository at this point in the history
  • Loading branch information
Gagan Singh committed Dec 18, 2024
1 parent 7d88597 commit 52deeed
Show file tree
Hide file tree
Showing 8 changed files with 32 additions and 29 deletions.
6 changes: 3 additions & 3 deletions backend/src/agents/intent_agent.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
from src.prompts import PromptEngine
from src.agents import ChatAgent, chat_agent
from src.session import get_session_chat
from src.session.file_uploads import get_uploaded_file_content
from src.session.file_uploads import get_uploaded_report_content
import logging
from src.utils.config import Config

Expand All @@ -21,9 +21,9 @@
class IntentAgent(ChatAgent):
async def invoke(self, utterance: str) -> str:
session_chat = get_session_chat()
session_file_content = get_uploaded_file_content()
session_report_content = get_uploaded_report_content()
user_prompt = engine.load_prompt(
"intent", question=utterance, chat_history=session_chat if session_chat else "There is no chat history",
uploaded_file_content=session_file_content if session_file_content else "There are no file uploads"
report_content=session_report_content if session_report_content else "There is no report content"
)
return await self.llm.chat(self.model, intent_system, user_prompt=user_prompt, return_json=True)
11 changes: 6 additions & 5 deletions backend/src/directors/report_director.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,9 +33,10 @@ def create_report_chat_message(file_name: str, company_name: str, topics: dict[s
topics_with_markdown = [
f"{key}\n{value}" for key, value in topics.items()
]
return f"""Your report for {file_name} is ready to view.
topics_summary = "\n\n".join(topics_with_markdown)

The following materiality topics were identified for {company_name} which the report focuses on:
{"\n\n".join(topics_with_markdown)}
"""
return (
f"Your report for {file_name} is ready to view.\n\n"
f"The following materiality topics were identified for {company_name} which the report focuses on:\n\n"
f"{topics_summary}"
)
6 changes: 3 additions & 3 deletions backend/src/prompts/templates/generate-message-suggestions.j2
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@
You are part of an AI-powered application that assists users in understanding the sustainability of companies through ESG (Environment, Social, Governance) reporting. The application has access to a database that contains ESG scores of various funds, companies and the industries they operate. The application also is able to search the internet and retrieve relevant articles.

Your purpose is to suggest the user with possible questions they could ask the main Chat Bot, based on the conversation history and the uploaded file's content. You are provided only with the last few messages and your suggestions should be logical follow-up questions to the conversation. Your suggestions should not include questions that have already been asked.
Your purpose is to suggest possible questions the user could ask the main Chat Bot, based on the conversation history and the report's content. You are provided only with the last few messages and your suggestions should be logical follow-up questions to the conversation. Your suggestions should not include questions that have already been asked.

The conversation history is:
{{ chat_history }}

The uploaded file's content is:
{{ uploaded_file_content }}
The report's content is:
{{ report_content }}

Here are some examples of questions you could suggest:
- (Assuming the user was talking about Ryanair) Can you compare the ESG scores of Ryanair and EasyJet?
Expand Down
4 changes: 2 additions & 2 deletions backend/src/prompts/templates/intent-system.j2
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
You are an expert in determining the intent behind a user's question, breaking down complex questions into multiple simpler questions and forming standalone questions based on context from chat history.

You will be given a question and may also be provided with chat history and the uploaded file's content which is to be used as context if provided.
You will be given a question and may also be provided with chat history and the report's content which is to be used as context if provided.

- First determine the user intent of the question by taking key points from the question and gaining context from the chat history and the uploaded file's content.
- First determine the user intent of the question by taking key points from the question and gaining context from the chat history and the report's content.
- Second
- if the question mentions csv, dataset or database the questions list should be an empty array
- else use your initiative to determine whether the question is complex enough to split up and if it is then using the user intent try to split the question up into multiple questions with singular objectives.
Expand Down
4 changes: 2 additions & 2 deletions backend/src/prompts/templates/intent.j2
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
The conversation history is:
{{ chat_history }}

The uploaded file's content is:
{{ uploaded_file_content }}
The report's content is:
{{ report_content }}

The question is:
{{ question }}
14 changes: 7 additions & 7 deletions backend/src/session/file_uploads.py
Original file line number Diff line number Diff line change
Expand Up @@ -83,18 +83,18 @@ def clear_session_file_uploads():
set_session(UPLOADS_META_SESSION_KEY, [])


def get_uploaded_file_content() -> str | None:
def get_uploaded_report_content() -> str | None:
session_file_meta = get_session_file_uploads_meta()
if session_file_meta:
upload_id = session_file_meta[0]['uploadId']
session_file_data = get_session_file_upload(upload_id)
if session_file_data:
session_file_content = session_file_data.get('content')
return session_file_content
session_report_data = get_report(upload_id)
if session_report_data:
session_report_content = session_report_data.get('content')
return session_report_content
else:
logger.warning("No session file data found.")
logger.warning("No session report data found.")
else:
logger.warning("No session file uploads found.")
logger.warning("No session report uploads found.")
return None

def store_report(report: FileUploadReport):
Expand Down
8 changes: 4 additions & 4 deletions backend/src/suggestions_generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
from src.llm.factory import get_llm
from src.prompts.prompting import PromptEngine
from src.session import Message, get_session_chat
from src.session.file_uploads import get_uploaded_file_content
from src.session.file_uploads import get_uploaded_report_content
from src.utils.config import Config
import logging

Expand All @@ -18,10 +18,10 @@ async def generate_suggestions() -> List[str]:
llm = get_llm(config.suggestions_llm)
model = get_suggestions_model()
chat_history = get_chat_history()
session_file_content = get_uploaded_file_content()
session_report_content = get_uploaded_report_content()
suggestions_prompt = engine.load_prompt(
"generate-message-suggestions", chat_history=chat_history, uploaded_file_content=session_file_content
if session_file_content else "There are no file uploads")
"generate-message-suggestions", chat_history=chat_history, report_content=session_report_content
if session_report_content else "There is no report content")
response = await llm.chat(model, suggestions_prompt, user_prompt="Give me 5 suggestions.", return_json=True)
try:
response_json = json.loads(response)
Expand Down
8 changes: 5 additions & 3 deletions backend/tests/directors/report_director_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,9 +9,11 @@

mock_topics = {"topic1": "topic1 description", "topic2": "topic2 description"}
mock_report = "#Report on upload as markdown"
expected_answer = ('Your report for test.txt is ready to view.\n\nThe following materiality topics were identified for '
'CompanyABC which the report focuses on:\n\ntopic1\ntopic1 description\n\ntopic2\ntopic2 '
'description\n')
expected_answer = ("Your report for test.txt is ready to view.\n\n"
"The following materiality topics were identified for "
"CompanyABC which the report focuses on:\n\n"
"topic1\ntopic1 description\n\n"
"topic2\ntopic2 description")


@pytest.mark.asyncio
Expand Down

0 comments on commit 52deeed

Please sign in to comment.