FS 121: materiality chat agent #59
Changes from 13 commits
@@ -3,15 +3,55 @@
 import logging
 
 from src.llm import LLMFile
-from src.agents import Agent
+from src.agents import ChatAgent, chat_agent
 from src.prompts import PromptEngine
 
 engine = PromptEngine()
 logger = logging.getLogger(__name__)
 
 
-class MaterialityAgent(Agent):
-    async def list_material_topics(self, company_name: str) -> dict[str, str]:
+def create_llm_files(filenames: list[str]) -> list[LLMFile]:
+    return [
+        LLMFile(filename=filename, file=Path(f"./library/{filename}"))
+        for filename in filenames
+    ]
+
+
+@chat_agent(
+    name="MaterialityAgent",
+    description="This agent can help answer questions about ESG Materiality, what topics are relevant to a company"
+                "or sector and explain materiality topics in detail. The Materiality Agent can also answer"
+                "questions about typical sector activities, value chain and business relationships.",
+    tools=[]
+)
+class MaterialityAgent(ChatAgent):
+    async def invoke(self, utterance: str) -> str:
+        materiality_files = await self.select_material_files(utterance)
+        if materiality_files:
+            answer = await self.llm.chat_with_file(
+                self.model,
+                system_prompt=engine.load_prompt("answer-materiality-question"),
+                user_prompt=utterance,
+                files=create_llm_files(materiality_files)
+            )
+        else:
+            answer = f"Materiality Agent cannot find suitable reference documents to answer the question: {utterance}"
+        return json.dumps({"content": answer, "ignore_validation": False})
+
+    async def list_material_topics_for_company(self, company_name: str) -> dict[str, str]:
+        materiality_files = await self.select_material_files(company_name)
+        if not materiality_files:
+            logger.info(f"No materiality reference documents could be found for {company_name}")
+            return {}  # TODO this needs fixing
Review comment: this needs fixing apparently
Reply: I think I'll just leave it as is (and remove the comment)
+
+        materiality_topics = await self.llm.chat_with_file(
+            self.model,
+            system_prompt=engine.load_prompt("list-material-topics-system-prompt"),
+            user_prompt=f"What topics are material for {company_name}?",
+            files=create_llm_files(materiality_files)
+        )
+        return json.loads(materiality_topics)["material_topics"]
+
+    async def select_material_files(self, utterance) -> list[str]:
         with open('./library/catalogue.json') as file:
             catalogue = json.load(file)
         files_json = await self.llm.chat(
@@ -20,17 +60,7 @@ async def list_material_topics(self, company_name: str) -> dict[str, str]:
                 "select-material-files-system-prompt",
                 catalogue=catalogue
             ),
-            user_prompt=company_name,
+            user_prompt=utterance,
             return_json=True
         )
-
-        materiality_topics = await self.llm.chat_with_file(
-            self.model,
-            system_prompt=engine.load_prompt("list-material-topics-system-prompt"),
-            user_prompt=f"What topics are material for {company_name}?",
-            files=[
-                LLMFile(file_name=file_name, file=Path(f"./library/{file_name}"))
-                for file_name in json.loads(files_json)["files"]
-            ]
-        )
-        return json.loads(materiality_topics)["material_topics"]
+        return json.loads(files_json)["files"]
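
For context on what the new methods consume and produce, here is a minimal sketch of the ./library/catalogue.json file that select_material_files reads, together with the expected return shapes. The filenames and catalogue schema below are illustrative assumptions; the real schema is not shown in this diff.

import json
from pathlib import Path

# Hypothetical catalogue: maps each reference document in ./library to a short
# description that the select-material-files-system-prompt can match against the
# user's utterance or company name.
catalogue = {
    "example_bank_materiality_2023.pdf": "Materiality assessment for Example Bank (financial sector)",
    "esrs_sector_guidance.pdf": "General ESRS guidance on material ESG topics by sector",
}

Path("./library").mkdir(exist_ok=True)
Path("./library/catalogue.json").write_text(json.dumps(catalogue, indent=2))

# Expected shapes, based on the diff above:
#   select_material_files(utterance)        -> ["example_bank_materiality_2023.pdf"]
#   list_material_topics_for_company(name)  -> {"<topic>": "<why it is material>"}, or {} if no documents match
#   invoke(utterance)                       -> '{"content": "<answer>", "ignore_validation": false}'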
Review comment: why get Diana to implement Mistral and then just change it back again?
Reply: We want the report agent on OpenAI because the file processing is more advanced. We want Mistral to be able to handle the "chat_with_file" method so that if someone did have it on Mistral, the app wouldn't crash.
Reply: It seemed more complete this way, but that's fair, we could have split that work into a tech-debt ticket instead.
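
On the chat_with_file point above: a rough sketch of how a Mistral-backed client could degrade gracefully instead of crashing, by inlining document text into the prompt rather than uploading files. The class and method names here are illustrative assumptions, not the repository's actual implementation.

from dataclasses import dataclass
from pathlib import Path


@dataclass
class LLMFile:  # assumed shape, mirroring the filename/file fields used in the diff
    filename: str
    file: Path


class MistralLLM:  # hypothetical class name
    async def chat(self, model, system_prompt, user_prompt, return_json=False) -> str:
        raise NotImplementedError  # would delegate to the real Mistral chat completion call

    async def chat_with_file(self, model, system_prompt, user_prompt, files: list[LLMFile]) -> str:
        # Fallback: rather than uploading files (the more advanced OpenAI path), inline a
        # truncated text extract of each document into the prompt so the call still succeeds.
        # A real implementation would need proper text extraction for PDFs.
        file_context = "\n\n".join(
            f"[{f.filename}]\n{f.file.read_text(errors='ignore')[:20000]}" for f in files
        )
        return await self.chat(
            model,
            system_prompt=system_prompt,
            user_prompt=f"{user_prompt}\n\nReference documents:\n{file_context}",
        )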