Skip to content

Commit

Permalink
Refactor OllamaModel to remove context from request data and update message format for LLM interaction
Browse files Browse the repository at this point in the history
  • Loading branch information
Sedrowow committed Sep 16, 2024
1 parent 9a6a4d1 commit 1362dac
Show file tree
Hide file tree
Showing 2 changed files with 14 additions and 3 deletions.
3 changes: 3 additions & 0 deletions .vscode/settings.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
{
"python.REPL.enableREPLSmartSend": false
}
14 changes: 11 additions & 3 deletions app/models/ollama_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,6 @@ def format_user_request_for_llm(self, original_user_request, step_num, screensho
str: The formatted request as a JSON string.
"""
request_data = {
'context': self.context,
'original_user_request': original_user_request,
'step_num': step_num
}
Expand All @@ -38,9 +37,18 @@ def format_user_request_for_llm(self, original_user_request, step_num, screensho

def send_message_to_llm(self, formatted_user_request: str) -> Any:
try:
response = ollama.generate(
response = ollama.chat(
model=self.model_name,
prompt=formatted_user_request
messages=[
{
'role': 'instructions',
'content': self.context
},
{
'role': 'prompt',
'content': formatted_user_request
},
]
)
return response
except Exception as e:
Expand Down

0 comments on commit 1362dac

Please sign in to comment.