Skip to content

Commit

Permalink
Update error message for LLM status and enhance response validation in OllamaModel
Browse files Browse the repository at this point in the history
  • Loading branch information
Sedrowow committed Sep 16, 2024
1 parent 1362dac commit fc17fb2
Show file tree
Hide file tree
Showing 2 changed files with 7 additions and 3 deletions.
2 changes: 1 addition & 1 deletion app/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ def execute(self, user_request: str, step_num: int = 0) -> Optional[str]:
self.interrupt_execution = False

if not self.llm:
status = 'oops something went wrong'
status = 'LLM not running correctly'
self.status_queue.put(status)
return status

Expand Down
8 changes: 6 additions & 2 deletions app/models/ollama_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ def send_message_to_llm(self, formatted_user_request: str) -> Any:
model=self.model_name,
messages=[
{
'role': 'instructions',
'role': 'context/instructions',
'content': self.context
},
{
Expand All @@ -57,7 +57,11 @@ def send_message_to_llm(self, formatted_user_request: str) -> Any:

def convert_llm_response_to_json_instructions(self, llm_response: Any) -> dict[str, Any]:
try:
llm_response_data = llm_response['choices'][0]['text'].strip()
if llm_response and 'choices' in llm_response and len(llm_response['choices']) > 0:
llm_response_data = llm_response['choices'][0]['text'].strip()
else:
print('Invalid LLM response format')
return {}
except (KeyError, IndexError) as e:
print(f'Error while accessing LLM response - {e}')
return {}
Expand Down

0 comments on commit fc17fb2

Please sign in to comment.