Skip to content

Commit

Permalink
Merge pull request #611 from sambhavnoobcoder/Longterm-Memory-Access-…
Browse files Browse the repository at this point in the history
…Issue

Ensure tool execution regardless of long-term memory usage
  • Loading branch information
kyegomez authored Oct 25, 2024
2 parents f31fa4f + 486a3cf commit 46c206c
Showing 1 changed file with 64 additions and 53 deletions.
117 changes: 64 additions & 53 deletions swarms/structs/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -799,51 +799,61 @@ def _run(
while attempt < self.retry_attempts and not success:
try:
if self.long_term_memory is not None:
logger.info(
"Querying long term memory..."
)
logger.info("Querying long term memory...")
self.memory_query(task_prompt)


# Generate response using LLM
response_args = (
(task_prompt, *args) if img is None else (task_prompt, img, *args)
)
response = self.call_llm(*response_args, **kwargs)

# Check if response is a dictionary and has 'choices' key
if isinstance(response, dict) and 'choices' in response:
response = response['choices'][0]['message']['content']
elif isinstance(response, str):
# If response is already a string, use it as is
pass
else:
response_args = (
(task_prompt, *args)
if img is None
else (task_prompt, img, *args)
)
response = self.call_llm(
*response_args, **kwargs
)

# Log the step metadata
logged = self.log_step_metadata(
loop_count, task_prompt, response
)
logger.info(logged)

# Convert to a str if the response is not a str
response = self.llm_output_parser(
response
raise ValueError(
f"Unexpected response format: {type(response)}"
)

# Print
if self.streaming_on is True:
self.stream_response(response)
else:
print(response)

# Add the response to the memory
self.short_memory.add(
role=self.agent_name, content=response
)

# Add to all responses
all_responses.append(response)

# TODO: Implement reliability check

# Check and execute tools
if self.tools is not None:
print(f"self.tools is not None: {response}")
self.parse_and_execute_tools(response)

# Log the step metadata
logged = self.log_step_metadata(
loop_count,
task_prompt,
response
)
logger.info(logged)

# Convert to a str if the response is not a str
response = self.llm_output_parser(response)

# Print
if self.streaming_on is True:
self.stream_response(response)
else:
print(response)

# Add the response to the memory
self.short_memory.add(
role=self.agent_name,
content=response
)

# Add to all responses
all_responses.append(response)

# TODO: Implement reliability check
if self.tools is not None:
# self.parse_function_call_and_execute(response)
self.parse_and_execute_tools(response)

# if self.code_interpreter is True:
# # Parse the code and execute
# logger.info("Parsing code and executing...")
Expand Down Expand Up @@ -1846,20 +1856,21 @@ async def count_tokens_and_subtract_from_context_window(

return response

def llm_output_parser(self, response: Any) -> str:
    """
    Parse the response from the LLM (Large Language Model) and return it as a string.

    Args:
        response (Any): The raw response returned by the LLM call.

    Returns:
        str: The response coerced to a string; a value that is already a
            string is returned unchanged.
    """
    # Bug fix: the original `response is not str` compared identity against
    # the *type object* `str`, which is True for every real value, so the
    # intended "already a string" fast path never applied. isinstance is
    # the correct type check.
    if not isinstance(response, str):
        response = str(response)

    return response
def llm_output_parser(self, response):
    """Normalize an LLM response of any shape into a plain string.

    Strings pass through untouched; OpenAI-style dicts yield the message
    content; other dicts are JSON-serialized; anything else is stringified.
    On any parsing error, the string representation is returned as a
    last-resort fallback.
    """
    try:
        # Fast path: already a string, nothing to do.
        if isinstance(response, str):
            return response
        if isinstance(response, dict):
            # OpenAI-style payloads carry the text under choices[0].message.content.
            if 'choices' not in response:
                return json.dumps(response)  # Convert dict to string
            return response['choices'][0]['message']['content']
        # Any other type: fall back to its string representation.
        return str(response)
    except Exception as e:
        logger.error(f"Error parsing LLM output: {e}")
        return str(response)  # Return string representation as fallback

def log_step_metadata(
self, loop: int, task: str, response: str
Expand Down

0 comments on commit 46c206c

Please sign in to comment.