Skip to content

Commit

Permalink
Merge branch 'kyegomez:master' into JSON-Output-Support-Agent
Browse files Browse the repository at this point in the history
  • Loading branch information
sambhavnoobcoder authored Oct 28, 2024
2 parents a63541c + 2387096 commit 247ecc6
Show file tree
Hide file tree
Showing 10 changed files with 317 additions and 219 deletions.
1 change: 1 addition & 0 deletions changelog.md
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# 5.8.7
2 changes: 1 addition & 1 deletion docs/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ jinja2~=3.1
markdown~=3.7
mkdocs-material-extensions~=1.3
pygments~=2.18
pymdown-extensions~=10.10
pymdown-extensions~=10.11

# Requirements for plugins
babel~=2.16
Expand Down
9 changes: 6 additions & 3 deletions example.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,11 +33,14 @@
context_length=200000,
return_step_meta=False,
# output_type="json",
output_type="string",
output_type="json", # "json", "dict", "csv" OR "string" soon "yaml" and
streaming_on=False,
# auto_generate_prompt=True,
)


agent.run(
"How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria"
print(
agent.run(
"How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria"
)
)
28 changes: 28 additions & 0 deletions new_prompt.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
# Example script: build a "prompt generator" system prompt with swarms.Prompt,
# letting the configured LLM auto-generate/refine the prompt content.
from swarms import Prompt
from swarm_models import OpenAIChat
import os

# LLM backend passed to Prompt via `llm=` below.
# Low temperature (0.1) to keep generated prompt text mostly deterministic.
# Requires OPENAI_API_KEY in the environment — os.getenv returns None if unset.
model = OpenAIChat(
    api_key=os.getenv("OPENAI_API_KEY"), model_name="gpt-4o-mini", temperature=0.1
)

# Aggregator system prompt
# `content` is the seed prompt; with auto_generate_prompt=True the Prompt
# object is expected to regenerate/refine it at construction time using `model`.
# NOTE(review): in the Prompt.__init__ shown in this same commit,
# `auto_generate_prompt` is a bool field that shadows the (commented-out)
# generation method, so `self.auto_generate_prompt()` raises TypeError —
# confirm this example actually runs before shipping.
prompt_generator_sys_prompt = Prompt(
    name="prompt-generator-sys-prompt-o1",
    description="Generate the most reliable prompt for a specific problem",
    content="""
Your purpose is to craft extremely reliable and production-grade system prompts for other agents.
# Instructions
- Understand the prompt required for the agent.
- Utilize a combination of the most effective prompting strategies available, including chain of thought, many shot, few shot, and instructions-examples-constraints.
- Craft the prompt by blending the most suitable prompting strategies.
- Ensure the prompt is production-grade ready and educates the agent on how to reason and why to reason in that manner.
- Provide constraints if necessary and as needed.
- The system prompt should be extensive and cover a vast array of potential scenarios to specialize the agent.
""",
    auto_generate_prompt=True,  # triggers LLM-based generation in Prompt.__init__
    llm=model,
)

# Print the final (possibly auto-generated) prompt text.
print(prompt_generator_sys_prompt.get_prompt())
4 changes: 2 additions & 2 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"

[tool.poetry]
name = "swarms"
version = "5.8.6"
version = "5.8.7"
description = "Swarms - Pytorch"
license = "MIT"
authors = ["Kye Gomez <[email protected]>"]
Expand Down Expand Up @@ -84,7 +84,7 @@ swarms = "swarms.cli.main:main"

[tool.poetry.group.lint.dependencies]
black = ">=23.1,<25.0"
ruff = ">=0.5.1,<0.6.8"
ruff = ">=0.5.1,<0.6.10"
types-toml = "^0.10.8.1"
types-pytz = ">=2023.3,<2025.0"
types-chardet = "^5.0.4.6"
Expand Down
28 changes: 26 additions & 2 deletions swarms/prompts/prompt.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,8 @@
from swarms_cloud.utils.log_to_swarms_database import log_agent_data
from swarms_cloud.utils.capture_system_data import capture_system_data
from swarms.tools.base_tool import BaseTool

# from swarms.agents.ape_agent import auto_generate_prompt
from typing import Any

class Prompt(BaseModel):
"""
Expand Down Expand Up @@ -70,11 +71,15 @@ class Prompt(BaseModel):
default="prompts",
description="The folder path within WORKSPACE_DIR where the prompt will be autosaved",
)
auto_generate_prompt: bool = Field(
default=False,
description="Flag to enable or disable auto-generating the prompt",
)
parent_folder: str = Field(
default=os.getenv("WORKSPACE_DIR"),
description="The folder where the autosave folder is in",
)
# tools: List[callable] = None
llm: Any = None

@validator("edit_history", pre=True, always=True)
def initialize_history(cls, v, values):
Expand All @@ -86,6 +91,15 @@ def initialize_history(cls, v, values):
values["content"]
] # Store initial version in history
return v

def __init__(self, **data):
    """Construct the prompt, then run post-construction hooks.

    After pydantic validation via ``super().__init__``, this optionally
    autosaves the prompt to disk and, when requested, dispatches LLM-based
    prompt auto-generation.

    Args:
        **data: Field values forwarded to ``BaseModel.__init__``.

    Raises:
        TypeError: if ``auto_generate_prompt=True`` is combined with an
            ``llm`` (see the inline note below).
    """
    super().__init__(**data)

    # Persist immediately when autosave is enabled.
    if self.autosave:
        self._autosave()

    if self.auto_generate_prompt and self.llm:
        # BUG FIX: `auto_generate_prompt` is a *bool Field* on this model,
        # and the generation method of the same name exists only as
        # commented-out code below — so the original
        # `self.auto_generate_prompt()` unconditionally raised a bare
        # "TypeError: 'bool' object is not callable". Keep the exception
        # type (callers may already catch TypeError) but make the failure
        # self-explanatory until the method is restored under a
        # non-colliding name (e.g. `_auto_generate_prompt`).
        raise TypeError(
            "auto_generate_prompt=True is not functional: the generation "
            "method is shadowed by the `auto_generate_prompt` bool field"
        )

def edit_prompt(self, new_content: str) -> None:
"""
Expand Down Expand Up @@ -238,6 +252,16 @@ def _autosave(self) -> None:
with open(file_path, "w") as file:
json.dump(self.model_dump(), file)
logger.info(f"Autosaved prompt {self.id} to {file_path}.")

# def auto_generate_prompt(self):
# logger.info(f"Auto-generating prompt for {self.name}")
# task = self.name + " " + self.description + " " + self.content
# prompt = auto_generate_prompt(task, llm=self.llm, max_tokens=4000, use_second_sys_prompt=True)
# logger.info("Generated prompt successfully, updating content")
# self.edit_prompt(prompt)
# logger.info("Prompt content updated")

# return "Prompt auto-generated successfully."

class Config:
"""Pydantic configuration for better JSON serialization."""
Expand Down
Loading

0 comments on commit 247ecc6

Please sign in to comment.