From 8fd96bf1f2b1d89d0621d99b1fb0d3dc26f95fa5 Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Thu, 9 Jan 2025 13:40:15 +0100 Subject: [PATCH 1/2] Added linting rules to ruff check and formatting with ruff format --- .pre-commit-config.yaml | 23 +- .../contrib/capabilities/generate_images.py | 2 +- .../contrib/capabilities/vision_capability.py | 2 +- .../tools/math/count_distinct_permutations.py | 2 +- autogen/agentchat/contrib/img_utils.py | 2 +- autogen/agentchat/contrib/reasoning_agent.py | 4 +- autogen/agentchat/contrib/swarm_agent.py | 5 - autogen/agentchat/contrib/tool_retriever.py | 6 +- autogen/agentchat/conversable_agent.py | 13 +- autogen/agentchat/groupchat.py | 12 +- .../realtime_agent/oai_realtime_client.py | 3 +- .../realtime_agent/realtime_agent.py | 2 - .../realtime_agent/realtime_client.py | 3 +- autogen/agentchat/utils.py | 23 +- autogen/cache/cache_factory.py | 9 +- autogen/cache/in_memory_cache.py | 1 - autogen/code_utils.py | 4 +- autogen/coding/func_with_reqs.py | 2 +- .../coding/local_commandline_code_executor.py | 10 +- autogen/io/__init__.py | 2 +- autogen/logger/sqlite_logger.py | 1 - autogen/messages/base_message.py | 8 +- autogen/oai/bedrock.py | 4 +- autogen/oai/cerebras.py | 8 +- autogen/oai/client.py | 4 +- autogen/oai/cohere.py | 5 - autogen/oai/completion.py | 6 +- autogen/oai/gemini.py | 37 +- autogen/oai/groq.py | 10 +- autogen/oai/mistral.py | 12 +- autogen/oai/ollama.py | 12 +- autogen/oai/openai_utils.py | 42 +- autogen/oai/together.py | 20 +- autogen/tools/dependency_injection.py | 3 +- autogen/tools/function_utils.py | 1 + notebook/agentchat_agentoptimizer.ipynb | 921 +++++++++--------- notebook/agentchat_dalle_and_gpt4v.ipynb | 14 +- notebook/agentchat_databricks_dbrx.ipynb | 1 + notebook/agentchat_function_call_async.ipynb | 7 +- ...entchat_nested_chats_chess_altmodels.ipynb | 12 +- notebook/lats_search.ipynb | 1 - pyproject.toml | 82 +- scripts/lint.sh | 7 + scripts/pre-commit-lint.sh | 34 + scripts/pre-commit-mypy-run.sh | 2 + .../retrievechat/test_qdrant_retrievechat.py | 2 +- test/agentchat/contrib/test_agent_builder.py | 1 - test/agentchat/contrib/test_swarm.py | 5 +- test/agentchat/test_chats.py | 3 +- test/agentchat/test_conversable_agent.py | 3 - test/agentchat/test_function_call.py | 3 +- test/agentchat/test_groupchat.py | 6 +- test/coding/test_markdown_code_extractor.py | 4 +- test/coding/test_user_defined_functions.py | 1 - test/interop/pydantic_ai/test_pydantic_ai.py | 1 - test/messages/test_agent_messages.py | 2 +- test/messages/test_base_message.py | 7 +- test/oai/test_bedrock.py | 3 - test/oai/test_cerebras.py | 1 - test/oai/test_client.py | 2 - test/oai/test_groq.py | 1 - test/oai/test_mistral.py | 1 - test/oai/test_ollama.py | 2 - test/oai/test_together.py | 2 - test/test_code_utils.py | 4 +- test/test_retrieve_utils.py | 1 + test/tools/test_function_utils.py | 6 +- .../code-execution/custom-executor.ipynb | 1 - website/process_notebooks.py | 1 - 69 files changed, 740 insertions(+), 707 deletions(-) create mode 100755 scripts/lint.sh create mode 100755 scripts/pre-commit-lint.sh diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1fceab925a..dd0b312f85 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -23,17 +23,16 @@ repos: - id: trailing-whitespace - id: end-of-file-fixer - id: no-commit-to-branch - - repo: https://github.com/psf/black - rev: 24.4.2 - hooks: - - id: black - - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.8 + - repo: local hooks: - - id: ruff - types_or: [ 
python, pyi, jupyter ] - args: ["--fix", "--ignore=E402"] - exclude: notebook/agentchat_databricks_dbrx.ipynb + - id: lint + name: linting and formatting + entry: "scripts/pre-commit-lint.sh" + language: python + # language_version: python3.9 + types: [python] + require_serial: true + verbose: true - repo: https://github.com/codespell-project/codespell rev: v2.3.0 hooks: @@ -80,7 +79,3 @@ repos: notebook/.* | website/.* )$ - - repo: https://github.com/nbQA-dev/nbQA - rev: 1.8.5 - hooks: - - id: nbqa-black diff --git a/autogen/agentchat/contrib/capabilities/generate_images.py b/autogen/agentchat/contrib/capabilities/generate_images.py index 429a466945..a00bf4bf94 100644 --- a/autogen/agentchat/contrib/capabilities/generate_images.py +++ b/autogen/agentchat/contrib/capabilities/generate_images.py @@ -7,8 +7,8 @@ import re from typing import Any, Dict, List, Literal, Optional, Protocol, Tuple, Union -from openai import OpenAI from PIL.Image import Image +from openai import OpenAI from autogen import Agent, ConversableAgent, code_utils from autogen.agentchat.contrib import img_utils diff --git a/autogen/agentchat/contrib/capabilities/vision_capability.py b/autogen/agentchat/contrib/capabilities/vision_capability.py index 2d5c02e10f..60d7b95b47 100644 --- a/autogen/agentchat/contrib/capabilities/vision_capability.py +++ b/autogen/agentchat/contrib/capabilities/vision_capability.py @@ -155,7 +155,7 @@ def process_last_received_message(self, content: Union[str, list[dict]]) -> str: ```python content = [ {"type": "text", "text": "What's weather in this cool photo:"}, - {"type": "image_url", "image_url": {"url": "http://example.com/photo.jpg"}} + {"type": "image_url", "image_url": {"url": "http://example.com/photo.jpg"}}, ] ``` Output: "What's weather in this cool photo: `` in case you can not see, the caption of this image is: diff --git a/autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py b/autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py index 91d3fdf906..29857af088 100644 --- a/autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +++ b/autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py @@ -12,7 +12,7 @@ def count_distinct_permutations(sequence): int: The number of distinct permutations. Example: - >>> count_distinct_permutations('aab') + >>> count_distinct_permutations("aab") 3 >>> count_distinct_permutations([1, 2, 2]) 3 diff --git a/autogen/agentchat/contrib/img_utils.py b/autogen/agentchat/contrib/img_utils.py index f6f4747a2f..3ac74aac4e 100644 --- a/autogen/agentchat/contrib/img_utils.py +++ b/autogen/agentchat/contrib/img_utils.py @@ -353,7 +353,7 @@ def num_tokens_from_gpt_image( Examples: -------- >>> from PIL import Image - >>> img = Image.new('RGB', (2500, 2500), color = 'red') + >>> img = Image.new("RGB", (2500, 2500), color="red") >>> num_tokens_from_gpt_image(img, model="gpt-4-vision") 765 """ diff --git a/autogen/agentchat/contrib/reasoning_agent.py b/autogen/agentchat/contrib/reasoning_agent.py index c7cfba7cf1..70897a04bf 100644 --- a/autogen/agentchat/contrib/reasoning_agent.py +++ b/autogen/agentchat/contrib/reasoning_agent.py @@ -43,7 +43,6 @@ class ThinkNode: - def __init__(self, content: str, parent: Optional["ThinkNode"] = None) -> None: """A node in a tree structure representing a step in the reasoning process. @@ -623,7 +622,8 @@ def _mtcs_reply(self, prompt, ground_truth=""): # More intensive analysis is needed in the future. 
choices_weights = [ # exploitation term + - (child.value / (child.visits + EPSILON)) + + (child.value / (child.visits + EPSILON)) + + # exploration term self._exploration_constant * math.sqrt(2 * math.log(node.visits + EPSILON) / (child.visits + EPSILON)) diff --git a/autogen/agentchat/contrib/swarm_agent.py b/autogen/agentchat/contrib/swarm_agent.py index 80c78a65f1..3e4272b3b5 100644 --- a/autogen/agentchat/contrib/swarm_agent.py +++ b/autogen/agentchat/contrib/swarm_agent.py @@ -661,7 +661,6 @@ def register_update_agent_state_before_reply(self, functions: Optional[Union[lis for func in functions: if isinstance(func, UPDATE_SYSTEM_MESSAGE): - # Wrapper function that allows this to be used in the update_agent_state hook # Its primary purpose, however, is just to update the agent's system message # Outer function to create a closure with the update function @@ -732,7 +731,6 @@ def transfer_to_agent_name() -> SwarmAgent: ), "Invalid After Work value" self.after_work = transit elif isinstance(transit, ON_CONDITION): - if isinstance(transit.target, SwarmAgent): # Transition to agent @@ -809,7 +807,6 @@ def generate_swarm_tool_reply( message = messages[-1] if "tool_calls" in message: - tool_call_count = len(message["tool_calls"]) # Loop through tool calls individually (so context can be updated after each function call) @@ -817,7 +814,6 @@ def generate_swarm_tool_reply( tool_responses_inner = [] contents = [] for index in range(tool_call_count): - # Deep copy to ensure no changes to messages when we insert the context variables message_copy = copy.deepcopy(message) @@ -834,7 +830,6 @@ def generate_swarm_tool_reply( # Inject the context variables into the tool call if it has the parameter sig = signature(func) if __CONTEXT_VARIABLES_PARAM_NAME__ in sig.parameters: - current_args = json.loads(tool_call["function"]["arguments"]) current_args[__CONTEXT_VARIABLES_PARAM_NAME__] = self._context_variables tool_call["function"]["arguments"] = json.dumps(current_args) diff --git a/autogen/agentchat/contrib/tool_retriever.py b/autogen/agentchat/contrib/tool_retriever.py index 8844e8d8e9..2b0026808f 100644 --- a/autogen/agentchat/contrib/tool_retriever.py +++ b/autogen/agentchat/contrib/tool_retriever.py @@ -135,7 +135,7 @@ class LocalExecutorWithTools(CodeExecutor): ag2_tool = interop.convert_tool(tool=langchain_tool, type="langchain") # `ag2_tool.name` is wikipedia - local_executor = LocalExecutorWithTools(tools=[ag2_tool], work_dir='./') + local_executor = LocalExecutorWithTools(tools=[ag2_tool], work_dir="./") code = ''' result = wikipedia(tool_input={"query":"Christmas"}) @@ -161,13 +161,13 @@ def code_extractor(self) -> CodeExtractor: """(Experimental) Export a code extractor that can be used by an agent.""" return MarkdownCodeExtractor() - def __init__(self, tools: Optional[List[Tool]] = None, work_dir: Union[Path, str] = Path(".")): + def __init__(self, tools: Optional[list[Tool]] = None, work_dir: Union[Path, str] = Path(".")): self.tools = tools if tools is not None else [] self.work_dir = work_dir if not os.path.exists(work_dir): os.makedirs(work_dir, exist_ok=True) - def execute_code_blocks(self, code_blocks: List[CodeBlock]) -> CodeResult: + def execute_code_blocks(self, code_blocks: list[CodeBlock]) -> CodeResult: """Execute code blocks and return the result. 
Args: diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index 758353d140..79f0f1968b 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -47,8 +47,8 @@ ConversableAgentUsageSummaryMessage, ConversableAgentUsageSummaryNoCostIncurredMessage, ExecuteCodeBlockMessage, - ExecutedFunctionMessage, ExecuteFunctionMessage, + ExecutedFunctionMessage, GenerateCodeExecutionReplyMessage, TerminationAndHumanReplyMessage, UsingAutoReplyMessage, @@ -435,7 +435,7 @@ def _get_chats_to_run( message = last_msg if callable(message): message = message(recipient, messages, sender, config) - # We only run chat that has a valid message. NOTE: This is prone to change dependin on applications. + # We only run chat that has a valid message. NOTE: This is prone to change depending on applications. if message: current_c["message"] = message chat_to_run.append(current_c) @@ -783,9 +783,7 @@ def send( ```python { "content": lambda context: context["use_tool_msg"], - "context": { - "use_tool_msg": "Use tool X if they are relevant." - } + "context": {"use_tool_msg": "Use tool X if they are relevant."}, } ``` Next time, one agent can send a message B with a different "use_tool_msg". @@ -833,9 +831,7 @@ async def a_send( ```python { "content": lambda context: context["use_tool_msg"], - "context": { - "use_tool_msg": "Use tool X if they are relevant." - } + "context": {"use_tool_msg": "Use tool X if they are relevant."}, } ``` Next time, one agent can send a message B with a different "use_tool_msg". @@ -2721,7 +2717,6 @@ def _decorator(func_or_tool: Union[F, Tool]) -> Tool: return _decorator def _register_for_llm(self, tool: Tool, api_style: Literal["tool", "function"]) -> None: - # register the function to the agent if there is LLM config, raise an exception otherwise if self.llm_config is None: raise RuntimeError("LLM config must be setup before registering a function for LLM.") diff --git a/autogen/agentchat/groupchat.py b/autogen/agentchat/groupchat.py index 425b817f3d..12d3a23582 100644 --- a/autogen/agentchat/groupchat.py +++ b/autogen/agentchat/groupchat.py @@ -1107,9 +1107,7 @@ def last_speaker(self) -> Agent: def print_messages(recipient, messages, sender, config): # Print the message immediately - print( - f"Sender: {sender.name} | Recipient: {recipient.name} | Message: {messages[-1].get('content')}" - ) + print(f"Sender: {sender.name} | Recipient: {recipient.name} | Message: {messages[-1].get('content')}") print(f"Real Sender: {sender.last_speaker.name}") assert sender.last_speaker.name in messages[-1].get("content") return False, None # Required to ensure the agent communication flow continues @@ -1119,9 +1117,7 @@ def print_messages(recipient, messages, sender, config): agent_b = ConversableAgent("agent B", default_auto_reply="I'm agent B.") agent_c = ConversableAgent("agent C", default_auto_reply="I'm agent C.") for agent in [agent_a, agent_b, agent_c]: - agent.register_reply( - [ConversableAgent, None], reply_func=print_messages, config=None - ) + agent.register_reply([ConversableAgent, None], reply_func=print_messages, config=None) group_chat = GroupChat( [agent_a, agent_b, agent_c], messages=[], @@ -1130,9 +1126,7 @@ def print_messages(recipient, messages, sender, config): allow_repeat_speaker=True, ) chat_manager = GroupChatManager(group_chat) - groupchat_result = agent_a.initiate_chat( - chat_manager, message="Hi, there, I'm agent A." 
- ) + groupchat_result = agent_a.initiate_chat(chat_manager, message="Hi, there, I'm agent A.") ``` """ return self._last_speaker diff --git a/autogen/agentchat/realtime_agent/oai_realtime_client.py b/autogen/agentchat/realtime_agent/oai_realtime_client.py index 6a104a5670..b102c73656 100644 --- a/autogen/agentchat/realtime_agent/oai_realtime_client.py +++ b/autogen/agentchat/realtime_agent/oai_realtime_client.py @@ -4,9 +4,10 @@ import asyncio import json +from collections.abc import AsyncGenerator from contextlib import asynccontextmanager from logging import Logger, getLogger -from typing import TYPE_CHECKING, Any, AsyncGenerator, Optional +from typing import TYPE_CHECKING, Any, Optional import httpx from openai import DEFAULT_MAX_RETRIES, NOT_GIVEN, AsyncOpenAI diff --git a/autogen/agentchat/realtime_agent/realtime_agent.py b/autogen/agentchat/realtime_agent/realtime_agent.py index 6f45bdea38..52ed15fea5 100644 --- a/autogen/agentchat/realtime_agent/realtime_agent.py +++ b/autogen/agentchat/realtime_agent/realtime_agent.py @@ -168,10 +168,8 @@ async def run(self) -> None: """Run the agent.""" # everything is run in the same task group to enable easy cancellation using self._tg.cancel_scope.cancel() async with create_task_group() as self._tg: - # connect with the client first (establishes a connection and initializes a session) async with self._realtime_client.connect(): - # start the observers for observer in self._observers: self._tg.soonify(observer.run)(self) diff --git a/autogen/agentchat/realtime_agent/realtime_client.py b/autogen/agentchat/realtime_agent/realtime_client.py index 2195a78f83..0e914bf419 100644 --- a/autogen/agentchat/realtime_agent/realtime_client.py +++ b/autogen/agentchat/realtime_agent/realtime_client.py @@ -2,7 +2,8 @@ # # SPDX-License-Identifier: Apache-2.0 -from typing import Any, AsyncContextManager, AsyncGenerator, Literal, Protocol, runtime_checkable +from collections.abc import AsyncGenerator +from typing import Any, AsyncContextManager, Literal, Protocol, runtime_checkable __all__ = ["RealtimeClientProtocol", "Role"] diff --git a/autogen/agentchat/utils.py b/autogen/agentchat/utils.py index 74fa996165..cb3afe2c76 100644 --- a/autogen/agentchat/utils.py +++ b/autogen/agentchat/utils.py @@ -47,25 +47,24 @@ def gather_usage_summary(agents: list[Agent]) -> dict[dict[str, dict], dict[str, ```python { - "usage_including_cached_inference" : { + "usage_including_cached_inference": { "total_cost": 0.0006090000000000001, "gpt-35-turbo": { - "cost": 0.0006090000000000001, - "prompt_tokens": 242, - "completion_tokens": 123, - "total_tokens": 365 + "cost": 0.0006090000000000001, + "prompt_tokens": 242, + "completion_tokens": 123, + "total_tokens": 365, }, }, - - "usage_excluding_cached_inference" : { + "usage_excluding_cached_inference": { "total_cost": 0.0006090000000000001, "gpt-35-turbo": { - "cost": 0.0006090000000000001, - "prompt_tokens": 242, - "completion_tokens": 123, - "total_tokens": 365 + "cost": 0.0006090000000000001, + "prompt_tokens": 242, + "completion_tokens": 123, + "total_tokens": 365, }, - } + }, } ``` diff --git a/autogen/cache/cache_factory.py b/autogen/cache/cache_factory.py index b64328cfe7..e3ea13bd0b 100644 --- a/autogen/cache/cache_factory.py +++ b/autogen/cache/cache_factory.py @@ -53,11 +53,14 @@ def cache_factory( Creating a Cosmos DB cache: ```python - cosmos_cache = cache_factory("myseed", cosmosdb_config={ + cosmos_cache = cache_factory( + "myseed", + cosmosdb_config={ "connection_string": "your_connection_string", "database_id": 
"your_database_id", - "container_id": "your_container_id"} - ) + "container_id": "your_container_id", + }, + ) ``` """ diff --git a/autogen/cache/in_memory_cache.py b/autogen/cache/in_memory_cache.py index f080530e56..c955920c60 100644 --- a/autogen/cache/in_memory_cache.py +++ b/autogen/cache/in_memory_cache.py @@ -17,7 +17,6 @@ class InMemoryCache(AbstractCache): - def __init__(self, seed: Union[str, int] = ""): self._seed = str(seed) self._cache: dict[str, Any] = {} diff --git a/autogen/code_utils.py b/autogen/code_utils.py index 07ec3d77b5..97ecd9e961 100644 --- a/autogen/code_utils.py +++ b/autogen/code_utils.py @@ -473,7 +473,9 @@ def execute_code( image_list = ( ["python:3-slim", "python:3", "python:3-windowsservercore"] if use_docker is True - else [use_docker] if isinstance(use_docker, str) else use_docker + else [use_docker] + if isinstance(use_docker, str) + else use_docker ) for image in image_list: # check if the image exists diff --git a/autogen/coding/func_with_reqs.py b/autogen/coding/func_with_reqs.py index 1f842c9193..331511a696 100644 --- a/autogen/coding/func_with_reqs.py +++ b/autogen/coding/func_with_reqs.py @@ -162,7 +162,7 @@ def wrapper(func: Callable[P, T]) -> FunctionWithRequirements[T, P]: def _build_python_functions_file( - funcs: list[FunctionWithRequirements[Any, P] | Callable[..., Any] | FunctionWithRequirementsStr] + funcs: list[FunctionWithRequirements[Any, P] | Callable[..., Any] | FunctionWithRequirementsStr], ) -> str: # First collect all global imports global_imports: set[str] = set() diff --git a/autogen/coding/local_commandline_code_executor.py b/autogen/coding/local_commandline_code_executor.py index bcbab20ef2..873794a4ba 100644 --- a/autogen/coding/local_commandline_code_executor.py +++ b/autogen/coding/local_commandline_code_executor.py @@ -355,8 +355,9 @@ def new(cls, *args, **kwargs): # type: ignore[no-untyped-def] if alias is not None: warnings.warn( - "{} has been renamed to {}, the alias will be " - "removed in the future".format(cls.__name__, alias.__name__), + "{} has been renamed to {}, the alias will be " "removed in the future".format( + cls.__name__, alias.__name__ + ), DeprecationWarning, stacklevel=2, ) @@ -373,8 +374,9 @@ def new(cls, *args, **kwargs): # type: ignore[no-untyped-def] if alias is not None: warnings.warn( - "{} has been renamed to {}, the alias will be " - "removed in the future".format(b.__name__, alias.__name__), + "{} has been renamed to {}, the alias will be " "removed in the future".format( + b.__name__, alias.__name__ + ), DeprecationWarning, stacklevel=2, ) diff --git a/autogen/io/__init__.py b/autogen/io/__init__.py index 143281f5cf..a3a6c6cddd 100644 --- a/autogen/io/__init__.py +++ b/autogen/io/__init__.py @@ -4,7 +4,7 @@ # # Portions derived from https://github.com/microsoft/autogen are under the MIT License. 
# SPDX-License-Identifier: MIT -from .base import InputStream, IOStream, OutputStream +from .base import IOStream, InputStream, OutputStream from .console import IOConsole from .websockets import IOWebsockets diff --git a/autogen/logger/sqlite_logger.py b/autogen/logger/sqlite_logger.py index 24bd7447e3..18fc4275b4 100644 --- a/autogen/logger/sqlite_logger.py +++ b/autogen/logger/sqlite_logger.py @@ -383,7 +383,6 @@ def log_new_wrapper(self, wrapper: OpenAIWrapper, init_args: dict[str, LLMConfig self._run_query(query=query, args=args) def log_function_use(self, source: str | Agent, function: F, args: dict[str, Any], returns: Any) -> None: - if self.con is None: return diff --git a/autogen/messages/base_message.py b/autogen/messages/base_message.py index ab72a4ec57..4f7eaba8ef 100644 --- a/autogen/messages/base_message.py +++ b/autogen/messages/base_message.py @@ -34,10 +34,10 @@ def camel2snake(name: str) -> str: return "".join(["_" + i.lower() if i.isupper() else i for i in name]).lstrip("_") -_message_classes: dict[str, Type[BaseModel]] = {} +_message_classes: dict[str, type[BaseModel]] = {} -def wrap_message(message_cls: Type[BaseMessage]) -> Type[BaseModel]: +def wrap_message(message_cls: type[BaseMessage]) -> type[BaseModel]: """Wrap a message class with a type field to be used in a union type This is needed for proper serialization and deserialization of messages in a union type. @@ -78,11 +78,11 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: return Wrapper -def get_annotated_type_for_message_classes() -> Type[Any]: +def get_annotated_type_for_message_classes() -> type[Any]: # this is a dynamic type so we need to disable the type checker union_type = Union[tuple(_message_classes.values())] # type: ignore[valid-type] return Annotated[union_type, Field(discriminator="type")] # type: ignore[return-value] -def get_message_classes() -> dict[str, Type[BaseModel]]: +def get_message_classes() -> dict[str, type[BaseModel]]: return _message_classes diff --git a/autogen/oai/bedrock.py b/autogen/oai/bedrock.py index b624cc9125..722f1e029f 100644 --- a/autogen/oai/bedrock.py +++ b/autogen/oai/bedrock.py @@ -21,7 +21,7 @@ "aws_region": "us-west-2", "aws_access_key": "", "aws_secret_key": "", - "price" : [0.003, 0.015] + "price": [0.003, 0.015], } ] @@ -104,7 +104,6 @@ def __init__(self, **kwargs: Any): or self._aws_secret_key is None or self._aws_secret_key == "" ): - # attempts to get client from attached role of managed service (lambda, ec2, ecs, etc.) 
self.bedrock_runtime = boto3.client(service_name="bedrock-runtime", config=bedrock_config) else: @@ -505,7 +504,6 @@ def parse_image(image_url: str) -> tuple[bytes, str]: response = requests.get(image_url) # Check if the request was successful if response.status_code == 200: - content_type = response.headers.get("Content-Type") if not content_type.startswith("image"): content_type = "image/jpeg" diff --git a/autogen/oai/cerebras.py b/autogen/oai/cerebras.py index 3cbcf3368f..f30d12379e 100644 --- a/autogen/oai/cerebras.py +++ b/autogen/oai/cerebras.py @@ -8,12 +8,8 @@ Example: ```python - llm_config={ - "config_list": [{ - "api_type": "cerebras", - "model": "llama3.1-8b", - "api_key": os.environ.get("CEREBRAS_API_KEY") - }] + llm_config = { + "config_list": [{"api_type": "cerebras", "model": "llama3.1-8b", "api_key": os.environ.get("CEREBRAS_API_KEY")}] } agent = autogen.AssistantAgent("my_agent", llm_config=llm_config) diff --git a/autogen/oai/client.py b/autogen/oai/client.py index bfb914b115..3f923cd629 100644 --- a/autogen/oai/client.py +++ b/autogen/oai/client.py @@ -489,7 +489,7 @@ def __init__( They can contain additional kwargs as allowed in the [create](/docs/reference/oai/client#create) method. E.g., ```python - config_list=[ + config_list = [ { "model": "gpt-4", "api_key": os.environ.get("AZURE_OPENAI_API_KEY"), @@ -506,7 +506,7 @@ def __init__( { "model": "llama-7B", "base_url": "http://127.0.0.1:8080", - } + }, ] ``` diff --git a/autogen/oai/cohere.py b/autogen/oai/cohere.py index 3765902ce0..dc2db1f8a6 100644 --- a/autogen/oai/cohere.py +++ b/autogen/oai/cohere.py @@ -225,7 +225,6 @@ def create(self, params: dict) -> ChatCompletion: cohere_finish = "tool_calls" tool_calls = [] for tool_call in response.tool_calls: - # if parameters are null, clear them out (Cohere can return a string "null" if no parameter values) tool_calls.append( @@ -270,7 +269,6 @@ def extract_to_cohere_tool_results(tool_call_id: str, content_output: str, all_t for tool_call in all_tool_calls: if tool_call["id"] == tool_call_id: - call = { "name": tool_call["function"]["name"], "parameters": json.loads( @@ -306,7 +304,6 @@ def oai_messages_to_cohere_messages( if "tools" in params: cohere_tools = [] for tool in params["tools"]: - # build list of properties parameters = {} @@ -351,7 +348,6 @@ def oai_messages_to_cohere_messages( # tool_results go into tool_results parameter messages_length = len(messages) for index, message in enumerate(messages): - if "role" in message and message["role"] == "system": # System message if preamble == "": @@ -422,7 +418,6 @@ def oai_messages_to_cohere_messages( return cohere_messages, preamble, "" else: - # We need to get the last message to assign to the message field for Cohere, # if the last message is a user message, use that, otherwise put in 'continue'. 
if cohere_messages[-1]["role"] == "USER": diff --git a/autogen/oai/completion.py b/autogen/oai/completion.py index 0768de778e..4f7d93497e 100644 --- a/autogen/oai/completion.py +++ b/autogen/oai/completion.py @@ -769,7 +769,7 @@ def create( "model": "llama-7B", "base_url": "http://127.0.0.1:8080", "api_type": "openai", - } + }, ], prompt="Hi", ) @@ -953,7 +953,7 @@ def eval_func(responses, **data): An example agg_method in str: ```python - agg_method = 'median' + agg_method = "median" ``` An example agg_method in a Callable: @@ -964,7 +964,7 @@ def eval_func(responses, **data): An example agg_method in a dict of Callable: ```python - agg_method={'median_success': np.median, 'avg_success': np.mean} + agg_method = {"median_success": np.median, "avg_success": np.mean} ``` return_responses_and_per_instance_result (bool): Whether to also return responses diff --git a/autogen/oai/gemini.py b/autogen/oai/gemini.py index af2d017fcc..1d24cebcaf 100644 --- a/autogen/oai/gemini.py +++ b/autogen/oai/gemini.py @@ -9,23 +9,25 @@ Example: ```python - llm_config={ - "config_list": [{ - "api_type": "google", - "model": "gemini-pro", - "api_key": os.environ.get("GOOGLE_GEMINI_API_KEY"), - "safety_settings": [ + llm_config = { + "config_list": [ + { + "api_type": "google", + "model": "gemini-pro", + "api_key": os.environ.get("GOOGLE_GEMINI_API_KEY"), + "safety_settings": [ {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_ONLY_HIGH"}, {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_ONLY_HIGH"}, {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_ONLY_HIGH"}, - {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_ONLY_HIGH"} - ], - "top_p":0.5, - "max_tokens": 2048, - "temperature": 1.0, - "top_k": 5 + {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_ONLY_HIGH"}, + ], + "top_p": 0.5, + "max_tokens": 2048, + "temperature": 1.0, + "top_k": 5, } - ]} + ] + } agent = autogen.AssistantAgent("my_agent", llm_config=llm_config) ``` @@ -52,10 +54,11 @@ from io import BytesIO from typing import Any, Dict, List, Optional, Tuple, Union -import google.generativeai as genai import PIL +import google.generativeai as genai import requests import vertexai +from PIL import Image from google.ai.generativelanguage import Content, FunctionCall, FunctionDeclaration, FunctionResponse, Part, Tool from google.ai.generativelanguage_v1beta.types import Schema from google.auth.credentials import Credentials @@ -63,7 +66,6 @@ from openai.types.chat import ChatCompletion, ChatCompletionMessageToolCall from openai.types.chat.chat_completion import ChatCompletionMessage, Choice from openai.types.completion_usage import CompletionUsage -from PIL import Image from pydantic import BaseModel from vertexai.generative_models import ( Content as VertexAIContent, @@ -179,7 +181,6 @@ def get_usage(response) -> dict: } def create(self, params: dict) -> ChatCompletion: - if self.use_vertexai: self._initialize_vertexai(**params) else: @@ -266,10 +267,8 @@ def create(self, params: dict) -> ChatCompletion: random_id = random.randint(0, 10000) prev_function_calls = [] for part in response.parts: - # Function calls if fn_call := part.function_call: - # If we have a repeated function call, ignore it if fn_call not in prev_function_calls: autogen_tool_calls.append( @@ -355,7 +354,6 @@ def _oai_content_to_gemini_content(self, message: dict[str, Any]) -> tuple[list, return rst, "tool" elif "tool_calls" in message and len(message["tool_calls"]) != 0: for tool_call in 
message["tool_calls"]: - function_id = tool_call["id"] function_name = tool_call["function"]["name"] self.tool_call_function_map[function_id] = function_name @@ -687,7 +685,6 @@ def get_image_data(image_file: str, use_b64=True) -> bytes: def calculate_gemini_cost(use_vertexai: bool, input_tokens: int, output_tokens: int, model_name: str) -> float: - def total_cost_mil(cost_per_mil_input: float, cost_per_mil_output: float): # Cost per million return cost_per_mil_input * input_tokens / 1e6 + cost_per_mil_output * output_tokens / 1e6 diff --git a/autogen/oai/groq.py b/autogen/oai/groq.py index 3330e99e52..309566695a 100644 --- a/autogen/oai/groq.py +++ b/autogen/oai/groq.py @@ -8,13 +8,9 @@ Example: ```python - llm_config={ - "config_list": [{ - "api_type": "groq", - "model": "mixtral-8x7b-32768", - "api_key": os.environ.get("GROQ_API_KEY") - } - ]} + llm_config = { + "config_list": [{"api_type": "groq", "model": "mixtral-8x7b-32768", "api_key": os.environ.get("GROQ_API_KEY")}] + } agent = autogen.AssistantAgent("my_agent", llm_config=llm_config) ``` diff --git a/autogen/oai/mistral.py b/autogen/oai/mistral.py index 54a998a390..2eb262f06a 100644 --- a/autogen/oai/mistral.py +++ b/autogen/oai/mistral.py @@ -8,13 +8,11 @@ Example: ```python - llm_config={ - "config_list": [{ - "api_type": "mistral", - "model": "open-mixtral-8x22b", - "api_key": os.environ.get("MISTRAL_API_KEY") - } - ]} + llm_config = { + "config_list": [ + {"api_type": "mistral", "model": "open-mixtral-8x22b", "api_key": os.environ.get("MISTRAL_API_KEY")} + ] + } agent = autogen.AssistantAgent("my_agent", llm_config=llm_config) ``` diff --git a/autogen/oai/ollama.py b/autogen/oai/ollama.py index 57c7183b11..16c2a5b14e 100644 --- a/autogen/oai/ollama.py +++ b/autogen/oai/ollama.py @@ -8,12 +8,7 @@ Example: ```python - llm_config={ - "config_list": [{ - "api_type": "ollama", - "model": "mistral:7b-instruct-v0.3-q6_K" - } - ]} + llm_config = {"config_list": [{"api_type": "ollama", "model": "mistral:7b-instruct-v0.3-q6_K"}]} agent = autogen.AssistantAgent("my_agent", llm_config=llm_config) ``` @@ -262,7 +257,6 @@ def create(self, params: dict) -> ChatCompletion: total_tokens = prompt_tokens + completion_tokens if response is not None: - # Defaults ollama_finish = "stop" tool_calls = None @@ -277,9 +271,7 @@ def create(self, params: dict) -> ChatCompletion: # Process tools in the response if self._tools_in_conversation: - if self._native_tool_calls: - if not ollama_params["stream"]: response_content = response["message"]["content"] @@ -303,7 +295,6 @@ def create(self, params: dict) -> ChatCompletion: random_id += 1 elif not self._native_tool_calls: - # Try to convert the response to a tool call object response_toolcalls = response_to_tool_call(ans) @@ -481,7 +472,6 @@ def response_to_tool_call(response_string: str) -> Any: matches = re.findall(pattern, response_string.strip()) for match in matches: - # It has matched, extract it and load it json_str = match.strip() data_object = None diff --git a/autogen/oai/openai_utils.py b/autogen/oai/openai_utils.py index 77da5a6279..9535379d3f 100644 --- a/autogen/oai/openai_utils.py +++ b/autogen/oai/openai_utils.py @@ -141,14 +141,14 @@ def get_config_list( Example: ```python # Define a list of API keys - api_keys = ['key1', 'key2', 'key3'] + api_keys = ["key1", "key2", "key3"] # Optionally, define a list of base URLs corresponding to each API key - base_urls = ['https://api.service1.com', 'https://api.service2.com', 'https://api.service3.com'] + base_urls = ["https://api.service1.com", 
"https://api.service2.com", "https://api.service3.com"] # Optionally, define the API type and version if they are common for all keys - api_type = 'azure' - api_version = '2024-02-01' + api_type = "azure" + api_version = "2024-02-01" # Call the get_config_list function to get a list of configuration dictionaries config_list = get_config_list(api_keys, base_urls, api_type, api_version) @@ -330,15 +330,15 @@ def config_list_from_models( Example: ```python # Define the path where the API key files are located - key_file_path = '/path/to/key/files' + key_file_path = "/path/to/key/files" # Define the file names for the OpenAI and Azure OpenAI API keys and bases - openai_api_key_file = 'key_openai.txt' - aoai_api_key_file = 'key_aoai.txt' - aoai_api_base_file = 'base_aoai.txt' + openai_api_key_file = "key_openai.txt" + aoai_api_key_file = "key_aoai.txt" + aoai_api_base_file = "base_aoai.txt" # Define the list of models for which to create configurations - model_list = ['gpt-4', 'gpt-3.5-turbo'] + model_list = ["gpt-4", "gpt-3.5-turbo"] # Call the function to get a list of configuration dictionaries config_list = config_list_from_models( @@ -346,7 +346,7 @@ def config_list_from_models( openai_api_key_file=openai_api_key_file, aoai_api_key_file=aoai_api_key_file, aoai_api_base_file=aoai_api_base_file, - model_list=model_list + model_list=model_list, ) # The `config_list` will contain configurations for the specified models, for example: @@ -424,16 +424,16 @@ def filter_config( ```python # Example configuration list with various models and API types configs = [ - {'model': 'gpt-3.5-turbo'}, - {'model': 'gpt-4'}, - {'model': 'gpt-3.5-turbo', 'api_type': 'azure'}, - {'model': 'gpt-3.5-turbo', 'tags': ['gpt35_turbo', 'gpt-35-turbo']}, + {"model": "gpt-3.5-turbo"}, + {"model": "gpt-4"}, + {"model": "gpt-3.5-turbo", "api_type": "azure"}, + {"model": "gpt-3.5-turbo", "tags": ["gpt35_turbo", "gpt-35-turbo"]}, ] # Define filter criteria to select configurations for the 'gpt-3.5-turbo' model # that are also using the 'azure' API type filter_criteria = { - 'model': ['gpt-3.5-turbo'], # Only accept configurations for 'gpt-3.5-turbo' - 'api_type': ['azure'] # Only accept configurations for 'azure' API type + "model": ["gpt-3.5-turbo"], # Only accept configurations for 'gpt-3.5-turbo' + "api_type": ["azure"], # Only accept configurations for 'azure' API type } # Apply the filter to the configuration list filtered_configs = filter_config(configs, filter_criteria) @@ -441,7 +441,7 @@ def filter_config( # [{'model': 'gpt-3.5-turbo', 'api_type': 'azure', ...}] # Define a filter to select a given tag filter_criteria = { - 'tags': ['gpt35_turbo'], + "tags": ["gpt35_turbo"], } # Apply the filter to the configuration list filtered_configs = filter_config(configs, filter_criteria) @@ -507,7 +507,7 @@ def config_list_from_json( # We can retrieve a filtered list of configurations like this: filter_criteria = {"model": ["gpt-3.5-turbo"]} - configs = config_list_from_json('CONFIG_JSON', filter_dict=filter_criteria) + configs = config_list_from_json("CONFIG_JSON", filter_dict=filter_criteria) # The 'configs' variable will now contain only the configurations that match the filter criteria. 
``` @@ -553,11 +553,7 @@ def get_config( Example: ```python - config = get_config( - api_key="sk-abcdef1234567890", - base_url="https://api.openai.com", - api_version="v1" - ) + config = get_config(api_key="sk-abcdef1234567890", base_url="https://api.openai.com", api_version="v1") # The 'config' variable will now contain: # { # "api_key": "sk-abcdef1234567890", diff --git a/autogen/oai/together.py b/autogen/oai/together.py index ac55cb81b3..f72763b9cc 100644 --- a/autogen/oai/together.py +++ b/autogen/oai/together.py @@ -8,13 +8,15 @@ Example: ```python - llm_config={ - "config_list": [{ - "api_type": "together", - "model": "mistralai/Mixtral-8x7B-Instruct-v0.1", - "api_key": os.environ.get("TOGETHER_API_KEY") + llm_config = { + "config_list": [ + { + "api_type": "together", + "model": "mistralai/Mixtral-8x7B-Instruct-v0.1", + "api_key": os.environ.get("TOGETHER_API_KEY"), } - ]} + ] + } agent = autogen.AssistantAgent("my_agent", llm_config=llm_config) ``` @@ -39,10 +41,10 @@ from typing import Any, Dict, List, Optional, Tuple, Union import requests +from PIL import Image from openai.types.chat import ChatCompletion, ChatCompletionMessageToolCall from openai.types.chat.chat_completion import ChatCompletionMessage, Choice from openai.types.completion_usage import CompletionUsage -from PIL import Image from pydantic import BaseModel from together import Together, error @@ -66,9 +68,7 @@ def __init__(self, **kwargs): if "response_format" in kwargs and kwargs["response_format"] is not None: warnings.warn("response_format is not supported for Together.AI, it will be ignored.", UserWarning) - assert ( - self.api_key - ), "Please include the api_key in your config list entry for Together.AI or set the TOGETHER_API_KEY env variable." + assert self.api_key, "Please include the api_key in your config list entry for Together.AI or set the TOGETHER_API_KEY env variable." def message_retrieval(self, response) -> list: """ diff --git a/autogen/tools/dependency_injection.py b/autogen/tools/dependency_injection.py index 2b9b3af06e..5a7ed2f7ff 100644 --- a/autogen/tools/dependency_injection.py +++ b/autogen/tools/dependency_injection.py @@ -5,8 +5,9 @@ import inspect import sys from abc import ABC +from collections.abc import Iterable from functools import wraps -from typing import Any, Callable, Iterable, get_type_hints +from typing import Any, Callable, get_type_hints from fast_depends import Depends as FastDepends from fast_depends import inject diff --git a/autogen/tools/function_utils.py b/autogen/tools/function_utils.py index 2b7e458d2c..8bafc554bf 100644 --- a/autogen/tools/function_utils.py +++ b/autogen/tools/function_utils.py @@ -241,6 +241,7 @@ def get_function_schema(f: Callable[..., Any], *, name: Optional[str] = None, de def f(a: Annotated[str, "Parameter a"], b: int = 2, c: Annotated[float, "Parameter c"] = 0.1) -> None: pass + get_function_schema(f, description="function f") # {'type': 'function', diff --git a/notebook/agentchat_agentoptimizer.ipynb b/notebook/agentchat_agentoptimizer.ipynb index cf7c27c920..4708b2a64c 100644 --- a/notebook/agentchat_agentoptimizer.ipynb +++ b/notebook/agentchat_agentoptimizer.ipynb @@ -1,466 +1,465 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# AgentOptimizer: An Agentic Way to Train Your LLM Agent\n", - "\n", - "AutoGen offers conversable agents powered by LLM, tool, or human, which can be used to perform tasks collectively via automated chat. 
This framework allows tool use and human participation through multi-agent conversation.\n",
    "Please find documentation about this feature [here](https://docs.ag2.ai/docs/Use-Cases/agent_chat).\n",
    "\n",
    "In a traditional ML pipeline, we train a model by updating its parameters according to the loss on the training set, while in the era of LLM agents, how should we train an agent? Here, we take an initial step towards agent training. Inspired by the [function calling](https://platform.openai.com/docs/guides/function-calling) capabilities provided by OpenAI, we draw an analogy between model parameters and agent functions/skills, and update the agent’s functions/skills based on its historical performance on the training set. As an agentic way of training an agent, our approach helps enhance the agents’ abilities without requiring access to the LLM’s parameters.\n",
    "\n",
    "In this notebook, we introduce a new class, ‘AgentOptimizer’, which is able to improve the function list of one Assistant-UserProxy pair according to the historical conversations.\n",
    "This feature would support agents in improving their ability to solve problems of the same type as previous tasks.\n",
    "Specifically, given a set of training data, AgentOptimizer would iteratively prompt the LLM to optimize the existing function list of the AssistantAgent and UserProxyAgent with code implementation if necessary. It also includes two strategies, roll-back and early-stop, to streamline the training process.\n",
    "In the example scenario, we test the proposed AgentOptimizer in solving problems from the [MATH dataset](https://github.com/hendrycks/math). \n",
    "\n",
    "![AgentOptimizer](https://media.githubusercontent.com/media/ag2ai/ag2/main/website/blog/2023-12-23-AgentOptimizer/img/agentoptimizer.png)\n",
    "\n",
    "More information can be found in the [paper](https://arxiv.org/abs/2402.11359).\n",
    "\n",
    "Authors:\n",
    "- [Shaokun Zhang](https://github.com/skzhang1), Ph.D. student at the Pennsylvania State University\n",
    "- [Jieyu Zhang](https://jieyuz2.github.io), Ph.D. 
student at the University of Washington" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [], - "source": [ - "import copy\n", - "import json\n", - "import os\n", - "from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union\n", - "\n", - "from openai import BadRequestError\n", - "\n", - "import autogen\n", - "from autogen import config_list_from_json\n", - "from autogen.agentchat import Agent\n", - "from autogen.agentchat.contrib.agent_optimizer import AgentOptimizer\n", - "from autogen.agentchat.contrib.math_user_proxy_agent import MathUserProxyAgent\n", - "from autogen.code_utils import extract_code\n", - "from autogen.math_utils import get_answer" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# MathUserProxy with function_call\n", - "\n", - "This agent is a customized MathUserProxy inherits from its [parent class](https://github.com/ag2ai/ag2/blob/main/autogen/agentchat/contrib/math_user_proxy_agent.py).\n", - "\n", - "It supports using both function_call and python to solve math problems.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [], - "source": [ - "def is_termination_msg_mathchat(message):\n", - " \"\"\"Check if a message is a termination message.\"\"\"\n", - " if isinstance(message, dict):\n", - " message = message.get(\"content\")\n", - " if message is None:\n", - " return False\n", - " cb = extract_code(message)\n", - " contain_code = False\n", - " for c in cb:\n", - " if c[0] == \"python\":\n", - " contain_code = True\n", - " break\n", - " if message.rstrip().find(\"TERMINATE\") >= 0:\n", - " return True\n", - " return not contain_code and get_answer(message) is not None and get_answer(message) != \"\"\n", - "\n", - "\n", - "class MathUserProxyAgent(MathUserProxyAgent):\n", - " MAX_CONSECUTIVE_AUTO_REPLY = 15\n", - " DEFAULT_REPLY = \"Continue. Please keep solving the problem until you need to query. (If you get to the answer, put it in \\\\boxed{}.)\"\n", - " PROMPTS = \"\"\"Let's solve a math problem.\n", - "Query requirements:\n", - "You should always use the 'print' function for the output and use fractions/radical forms instead of decimals.\n", - "You can use packages like sympy to help you.\n", - "You must follow the formats below to write your code:\n", - "```python\n", - "# your code\n", - "```\n", - "If some packages are missing, you could also suggest a code to install the corresponding package.\n", - "\n", - "Please follow this process:\n", - "1. Solve the problem step by step (do not over-divide the steps).\n", - "2. Take out any queries that can be asked through Python code (for example, any calculations or equations that can be calculated) and functions you know in the context of this conversation.\n", - "\n", - "Please\n", - "(1) do not mix suggested Python codes and function calls in one step.\n", - "(2) You MUST remember that you don’t have a function named \"python\" available.\n", - "\n", - "You must follow the formats below to write your Python code:\n", - "```python\n", - "# your code\n", - "```\n", - "\n", - "3. Wait for me to give the results or wait for the executed results of the function call.\n", - "4. Continue if you think the result is correct. 
If the result is invalid or unexpected, please correct your query or reasoning.\n", - "\n", - "After all the queries are run and you get the answer, put the answer in \\\\boxed{}.\n", - "\n", - "Problem:\n", - "\"\"\"\n", - "\n", - " def __init__(\n", - " self,\n", - " name: Optional[str] = \"MathChatAgent\",\n", - " is_termination_msg: Optional[Callable[[Dict], bool]] = is_termination_msg_mathchat,\n", - " human_input_mode: Literal[\"ALWAYS\", \"NEVER\", \"TERMINATE\"] = \"NEVER\",\n", - " default_auto_reply: Optional[Union[str, Dict, None]] = DEFAULT_REPLY,\n", - " max_invalid_q_per_step=3,\n", - " **kwargs,\n", - " ):\n", - " super().__init__(\n", - " name=name,\n", - " is_termination_msg=is_termination_msg,\n", - " human_input_mode=human_input_mode,\n", - " default_auto_reply=default_auto_reply,\n", - " max_invalid_q_per_step=max_invalid_q_per_step,\n", - " **kwargs,\n", - " )\n", - " del self._reply_func_list[2]\n", - " self.register_reply([Agent, None], MathUserProxyAgent._generate_math_reply, position=4)\n", - " del self._reply_func_list[3]\n", - " self.register_reply(\n", - " trigger=autogen.ConversableAgent, reply_func=MathUserProxyAgent.generate_function_call_reply, position=3\n", - " )\n", - " self.register_reply(\n", - " trigger=autogen.ConversableAgent, reply_func=MathUserProxyAgent._check_final_result, position=0\n", - " )\n", - "\n", - " self.max_function_call_trial = 3\n", - " self.query = None\n", - " self.answer = None\n", - " self.is_correct = None\n", - "\n", - " def generate_function_call_reply(\n", - " self,\n", - " messages: Optional[List[Dict]] = None,\n", - " sender: Optional[autogen.ConversableAgent] = None,\n", - " config: Optional[Any] = None,\n", - " ) -> Tuple[bool, Union[Dict, None]]:\n", - " \"\"\"Generate a reply using function call.\"\"\"\n", - " if messages is None:\n", - " messages = self._oai_messages[sender]\n", - " message = messages[-1]\n", - " if \"function_call\" in message:\n", - " is_exec_success, func_return = self.execute_function(message[\"function_call\"])\n", - " if is_exec_success:\n", - " self.max_function_call_trial = 3\n", - " return True, func_return\n", - " else:\n", - " if self.max_function_call_trial == 0:\n", - " error_message = func_return[\"content\"]\n", - " self.max_function_call_trial = 3\n", - " return (\n", - " True,\n", - " \"The func is executed failed many times. \"\n", - " + error_message\n", - " + \". Please directly reply me with TERMINATE. We need to terminate the conversation.\",\n", - " )\n", - " else:\n", - " revise_prompt = \"You may make a wrong function call (It may due the arguments you provided doesn't fit the function arguments like missing required positional argument). \\\n", - " If you think this error occurs due to you make a wrong function arguments input and you could make it success, please try to call this function again using the correct arguments. \\\n", - " Otherwise, the error may be caused by the function itself. Please directly reply me with TERMINATE. We need to terminate the conversation. 
\"\n", - " error_message = func_return[\"content\"]\n", - " return True, \"The func is executed failed.\" + error_message + revise_prompt\n", - " return False, None\n", - "\n", - " def initiate_chat(\n", - " self,\n", - " recipient,\n", - " answer: None,\n", - " silent: Optional[bool] = False,\n", - " **context,\n", - " ):\n", - " self.query = context[\"problem\"]\n", - " if not isinstance(answer, str):\n", - " answer = str(answer)\n", - " if answer.endswith(\".0\"):\n", - " answer = answer[:-2]\n", - " self._answer = answer\n", - " else:\n", - " self._answer = answer\n", - "\n", - " self.is_correct = None\n", - "\n", - " self._prepare_chat(recipient, True)\n", - " error_message = None\n", - " try:\n", - " prompt = self.PROMPTS + context[\"problem\"]\n", - " self.send(prompt, recipient, silent=silent)\n", - " except BadRequestError as e:\n", - " error_message = str(e)\n", - " self.is_correct = 0\n", - " print(\"error information: {}\".format(error_message))\n", - "\n", - " recipient.reset()\n", - " is_correct = copy.deepcopy(self.is_correct)\n", - " self._reset()\n", - " return is_correct\n", - "\n", - " def _check_final_result(\n", - " self,\n", - " messages: Optional[List[Dict]] = None,\n", - " sender: Optional[autogen.Agent] = None,\n", - " config: Optional[Any] = None,\n", - " ):\n", - "\n", - " messages = messages[-1]\n", - " if isinstance(messages, dict):\n", - " messages = messages.get(\"content\")\n", - " if messages is None:\n", - " return False, None\n", - "\n", - " cb = extract_code(messages)\n", - " contain_code = False\n", - " for c in cb:\n", - " if c[0] == \"python\":\n", - " contain_code = True\n", - " break\n", - " if not contain_code and get_answer(messages) is not None and get_answer(messages) != \"\":\n", - " if get_answer(messages) == self._answer:\n", - " self.is_correct = 1\n", - " return True, \"The result is Correct. Please reply me with TERMINATE.\"\n", - " else:\n", - " self.is_correct = 0\n", - " return False, None\n", - " else:\n", - " return False, None\n", - "\n", - " def _reset(self):\n", - " super()._reset()\n", - " self.max_function_call_trial = 3\n", - " self.is_correct = None\n", - " self.query = None\n", - " self.answer = None" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Load dataset\n", - "\n", - "MATAH dataset contains 12,500 challenging competition mathematics problems. Each problem in MATH has a full step-by-step solution which can be used to teach models to generate answer derivations and explanations. \n", - "\n", - "We strictly follow the [train](https://github.com/lifan-yuan/CRAFT/blob/main/tab_and_math/MATH/dataset/train/algebra.jsonl)/[test](https://github.com/lifan-yuan/CRAFT/blob/main/tab_and_math/MATH/dataset/algebra.jsonl) splits of [Craft](https://github.com/lifan-yuan/CRAFT). Please specific your own path to the dataset. Here we sample the first 10 algebra problems as examples. 
" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "metadata": {}, - "outputs": [], - "source": [ - "test_data, train_data = [], []\n", - "with open(\"MATH/dataset/algebra.jsonl\", \"r\", encoding=\"utf-8\") as f:\n", - " for line in f:\n", - " test_data.append(json.loads(line))\n", - "with open(\"MATH/dataset/train/algebra.jsonl\", \"r\", encoding=\"utf-8\") as f:\n", - " for line in f:\n", - " train_data.append(json.loads(line))\n", - "test_data, train_data = test_data[0:10], train_data[0:10]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Agents construction\n", - "\n", - "Constructing MathUserProxyAgent and AssistantAgent used in solving these problems. Here, we use gpt-4-1106-preview to construct the AssistantAgent. " - ] - }, - { - "cell_type": "code", - "execution_count": 26, - "metadata": {}, - "outputs": [], - "source": [ - "llm_config = {\n", - " \"config_list\": [\n", - " {\n", - " \"model\": \"gpt-4-1106-preview\",\n", - " \"api_type\": \"azure\",\n", - " \"api_key\": os.environ[\"AZURE_OPENAI_API_KEY\"],\n", - " \"base_url\": \"https://ENDPOINT.openai.azure.com/\",\n", - " \"api_version\": \"2023-07-01-preview\",\n", - " }\n", - " ]\n", - "}\n", - "\n", - "assistant = autogen.AssistantAgent(\n", - " name=\"assistant\",\n", - " system_message=\"You are a helpful assistant.\",\n", - " llm_config=llm_config,\n", - ")\n", - "user_proxy = MathUserProxyAgent(\n", - " name=\"mathproxyagent\",\n", - " human_input_mode=\"NEVER\",\n", - " code_execution_config={\"work_dir\": \"_output\", \"use_docker\": False},\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Test without agent optimizations \n", - "\n", - "Below is the code to get the performance without the agents optimization process. \n", - "\n", - "In this case, the AssistantAgent and MathUserProxyAgent don't have any function calls but solely solve problems with Python." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sum = 0\n", - "for index, query in enumerate(test_data):\n", - " is_correct = user_proxy.initiate_chat(recipient=assistant, answer=query[\"answer\"], problem=query[\"question\"])\n", - " print(is_correct)\n", - " sum += is_correct\n", - "success_rate_without_agent_training = sum / 10" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Agent Training \n", - "\n", - "Then, we use the AgentOptimizer to iteratively optimize the agents by optimizing the function calls according to the historical conversations and performance.\n", - "The AgentOptimizer yields register_for_llm and register_for_executor at each iteration, which are subsequently utilized to update the assistant and user_proxy agents, respectively. \n", - "Here we optimize these two agents for ten epochs. 
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "EPOCH = 10\n", - "optimizer_model = \"gpt-4-1106-preview\"\n", - "optimizer = AgentOptimizer(max_actions_per_step=3, llm_config=llm_config, optimizer_model=optimizer_model)\n", - "for i in range(EPOCH):\n", - " for index, query in enumerate(train_data):\n", - " is_correct = user_proxy.initiate_chat(assistant, answer=query[\"answer\"], problem=query[\"question\"])\n", - " history = assistant.chat_messages_for_summary(user_proxy)\n", - " optimizer.record_one_conversation(history, is_satisfied=is_correct)\n", - " register_for_llm, register_for_exector = optimizer.step()\n", - " for item in register_for_llm:\n", - " assistant.update_function_signature(**item)\n", - " if len(register_for_exector.keys()) > 0:\n", - " user_proxy.register_function(function_map=register_for_exector)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Test with agent optimizations \n", - "\n", - "After agent optimization, the agents obtained a list of functions from the AgentOptimizers after 10 optimization iterations as shown below.\n", - "\n", - "We then show the final performances with/without the agent optimization process. We observe the agents after optimization are obviously better.\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sum = 0\n", - "for index, query in enumerate(test_data):\n", - " is_correct = user_proxy.initiate_chat(recipient=assistant, answer=query[\"answer\"], problem=query[\"question\"])\n", - " sum += is_correct\n", - "success_rate_with_agent_training = sum / 10" - ] - }, + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# AgentOptimizer: An Agentic Way to Train Your LLM Agent\n", + "\n", + "AutoGen offers conversable agents powered by LLM, tool, or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation.\n", + "Please find documentation about this feature [here](https://docs.ag2.ai/docs/Use-Cases/agent_chat).\n", + "\n", + "In traditional ML pipeline, we train a model by updating its parameter according to the loss on the training set, while in the era of LLM agents, how should we train an agent? Here, we take an initial step towards the agent training. Inspired by the [function calling](https://platform.openai.com/docs/guides/function-calling) capabilities provided by OpenAI, we draw an analogy between model parameters and agent functions/skills, and update agent’s functions/skills based on its historical performance on the training set. As an agentic way of training an agent, our approach help enhance the agents’ abilities without requiring access to the LLMs parameters.\n", + "\n", + "In this notebook, we introduce a new class, ‘AgentOptimizer’, which is able to improve the function list of one Assistant-UserProxy pair according to the historical conversation histories.\n", + "This feature would support agents in improving their ability to solve problems of the same type as previous tasks.\n", + "Specifically, given a set of training data, AgentOptimizer would iteratively prompt the LLM to optimize the existing function list of the AssistantAgent and UserProxyAgent with code implementation if necessary. 
It also includes two strategies, roll-back and early-stop, to streamline the training process.\n",
+    "In the example scenario, we test the proposed AgentOptimizer in solving problems from the [MATH dataset](https://github.com/hendrycks/math). \n",
+    "\n",
+    "![AgentOptimizer](https://media.githubusercontent.com/media/ag2ai/ag2/main/website/blog/2023-12-23-AgentOptimizer/img/agentoptimizer.png)\n",
+    "\n",
+    "More information can be found in the [paper](https://arxiv.org/abs/2402.11359).\n",
+    "\n",
+    "Authors:\n",
+    "- [Shaokun Zhang](https://github.com/skzhang1), Ph.D. student at The Pennsylvania State University\n",
+    "- [Jieyu Zhang](https://jieyuz2.github.io), Ph.D. student at the University of Washington"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 17,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import copy\n",
+    "import json\n",
+    "import os\n",
+    "from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union\n",
+    "\n",
+    "from openai import BadRequestError\n",
+    "\n",
+    "import autogen\n",
+    "from autogen import config_list_from_json\n",
+    "from autogen.agentchat import Agent\n",
+    "from autogen.agentchat.contrib.agent_optimizer import AgentOptimizer\n",
+    "from autogen.agentchat.contrib.math_user_proxy_agent import MathUserProxyAgent\n",
+    "from autogen.code_utils import extract_code\n",
+    "from autogen.math_utils import get_answer"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# MathUserProxy with function_call\n",
+    "\n",
+    "This agent is a customized MathUserProxy that inherits from its [parent class](https://github.com/ag2ai/ag2/blob/main/autogen/agentchat/contrib/math_user_proxy_agent.py).\n",
+    "\n",
+    "It supports using both function_call and Python code to solve math problems.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 18,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def is_termination_msg_mathchat(message):\n",
+    "    \"\"\"Check if a message is a termination message.\"\"\"\n",
+    "    if isinstance(message, dict):\n",
+    "        message = message.get(\"content\")\n",
+    "        if message is None:\n",
+    "            return False\n",
+    "    cb = extract_code(message)\n",
+    "    contain_code = False\n",
+    "    for c in cb:\n",
+    "        if c[0] == \"python\":\n",
+    "            contain_code = True\n",
+    "            break\n",
+    "    if message.rstrip().find(\"TERMINATE\") >= 0:\n",
+    "        return True\n",
+    "    return not contain_code and get_answer(message) is not None and get_answer(message) != \"\"\n",
+    "\n",
+    "\n",
+    "class MathUserProxyAgent(MathUserProxyAgent):\n",
+    "    MAX_CONSECUTIVE_AUTO_REPLY = 15\n",
+    "    DEFAULT_REPLY = \"Continue. Please keep solving the problem until you need to query. (If you get to the answer, put it in \\\\boxed{}.)\"\n",
+    "    PROMPTS = \"\"\"Let's solve a math problem.\n",
+    "Query requirements:\n",
+    "You should always use the 'print' function for the output and use fractions/radical forms instead of decimals.\n",
+    "You can use packages like sympy to help you.\n",
+    "You must follow the formats below to write your code:\n",
+    "```python\n",
+    "# your code\n",
+    "```\n",
+    "If some packages are missing, you could also suggest code to install the corresponding package.\n",
+    "\n",
+    "Please follow this process:\n",
+    "1. Solve the problem step by step (do not over-divide the steps).\n",
+    "2. 
Take out any queries that can be asked through Python code (for example, any calculations or equations that can be calculated) or through the functions you know in the context of this conversation.\n",
+    "\n",
+    "Please\n",
+    "(1) do not mix suggested Python code and function calls in one step.\n",
+    "(2) You MUST remember that you don’t have a function named \"python\" available.\n",
+    "\n",
+    "You must follow the formats below to write your Python code:\n",
+    "```python\n",
+    "# your code\n",
+    "```\n",
+    "\n",
+    "3. Wait for me to give the results or wait for the executed results of the function call.\n",
+    "4. Continue if you think the result is correct. If the result is invalid or unexpected, please correct your query or reasoning.\n",
+    "\n",
+    "After all the queries are run and you get the answer, put the answer in \\\\boxed{}.\n",
+    "\n",
+    "Problem:\n",
+    "\"\"\"\n",
+    "\n",
+    "    def __init__(\n",
+    "        self,\n",
+    "        name: Optional[str] = \"MathChatAgent\",\n",
+    "        is_termination_msg: Optional[Callable[[Dict], bool]] = is_termination_msg_mathchat,\n",
+    "        human_input_mode: Literal[\"ALWAYS\", \"NEVER\", \"TERMINATE\"] = \"NEVER\",\n",
+    "        default_auto_reply: Optional[Union[str, Dict, None]] = DEFAULT_REPLY,\n",
+    "        max_invalid_q_per_step=3,\n",
+    "        **kwargs,\n",
+    "    ):\n",
+    "        super().__init__(\n",
+    "            name=name,\n",
+    "            is_termination_msg=is_termination_msg,\n",
+    "            human_input_mode=human_input_mode,\n",
+    "            default_auto_reply=default_auto_reply,\n",
+    "            max_invalid_q_per_step=max_invalid_q_per_step,\n",
+    "            **kwargs,\n",
+    "        )\n",
+    "        del self._reply_func_list[2]\n",
+    "        self.register_reply([Agent, None], MathUserProxyAgent._generate_math_reply, position=4)\n",
+    "        del self._reply_func_list[3]\n",
+    "        self.register_reply(\n",
+    "            trigger=autogen.ConversableAgent, reply_func=MathUserProxyAgent.generate_function_call_reply, position=3\n",
+    "        )\n",
+    "        self.register_reply(\n",
+    "            trigger=autogen.ConversableAgent, reply_func=MathUserProxyAgent._check_final_result, position=0\n",
+    "        )\n",
+    "\n",
+    "        self.max_function_call_trial = 3\n",
+    "        self.query = None\n",
+    "        self.answer = None\n",
+    "        self.is_correct = None\n",
+    "\n",
+    "    def generate_function_call_reply(\n",
+    "        self,\n",
+    "        messages: Optional[List[Dict]] = None,\n",
+    "        sender: Optional[autogen.ConversableAgent] = None,\n",
+    "        config: Optional[Any] = None,\n",
+    "    ) -> Tuple[bool, Union[Dict, None]]:\n",
+    "        \"\"\"Generate a reply using function call.\"\"\"\n",
+    "        if messages is None:\n",
+    "            messages = self._oai_messages[sender]\n",
+    "        message = messages[-1]\n",
+    "        if \"function_call\" in message:\n",
+    "            is_exec_success, func_return = self.execute_function(message[\"function_call\"])\n",
+    "            if is_exec_success:\n",
+    "                self.max_function_call_trial = 3\n",
+    "                return True, func_return\n",
+    "            else:\n",
+    "                if self.max_function_call_trial == 0:\n",
+    "                    error_message = func_return[\"content\"]\n",
+    "                    self.max_function_call_trial = 3\n",
+    "                    return (\n",
+    "                        True,\n",
+    "                        \"The function execution failed many times. \"\n",
+    "                        + error_message\n",
+    "                        + \". Please directly reply to me with TERMINATE. We need to terminate the conversation.\",\n",
+    "                    )\n",
+    "                else:\n",
+    "                    revise_prompt = \"You may have made a wrong function call (it may be due to the arguments you provided not fitting the function arguments, e.g. a missing required positional argument). \\\n",
+    "                        If you think this error occurred because you passed wrong function arguments and could make the call succeed, please try to call this function again using the correct arguments. 
\\\n",
+    "                        Otherwise, the error may be caused by the function itself. Please directly reply to me with TERMINATE. We need to terminate the conversation. \"\n",
+    "                    error_message = func_return[\"content\"]\n",
+    "                    return True, \"The function execution failed. \" + error_message + revise_prompt\n",
+    "        return False, None\n",
+    "\n",
+    "    def initiate_chat(\n",
+    "        self,\n",
+    "        recipient,\n",
+    "        answer: Any,\n",
+    "        silent: Optional[bool] = False,\n",
+    "        **context,\n",
+    "    ):\n",
+    "        self.query = context[\"problem\"]\n",
+    "        if not isinstance(answer, str):\n",
+    "            answer = str(answer)\n",
+    "            if answer.endswith(\".0\"):\n",
+    "                answer = answer[:-2]\n",
+    "            self._answer = answer\n",
+    "        else:\n",
+    "            self._answer = answer\n",
+    "\n",
+    "        self.is_correct = None\n",
+    "\n",
+    "        self._prepare_chat(recipient, True)\n",
+    "        error_message = None\n",
+    "        try:\n",
+    "            prompt = self.PROMPTS + context[\"problem\"]\n",
+    "            self.send(prompt, recipient, silent=silent)\n",
+    "        except BadRequestError as e:\n",
+    "            error_message = str(e)\n",
+    "            self.is_correct = 0\n",
+    "            print(\"error information: {}\".format(error_message))\n",
+    "\n",
+    "        recipient.reset()\n",
+    "        is_correct = copy.deepcopy(self.is_correct)\n",
+    "        self._reset()\n",
+    "        return is_correct\n",
+    "\n",
+    "    def _check_final_result(\n",
+    "        self,\n",
+    "        messages: Optional[List[Dict]] = None,\n",
+    "        sender: Optional[autogen.Agent] = None,\n",
+    "        config: Optional[Any] = None,\n",
+    "    ):\n",
+    "        messages = messages[-1]\n",
+    "        if isinstance(messages, dict):\n",
+    "            messages = messages.get(\"content\")\n",
+    "            if messages is None:\n",
+    "                return False, None\n",
+    "\n",
+    "        cb = extract_code(messages)\n",
+    "        contain_code = False\n",
+    "        for c in cb:\n",
+    "            if c[0] == \"python\":\n",
+    "                contain_code = True\n",
+    "                break\n",
+    "        if not contain_code and get_answer(messages) is not None and get_answer(messages) != \"\":\n",
+    "            if get_answer(messages) == self._answer:\n",
+    "                self.is_correct = 1\n",
+    "                return True, \"The result is correct. Please reply to me with TERMINATE.\"\n",
+    "            else:\n",
+    "                self.is_correct = 0\n",
+    "                return False, None\n",
+    "        else:\n",
+    "            return False, None\n",
+    "\n",
+    "    def _reset(self):\n",
+    "        super()._reset()\n",
+    "        self.max_function_call_trial = 3\n",
+    "        self.is_correct = None\n",
+    "        self.query = None\n",
+    "        self.answer = None"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Load dataset\n",
+    "\n",
+    "The MATH dataset contains 12,500 challenging competition mathematics problems. Each problem in MATH has a full step-by-step solution which can be used to teach models to generate answer derivations and explanations. \n",
+    "\n",
+    "We strictly follow the [train](https://github.com/lifan-yuan/CRAFT/blob/main/tab_and_math/MATH/dataset/train/algebra.jsonl)/[test](https://github.com/lifan-yuan/CRAFT/blob/main/tab_and_math/MATH/dataset/algebra.jsonl) splits of [CRAFT](https://github.com/lifan-yuan/CRAFT). Please specify your own path to the dataset. Here we sample the first 10 algebra problems as examples. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [], + "source": [ + "test_data, train_data = [], []\n", + "with open(\"MATH/dataset/algebra.jsonl\", \"r\", encoding=\"utf-8\") as f:\n", + " for line in f:\n", + " test_data.append(json.loads(line))\n", + "with open(\"MATH/dataset/train/algebra.jsonl\", \"r\", encoding=\"utf-8\") as f:\n", + " for line in f:\n", + " train_data.append(json.loads(line))\n", + "test_data, train_data = test_data[0:10], train_data[0:10]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Agents construction\n", + "\n", + "Constructing MathUserProxyAgent and AssistantAgent used in solving these problems. Here, we use gpt-4-1106-preview to construct the AssistantAgent. " + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [], + "source": [ + "llm_config = {\n", + " \"config_list\": [\n", + " {\n", + " \"model\": \"gpt-4-1106-preview\",\n", + " \"api_type\": \"azure\",\n", + " \"api_key\": os.environ[\"AZURE_OPENAI_API_KEY\"],\n", + " \"base_url\": \"https://ENDPOINT.openai.azure.com/\",\n", + " \"api_version\": \"2023-07-01-preview\",\n", + " }\n", + " ]\n", + "}\n", + "\n", + "assistant = autogen.AssistantAgent(\n", + " name=\"assistant\",\n", + " system_message=\"You are a helpful assistant.\",\n", + " llm_config=llm_config,\n", + ")\n", + "user_proxy = MathUserProxyAgent(\n", + " name=\"mathproxyagent\",\n", + " human_input_mode=\"NEVER\",\n", + " code_execution_config={\"work_dir\": \"_output\", \"use_docker\": False},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Test without agent optimizations \n", + "\n", + "Below is the code to get the performance without the agents optimization process. \n", + "\n", + "In this case, the AssistantAgent and MathUserProxyAgent don't have any function calls but solely solve problems with Python." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "sum = 0\n", + "for index, query in enumerate(test_data):\n", + " is_correct = user_proxy.initiate_chat(recipient=assistant, answer=query[\"answer\"], problem=query[\"question\"])\n", + " print(is_correct)\n", + " sum += is_correct\n", + "success_rate_without_agent_training = sum / 10" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Agent Training \n", + "\n", + "Then, we use the AgentOptimizer to iteratively optimize the agents by optimizing the function calls according to the historical conversations and performance.\n", + "The AgentOptimizer yields register_for_llm and register_for_executor at each iteration, which are subsequently utilized to update the assistant and user_proxy agents, respectively. \n", + "Here we optimize these two agents for ten epochs. 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "EPOCH = 10\n", + "optimizer_model = \"gpt-4-1106-preview\"\n", + "optimizer = AgentOptimizer(max_actions_per_step=3, llm_config=llm_config, optimizer_model=optimizer_model)\n", + "for i in range(EPOCH):\n", + " for index, query in enumerate(train_data):\n", + " is_correct = user_proxy.initiate_chat(assistant, answer=query[\"answer\"], problem=query[\"question\"])\n", + " history = assistant.chat_messages_for_summary(user_proxy)\n", + " optimizer.record_one_conversation(history, is_satisfied=is_correct)\n", + " register_for_llm, register_for_exector = optimizer.step()\n", + " for item in register_for_llm:\n", + " assistant.update_function_signature(**item)\n", + " if len(register_for_exector.keys()) > 0:\n", + " user_proxy.register_function(function_map=register_for_exector)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Test with agent optimizations \n", + "\n", + "After agent optimization, the agents obtained a list of functions from the AgentOptimizers after 10 optimization iterations as shown below.\n", + "\n", + "We then show the final performances with/without the agent optimization process. We observe the agents after optimization are obviously better.\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "sum = 0\n", + "for index, query in enumerate(test_data):\n", + " is_correct = user_proxy.initiate_chat(recipient=assistant, answer=query[\"answer\"], problem=query[\"question\"])\n", + " sum += is_correct\n", + "success_rate_with_agent_training = sum / 10" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "------------------------------------------------Functions learned------------------------------------------------\n", - "evaluate_expression: Evaluate arithmetic or mathematical expressions provided as strings.\n", - "\n", - "calculate_compound_interest_principal: Calculate the principal amount needed to achieve a certain future value with quarterly compound interest.\n", - "\n", - "solve_linear_system: Solve a system of linear equations represented as coefficients and variables.\n", - "\n", - "------------------------------------------------Summary------------------------------------------------\n", - "\n", - "success_rate_without_agent_training: 60.0%\n", - "\n", - "success_rate_with_agent_training: 90.0%\n", - "\n" - ] - } - ], - "source": [ - "print(\n", - " \"------------------------------------------------Functions learned------------------------------------------------\"\n", - ")\n", - "for func in assistant.llm_config[\"functions\"]:\n", - " print(func[\"name\"] + \": \" + func[\"description\"] + \"\\n\")\n", - "print(\"------------------------------------------------Summary------------------------------------------------\\n\")\n", - "print(\"success_rate_without_agent_training: {average}%\\n\".format(average=success_rate_without_agent_training * 100))\n", - "print(\"success_rate_with_agent_training: {average}%\\n\".format(average=success_rate_with_agent_training * 100))" - ] - } - ], - "metadata": { - "front_matter": { - "description": "AgentOptimizer is able to prompt LLMs to iteratively optimize function/skills of AutoGen agents according to the historical 
conversation and performance.", - "tags": [ - "optimization", - "tool/function" - ] - }, - "kernelspec": { - "display_name": "py3.9", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.18" + "name": "stdout", + "output_type": "stream", + "text": [ + "------------------------------------------------Functions learned------------------------------------------------\n", + "evaluate_expression: Evaluate arithmetic or mathematical expressions provided as strings.\n", + "\n", + "calculate_compound_interest_principal: Calculate the principal amount needed to achieve a certain future value with quarterly compound interest.\n", + "\n", + "solve_linear_system: Solve a system of linear equations represented as coefficients and variables.\n", + "\n", + "------------------------------------------------Summary------------------------------------------------\n", + "\n", + "success_rate_without_agent_training: 60.0%\n", + "\n", + "success_rate_with_agent_training: 90.0%\n", + "\n" + ] } + ], + "source": [ + "print(\n", + " \"------------------------------------------------Functions learned------------------------------------------------\"\n", + ")\n", + "for func in assistant.llm_config[\"functions\"]:\n", + " print(func[\"name\"] + \": \" + func[\"description\"] + \"\\n\")\n", + "print(\"------------------------------------------------Summary------------------------------------------------\\n\")\n", + "print(\"success_rate_without_agent_training: {average}%\\n\".format(average=success_rate_without_agent_training * 100))\n", + "print(\"success_rate_with_agent_training: {average}%\\n\".format(average=success_rate_with_agent_training * 100))" + ] + } + ], + "metadata": { + "front_matter": { + "description": "AgentOptimizer is able to prompt LLMs to iteratively optimize function/skills of AutoGen agents according to the historical conversation and performance.", + "tags": [ + "optimization", + "tool/function" + ] + }, + "kernelspec": { + "display_name": "py3.9", + "language": "python", + "name": "python3" }, - "nbformat": 4, - "nbformat_minor": 2 + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.18" + } + }, + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/notebook/agentchat_dalle_and_gpt4v.ipynb b/notebook/agentchat_dalle_and_gpt4v.ipynb index d4278c96d1..540bd61d1e 100644 --- a/notebook/agentchat_dalle_and_gpt4v.ipynb +++ b/notebook/agentchat_dalle_and_gpt4v.ipynb @@ -36,12 +36,12 @@ "import time\n", "from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union\n", "\n", - "import matplotlib.pyplot as plt\n", "import PIL\n", + "import matplotlib.pyplot as plt\n", "import requests\n", + "from PIL import Image\n", "from diskcache import Cache\n", "from openai import OpenAI\n", - "from PIL import Image\n", "from termcolor import colored\n", "\n", "import autogen\n", @@ -621,11 +621,11 @@ ], "metadata": { "front_matter": { - "description": "Multimodal agent chat with DALL-E and GPT-4v.", - "tags": [ - "multimodal", - "gpt-4v" - ] + "description": "Multimodal agent chat with DALL-E and GPT-4v.", + "tags": [ + "multimodal", + "gpt-4v" + ] }, "kernelspec": { 
"display_name": "Python 3 (ipykernel)", diff --git a/notebook/agentchat_databricks_dbrx.ipynb b/notebook/agentchat_databricks_dbrx.ipynb index 74d391e5e4..2946ce1532 100644 --- a/notebook/agentchat_databricks_dbrx.ipynb +++ b/notebook/agentchat_databricks_dbrx.ipynb @@ -510,6 +510,7 @@ "class Databricks_AutoGenLogger:\n", " def __init__(self):\n", " from pyspark.sql import SparkSession\n", + "\n", " import autogen\n", "\n", " self.spark = SparkSession.builder.getOrCreate()\n", diff --git a/notebook/agentchat_function_call_async.ipynb b/notebook/agentchat_function_call_async.ipynb index 3abe706acb..0fb03428f6 100644 --- a/notebook/agentchat_function_call_async.ipynb +++ b/notebook/agentchat_function_call_async.ipynb @@ -37,6 +37,7 @@ "metadata": {}, "outputs": [], "source": [ + "import asyncio\n", "import time\n", "\n", "from typing_extensions import Annotated\n", @@ -100,7 +101,7 @@ "@coder.register_for_llm(description=\"create a timer for N seconds\")\n", "async def timer(num_seconds: Annotated[str, \"Number of seconds in the timer.\"]) -> str:\n", " for i in range(int(num_seconds)):\n", - " time.sleep(1)\n", + " asyncio.sleep(1)\n", " # should print to stdout\n", " return \"Timer is done!\"\n", "\n", @@ -369,7 +370,7 @@ ] }, "kernelspec": { - "display_name": "flaml_dev", + "display_name": ".venv-3.9", "language": "python", "name": "python3" }, @@ -383,7 +384,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.14" + "version": "3.9.20" } }, "nbformat": 4, diff --git a/notebook/agentchat_nested_chats_chess_altmodels.ipynb b/notebook/agentchat_nested_chats_chess_altmodels.ipynb index 0a3f0bd119..2444b86a9b 100644 --- a/notebook/agentchat_nested_chats_chess_altmodels.ipynb +++ b/notebook/agentchat_nested_chats_chess_altmodels.ipynb @@ -119,10 +119,12 @@ "made_move = False\n", "\n", "\n", - "def get_legal_moves() -> Annotated[\n", - " str,\n", - " \"Call this tool to list of all legal chess moves on the board, output is a list in UCI format, e.g. e2e4,e7e5,e7e8q.\",\n", - "]:\n", + "def get_legal_moves() -> (\n", + " Annotated[\n", + " str,\n", + " \"Call this tool to list of all legal chess moves on the board, output is a list in UCI format, e.g. e2e4,e7e5,e7e8q.\",\n", + " ]\n", + "):\n", " return \"Possible moves are: \" + \",\".join([str(move) for move in board.legal_moves])\n", "\n", "\n", @@ -130,7 +132,7 @@ " move: Annotated[\n", " str,\n", " \"Call this tool to make a move after you have the list of legal moves and want to make a move. Takes UCI format, e.g. 
e2e4 or e7e5 or e7e8q.\",\n", - " ]\n", + " ],\n", ") -> Annotated[str, \"Result of the move.\"]:\n", " move = chess.Move.from_uci(move)\n", " board.push_uci(str(move))\n", diff --git a/notebook/lats_search.ipynb b/notebook/lats_search.ipynb index 4edd6b1f84..41a1736980 100644 --- a/notebook/lats_search.ipynb +++ b/notebook/lats_search.ipynb @@ -869,7 +869,6 @@ " logger = logging.getLogger(__name__)\n", "\n", " try:\n", - "\n", " state = {\"input\": input_query, \"root\": None}\n", " try:\n", " state = generate_initial_response(state)\n", diff --git a/pyproject.toml b/pyproject.toml index 560fe7cb85..724182cc44 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -71,6 +71,7 @@ dependencies = [ ] [project.optional-dependencies] + # public distributions jupyter-executor = [ "jupyter-kernel-gateway", @@ -138,11 +139,6 @@ interop =[ "pyautogen[interop-crewai, interop-langchain, interop-pydantic-ai]", ] -types = [ - "mypy==1.9.0", - "pyautogen[test, jupyter-executor, interop]", -] - # pysqlite3-binary used so it doesn't need to compile pysqlite3 autobuild = ["chromadb", "sentence-transformers", "huggingface-hub", "pysqlite3-binary"] @@ -174,7 +170,9 @@ cohere = ["cohere>=5.5.8"] ollama = ["ollama>=0.3.3", "fix_busted_json>=0.0.18"] bedrock = ["boto3>=1.34.149"] -# dev dependencies +## dev dependencies + +# test dependencies test = [ "ipykernel", "nbconvert", @@ -187,6 +185,25 @@ test = [ "fastapi>=0.115.0,<1", ] +types = [ + "mypy==1.9.0", + "pyautogen[test, jupyter-executor, interop]", +] + +lint = [ + "ruff==0.8.6", + "codespell==2.3.0", + "pyupgrade-directories==0.3.0", +] + +dev = [ + "pyautogen[lint,test,types]", + "pre-commit==4.0.1", + "detect-secrets==1.5.0", + "uv==0.5.16", +] + + [project.urls] Homepage = "https://ag2.ai/" Documentation = "https://docs.ag2.ai/docs/Home" @@ -224,21 +241,43 @@ exclude = "(.eggs|.git|.hg|.mypy_cache|.venv|_build|buck-out|build|dist)" [tool.ruff] +fix = true line-length = 120 +target-version = 'py39' +#include = ["autogen", "test", "docs"] +#exclude = [] [tool.ruff.lint] # Enable Pyflakes `E` and `F` codes by default. 
select = [
-    "E",
-    "W", # see: https://pypi.org/project/pycodestyle
-    "F", # see: https://pypi.org/project/pyflakes
-    # "D", # see: https://pypi.org/project/pydocstyle
-    # "N", # see: https://pypi.org/project/pep8-naming
-    # "S", # see: https://pypi.org/project/flake8-bandit
-    "I", # see: https://pypi.org/project/isort/
+    "E", # pycodestyle errors https://docs.astral.sh/ruff/rules/#error-e
+    "W", # pycodestyle warnings https://docs.astral.sh/ruff/rules/#warning-w
+    "C90", # mccabe https://docs.astral.sh/ruff/rules/#mccabe-c90
+# "N", # pep8-naming https://docs.astral.sh/ruff/rules/#pep8-naming-n
+# "D", # pydocstyle https://docs.astral.sh/ruff/rules/#pydocstyle-d
+    "I", # isort https://docs.astral.sh/ruff/rules/#isort-i
+    "F", # pyflakes https://docs.astral.sh/ruff/rules/#pyflakes-f
+    "ASYNC", # flake8-async https://docs.astral.sh/ruff/rules/#flake8-async-async
+# "C4", # flake8-comprehensions https://docs.astral.sh/ruff/rules/#flake8-comprehensions-c4
+# "B", # flake8-bugbear https://docs.astral.sh/ruff/rules/#flake8-bugbear-b
+    "Q", # flake8-quotes https://docs.astral.sh/ruff/rules/#flake8-quotes-q
+# "T20", # flake8-print https://docs.astral.sh/ruff/rules/#flake8-print-t20
+# "SIM", # flake8-simplify https://docs.astral.sh/ruff/rules/#flake8-simplify-sim
+# "PT", # flake8-pytest-style https://docs.astral.sh/ruff/rules/#flake8-pytest-style-pt
+# "PTH", # flake8-use-pathlib https://docs.astral.sh/ruff/rules/#flake8-use-pathlib-pth
+# "TCH", # flake8-type-checking https://docs.astral.sh/ruff/rules/#flake8-type-checking-tch
+# "RUF", # Ruff-specific rules https://docs.astral.sh/ruff/rules/#ruff-specific-rules-ruf
+# "PERF", # Perflint https://docs.astral.sh/ruff/rules/#perflint-perf
 ]
-ignore = ["E501", "F401", "F403", "C901"]
+ignore = ["E501", # line too long, handled by formatter later
+    "F401", "F403",
+    "C901", # too complex
+    "E402",
+    "E721",
+    "ASYNC109",
+    "D100", "D101", "D102", "D103", "D104",
+]
 # Exclude a variety of commonly ignored directories.
 exclude = [
 ".eggs",
@@ -261,6 +300,16 @@ unfixable = ["F401"]
 # Unlike Flake8, default to a complexity level of 10.
 max-complexity = 10
+[tool.ruff.lint.isort]
+case-sensitive = true
+
+[tool.ruff.format]
+docstring-code-format = true
+
+[tool.ruff.lint.pydocstyle]
+convention = "google"
+
+
 [tool.mypy]
 files = [
 "autogen/logger",
@@ -309,3 +358,8 @@ warn_unused_ignores = false
 disallow_incomplete_defs = true
 disallow_untyped_decorators = true
 disallow_any_unimported = true
+
+[tool.codespell]
+#skip = "./venv*,./docs/site/*,./htmlcov,./examples/openapi/whatsapp_openapi_complete.json"
+skip = "paul_graham_essay.txt,attractions.json,website,notebook,dotnet"
+ignore-words = ".codespell-whitelist.txt"
diff --git a/scripts/lint.sh b/scripts/lint.sh
new file mode 100755
index 0000000000..1d43b9901d
--- /dev/null
+++ b/scripts/lint.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+
+echo "Running ruff linter (isort, flake8, pyupgrade, etc. replacement)..."
+ruff check
+
+echo "Running ruff formatter (black replacement)..."
+ruff format
diff --git a/scripts/pre-commit-lint.sh b/scripts/pre-commit-lint.sh
new file mode 100755
index 0000000000..45899b465b
--- /dev/null
+++ b/scripts/pre-commit-lint.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+
+# from: https://jaredkhan.com/blog/mypy-pre-commit
+
+# A script for running the linter and formatter,
+# with all their dependencies installed.
+
+set -o errexit
+
+# Change directory to the project root directory.
+cd "$(dirname "$0")"/..
+
+# Install the dependencies into the lint env. 
+
+# Note that this can take seconds to run.
+# Avoid pip spending time quietly retrying,
+# since a likely cause of failure is a network issue.
+pip uninstall pyautogen --yes --quiet
+
+pip install --editable ".[dev]" \
+    --retries 1 \
+    --no-input \
+    --quiet
+
+# Run on all files,
+# ignoring the paths passed to this script,
+# so as not to miss any issues.
+./scripts/lint.sh
diff --git a/scripts/pre-commit-mypy-run.sh b/scripts/pre-commit-mypy-run.sh
index 1e2bd7beba..b48d4587ed 100755
--- a/scripts/pre-commit-mypy-run.sh
+++ b/scripts/pre-commit-mypy-run.sh
@@ -10,6 +10,8 @@ set -o errexit
 # Change directory to the project root directory.
 cd "$(dirname "$0")"/..
+pip uninstall pyautogen --yes --quiet
+
 pip install -q -e .[types]
 mypy
diff --git a/test/agentchat/contrib/retrievechat/test_qdrant_retrievechat.py b/test/agentchat/contrib/retrievechat/test_qdrant_retrievechat.py
index 18175109c7..ae16607464 100755
--- a/test/agentchat/contrib/retrievechat/test_qdrant_retrievechat.py
+++ b/test/agentchat/contrib/retrievechat/test_qdrant_retrievechat.py
@@ -8,7 +8,7 @@
 import os
 import sys
-from typing import Generator
+from collections.abc import Generator
 import pytest
diff --git a/test/agentchat/contrib/test_agent_builder.py b/test/agentchat/contrib/test_agent_builder.py
index b4622d79dc..6cb94fa906 100755
--- a/test/agentchat/contrib/test_agent_builder.py
+++ b/test/agentchat/contrib/test_agent_builder.py
@@ -158,7 +158,6 @@ def test_save(builder: AgentBuilder):
 reason=reason,
 )
 def test_load(builder: AgentBuilder):
-
 config_save_path = f"{here}/example_test_agent_builder_config.json"
 json.load(open(config_save_path))
diff --git a/test/agentchat/contrib/test_swarm.py b/test/agentchat/contrib/test_swarm.py
index 428bcf71ef..47270acc6f 100644
--- a/test/agentchat/contrib/test_swarm.py
+++ b/test/agentchat/contrib/test_swarm.py
@@ -7,11 +7,11 @@
 import pytest
 from autogen.agentchat.contrib.swarm_agent import (
-    __CONTEXT_VARIABLES_PARAM_NAME__,
-    __TOOL_EXECUTOR_NAME__,
 AFTER_WORK,
 ON_CONDITION,
 UPDATE_SYSTEM_MESSAGE,
+    __CONTEXT_VARIABLES_PARAM_NAME__,
+    __TOOL_EXECUTOR_NAME__,
 AfterWorkOption,
 SwarmAgent,
 SwarmResult,
@@ -158,7 +158,6 @@ def test_resume_speaker():
 patch.object(test_initial_agent, "initiate_chat") as mock_initial_chat,
 patch.object(test_second_agent, "initiate_chat") as mock_second_chat,
 ):
-
 mock_chat_result = MagicMock()
 mock_chat_result.chat_history = multiple_messages
diff --git a/test/agentchat/test_chats.py b/test/agentchat/test_chats.py
index c936e95fde..98a22e747a 100755
--- a/test/agentchat/test_chats.py
+++ b/test/agentchat/test_chats.py
@@ -6,8 +6,9 @@
 # SPDX-License-Identifier: MIT
 #!/usr/bin/env python3 -m pytest
+from collections.abc import Generator
 from tempfile import TemporaryDirectory
-from typing import Annotated, Generator, Literal, TypeVar
+from typing import Annotated, Literal, TypeVar
 import pytest
diff --git a/test/agentchat/test_conversable_agent.py b/test/agentchat/test_conversable_agent.py
index c60139801f..83b557a675 100755
--- a/test/agentchat/test_conversable_agent.py
+++ b/test/agentchat/test_conversable_agent.py
@@ -1045,7 +1045,6 @@ def stopwatch(num_seconds: Annotated[str, "Number of seconds in the stopwatch."]
@pytest.mark.skipif(skip_openai, reason=reason)
 def test_max_turn(credentials_gpt_4o_mini: Credentials) -> None:
-
 # create an AssistantAgent instance named "assistant"
 assistant = autogen.AssistantAgent(
 name="assistant",
@@ -1346,7 +1345,6 @@ def bob_initiate_chat(agent: ConversableAgent, text: Literal["past", "future"]):
 def test_http_client():
-
 import httpx
 with pytest.raises(TypeError):
@@ -1367,7 +1365,6 @@ def test_http_client():
 def test_adding_duplicate_function_warning():
-
 config_base = [{"base_url": "http://0.0.0.0:8000", "api_key": "NULL"}]
 agent = autogen.ConversableAgent(
diff --git a/test/agentchat/test_function_call.py b/test/agentchat/test_function_call.py
index e6b2f60654..66906ffa82 100755
--- a/test/agentchat/test_function_call.py
+++ b/test/agentchat/test_function_call.py
@@ -6,6 +6,7 @@
 # SPDX-License-Identifier: MIT
 #!/usr/bin/env python3 -m pytest
+import asyncio
 import json
 import sys
@@ -167,7 +168,7 @@ async def test_a_execute_function():
 # Create an async function
 async def add_num(num_to_be_added):
 given_num = 10
-        time.sleep(1)
+        await asyncio.sleep(1)
 return str(num_to_be_added + given_num)
 user = UserProxyAgent(name="test", function_map={"add_num": add_num})
diff --git a/test/agentchat/test_groupchat.py b/test/agentchat/test_groupchat.py
index ef9420eedc..0477ad5682 100755
--- a/test/agentchat/test_groupchat.py
+++ b/test/agentchat/test_groupchat.py
@@ -1543,9 +1543,9 @@ def test_speaker_selection_auto_process_result():
 assert groupchat._process_speaker_selection_result(chat_result, cmo, agent_list) == pm
 ### Agent not selected successfully
-    chat_result.chat_history[3][
-        "content"
-    ] = "[AGENT SELECTION FAILED]Select speaker attempt #3 of 3 failed as it did not include any agent names."
+    chat_result.chat_history[3]["content"] = (
+        "[AGENT SELECTION FAILED]Select speaker attempt #3 of 3 failed as it did not include any agent names."
+ ) # The next speaker in the list will be selected, which will be the Product_Manager (as the last speaker is the Chief_Marketing_Officer) assert groupchat._process_speaker_selection_result(chat_result, cmo, agent_list) == pm diff --git a/test/coding/test_markdown_code_extractor.py b/test/coding/test_markdown_code_extractor.py index b63bc67531..66c6e2940d 100644 --- a/test/coding/test_markdown_code_extractor.py +++ b/test/coding/test_markdown_code_extractor.py @@ -59,9 +59,7 @@ def scrape(url): text = soup.find("div", {"id": "bodyContent"}).text return title, text ``` -""".replace( - "\n", "\r\n" -) +""".replace("\n", "\r\n") _message_5 = """ Test bash script: diff --git a/test/coding/test_user_defined_functions.py b/test/coding/test_user_defined_functions.py index 96d8419748..a81e30049e 100644 --- a/test/coding/test_user_defined_functions.py +++ b/test/coding/test_user_defined_functions.py @@ -205,7 +205,6 @@ def add_two_numbers(a: int, b: int) -> int: @pytest.mark.parametrize("cls", classes_to_test) def test_cant_load_broken_str_function_with_reqs(cls) -> None: - with pytest.raises(ValueError): _ = FunctionWithRequirements.from_str( ''' diff --git a/test/interop/pydantic_ai/test_pydantic_ai.py b/test/interop/pydantic_ai/test_pydantic_ai.py index 4fef76ca27..4b58568ae2 100644 --- a/test/interop/pydantic_ai/test_pydantic_ai.py +++ b/test/interop/pydantic_ai/test_pydantic_ai.py @@ -72,7 +72,6 @@ def test_with_llm(self, credentials_gpt_4o: Credentials) -> None: sys.version_info < (3, 9), reason="Only Python 3.9 and above are supported for LangchainInteroperability" ) class TestPydanticAIInteroperabilityDependencyInjection: - def test_dependency_injection(self) -> None: def f( ctx: RunContext[int], # type: ignore[valid-type] diff --git a/test/messages/test_agent_messages.py b/test/messages/test_agent_messages.py index 8b72bc587d..fe4f6deb47 100644 --- a/test/messages/test_agent_messages.py +++ b/test/messages/test_agent_messages.py @@ -18,8 +18,8 @@ ConversableAgentUsageSummaryMessage, ConversableAgentUsageSummaryNoCostIncurredMessage, ExecuteCodeBlockMessage, - ExecutedFunctionMessage, ExecuteFunctionMessage, + ExecutedFunctionMessage, FunctionCall, FunctionCallMessage, FunctionResponseMessage, diff --git a/test/messages/test_base_message.py b/test/messages/test_base_message.py index 23974e9dbc..1933af8e70 100644 --- a/test/messages/test_base_message.py +++ b/test/messages/test_base_message.py @@ -2,8 +2,9 @@ # # SPDX-License-Identifier: Apache-2.0 +from collections.abc import Generator from contextlib import contextmanager -from typing import Generator, Type +from typing import Type from uuid import UUID, uuid4 import pytest @@ -18,7 +19,7 @@ @pytest.fixture() -def TestMessage() -> Generator[Type[BaseMessage], None, None]: +def TestMessage() -> Generator[type[BaseMessage], None, None]: org_message_classes = _message_classes.copy() try: @@ -35,7 +36,7 @@ class TestMessage(BaseMessage): class TestBaseMessage: - def test_model_dump_validate(self, TestMessage: Type[BaseModel], uuid: UUID) -> None: + def test_model_dump_validate(self, TestMessage: type[BaseModel], uuid: UUID) -> None: # print(f"{TestMessage=}") message = TestMessage(uuid=uuid, sender="sender", receiver="receiver", content="Hello, World!") diff --git a/test/oai/test_bedrock.py b/test/oai/test_bedrock.py index b059ee138d..ed5cb6b147 100644 --- a/test/oai/test_bedrock.py +++ b/test/oai/test_bedrock.py @@ -34,7 +34,6 @@ def __init__(self, text, choices, usage, cost, model): @pytest.fixture def bedrock_client(): - # Set Bedrock 
client with some default values client = BedrockClient() @@ -49,7 +48,6 @@ def bedrock_client(): # Test initialization and configuration @pytest.mark.skipif(skip, reason=skip_reason) def test_initialization(): - # Creation works without an api_key as it's handled in the parameter parsing BedrockClient() @@ -228,7 +226,6 @@ def test_create_response_with_tool_call(mock_chat, bedrock_client): # Test message conversion from OpenAI to Bedrock format @pytest.mark.skipif(skip, reason=skip_reason) def test_oai_messages_to_bedrock_messages(bedrock_client): - # Test that the "name" key is removed and system messages converted to user message test_messages = [ {"role": "system", "content": "You are a helpful AI bot."}, diff --git a/test/oai/test_cerebras.py b/test/oai/test_cerebras.py index b9dc2c786b..2d10f8cae9 100644 --- a/test/oai/test_cerebras.py +++ b/test/oai/test_cerebras.py @@ -43,7 +43,6 @@ def cerebras_client(): # Test initialization and configuration @pytest.mark.skipif(skip, reason=skip_reason) def test_initialization(): - # Missing any api_key with pytest.raises(AssertionError) as assertinfo: CerebrasClient() # Should raise an AssertionError due to missing api_key diff --git a/test/oai/test_client.py b/test/oai/test_client.py index 37fbf8d03a..da24dcca6d 100755 --- a/test/oai/test_client.py +++ b/test/oai/test_client.py @@ -157,7 +157,6 @@ def test_usage_summary(credentials_azure_gpt_35_turbo_instruct: Credentials): @pytest.mark.skipif(skip, reason="openai>=1 not installed") def test_legacy_cache(credentials_gpt_4o_mini: Credentials): - # Prompt to use for testing. prompt = "Write a 100 word summary on the topic of the history of human civilization." @@ -221,7 +220,6 @@ def test_legacy_cache(credentials_gpt_4o_mini: Credentials): @pytest.mark.skipif(skip, reason="openai>=1 not installed") def test_cache(credentials_gpt_4o_mini: Credentials): - # Prompt to use for testing. prompt = "Write a 100 word summary on the topic of the history of artificial intelligence." 
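A note on the time.sleep -> asyncio.sleep conversions in this patch (test/agentchat/test_function_call.py and notebook/agentchat_function_call_async.ipynb): time.sleep blocks the whole event loop, while asyncio.sleep is a coroutine that suspends only the current task, and only when awaited; a bare asyncio.sleep(1) call does nothing except trigger a "coroutine was never awaited" warning, which is why the converted calls must carry await. A minimal, self-contained sketch of the difference (illustrative only, not part of the patch; the function names are invented for the example):

    import asyncio
    import time


    async def blocking_timer(num_seconds: int) -> str:
        # Blocks the entire event loop: no other task can run in the meantime.
        time.sleep(num_seconds)
        return "Timer is done!"


    async def cooperative_timer(num_seconds: int) -> str:
        # Suspends only this coroutine; other tasks keep running.
        # The await is required -- a bare asyncio.sleep() call is a no-op.
        await asyncio.sleep(num_seconds)
        return "Timer is done!"


    if __name__ == "__main__":
        print(asyncio.run(cooperative_timer(1)))
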
diff --git a/test/oai/test_groq.py b/test/oai/test_groq.py index 5e331f85aa..50e3dcee64 100644 --- a/test/oai/test_groq.py +++ b/test/oai/test_groq.py @@ -43,7 +43,6 @@ def groq_client(): # Test initialization and configuration @pytest.mark.skipif(skip, reason=skip_reason) def test_initialization(): - # Missing any api_key with pytest.raises(AssertionError) as assertinfo: GroqClient() # Should raise an AssertionError due to missing api_key diff --git a/test/oai/test_mistral.py b/test/oai/test_mistral.py index 588b24392a..93d1bbf920 100644 --- a/test/oai/test_mistral.py +++ b/test/oai/test_mistral.py @@ -51,7 +51,6 @@ def mistral_client(): # Test initialization and configuration @pytest.mark.skipif(skip, reason="Mistral.AI dependency is not installed") def test_initialization(): - # Missing any api_key with pytest.raises(AssertionError) as assertinfo: MistralAIClient() # Should raise an AssertionError due to missing api_key diff --git a/test/oai/test_ollama.py b/test/oai/test_ollama.py index 0b25decde7..33d94c0e8e 100644 --- a/test/oai/test_ollama.py +++ b/test/oai/test_ollama.py @@ -34,7 +34,6 @@ def __init__(self, text, choices, usage, cost, model): @pytest.fixture def ollama_client(): - # Set Ollama client with some default values client = OllamaClient() @@ -50,7 +49,6 @@ def ollama_client(): # Test initialization and configuration @pytest.mark.skipif(skip, reason=skip_reason) def test_initialization(): - # Creation works without an api_key OllamaClient() diff --git a/test/oai/test_together.py b/test/oai/test_together.py index bff18d1b7a..b3b508ba37 100644 --- a/test/oai/test_together.py +++ b/test/oai/test_together.py @@ -42,7 +42,6 @@ def together_client(): # Test initialization and configuration @pytest.mark.skipif(skip, reason="Together.AI dependency is not installed") def test_initialization(): - # Missing any api_key with pytest.raises(AssertionError) as assertinfo: TogetherClient() # Should raise an AssertionError due to missing api_key @@ -198,7 +197,6 @@ def test_create_response(mock_create, together_client): @pytest.mark.skipif(skip, reason="Together.AI dependency is not installed") @patch("autogen.oai.together.TogetherClient.create") def test_create_response_with_tool_call(mock_create, together_client): - # Define the mock response directly within the patch mock_function = MagicMock(name="currency_calculator") mock_function.name = "currency_calculator" diff --git a/test/test_code_utils.py b/test/test_code_utils.py index 7565f439bf..9215cb33cb 100755 --- a/test/test_code_utils.py +++ b/test/test_code_utils.py @@ -161,9 +161,7 @@ def scrape(url): text = soup.find("div", {"id": "bodyContent"}).text return title, text ``` -""".replace( - "\n", "\r\n" - ) +""".replace("\n", "\r\n") ) print(codeblocks) assert len(codeblocks) == 1 and codeblocks[0][0] == "python" diff --git a/test/test_retrieve_utils.py b/test/test_retrieve_utils.py index f9e8d71b74..16e1b2be40 100755 --- a/test/test_retrieve_utils.py +++ b/test/test_retrieve_utils.py @@ -9,6 +9,7 @@ """ Unit test for retrieve_utils.py """ + import pytest try: diff --git a/test/tools/test_function_utils.py b/test/tools/test_function_utils.py index 7bcdbb0cfe..b3f7428c1a 100644 --- a/test/tools/test_function_utils.py +++ b/test/tools/test_function_utils.py @@ -148,7 +148,11 @@ def _f3() -> None: def test_get_parameters() -> None: - def f(a: Annotated[str, AG2Field(description="Parameter a")], b=1, c: Annotated[float, AG2Field(description="Parameter c")] = 1.0): # type: ignore[no-untyped-def] + def f( # type: ignore[no-untyped-def] + 
a: Annotated[str, AG2Field(description="Parameter a")], + b=1, # type: ignore[no-untyped-def] + c: Annotated[float, AG2Field(description="Parameter c")] = 1.0, + ): pass typed_signature = get_typed_signature(f) diff --git a/website/docs/topics/code-execution/custom-executor.ipynb b/website/docs/topics/code-execution/custom-executor.ipynb index 8fae6aea19..a491f381d6 100644 --- a/website/docs/topics/code-execution/custom-executor.ipynb +++ b/website/docs/topics/code-execution/custom-executor.ipynb @@ -51,7 +51,6 @@ "outputs": [], "source": [ "class NotebookExecutor(CodeExecutor):\n", - "\n", " @property\n", " def code_extractor(self) -> CodeExtractor:\n", " # Extact code from markdown blocks.\n", diff --git a/website/process_notebooks.py b/website/process_notebooks.py index 8a81d37bed..e17265f5d3 100755 --- a/website/process_notebooks.py +++ b/website/process_notebooks.py @@ -316,7 +316,6 @@ def get_error_info(nb: NotebookNode) -> NotebookError | None: def add_front_matter_to_metadata_mdx( front_matter: dict[str, str | list[str]], website_dir: Path, rendered_mdx: Path ) -> None: - source = front_matter.get("source_notebook") if isinstance(source, str) and source.startswith("/website/docs/"): return From 0aea10c1de165563e1168ef2e99f376cbec09959 Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Thu, 9 Jan 2025 14:28:19 +0100 Subject: [PATCH 2/2] Rule N added to ruff check --- autogen/_pydantic.py | 8 +- .../get_youtube_caption.py | 11 +- .../contrib/llamaindex_conversable_agent.py | 8 +- .../contrib/math_user_proxy_agent.py | 1 + .../contrib/retrieve_user_proxy_agent.py | 12 +- autogen/agentchat/contrib/swarm_agent.py | 6 +- .../realtime_agent/oai_realtime_client.py | 4 +- autogen/exception_utils.py | 10 +- autogen/graph_utils.py | 8 +- autogen/messages/base_message.py | 6 +- autogen/oai/client.py | 24 +-- autogen/oai/client_utils.py | 2 +- autogen/oai/completion.py | 10 +- autogen/oai/gemini.py | 1 + autogen/retrieve_utils.py | 2 +- autogen/token_count_utils.py | 6 +- autogen/tools/dependency_injection.py | 2 +- notebook/agentchat_databricks_dbrx.ipynb | 4 +- notebook/agentchat_websockets.ipynb | 6 +- pyproject.toml | 7 +- .../test_llamaindex_conversable_agent.py | 4 +- test/agentchat/realtime_agent/test_e2e.py | 2 +- test/agentchat/test_conversable_agent.py | 6 +- test/agentchat/test_dependancy_injection.py | 191 ++++++++++-------- test/messages/test_base_message.py | 4 +- test/oai/test_custom_client.py | 14 +- test/test_notebook.py | 2 +- test/tools/test_dependency_injection.py | 8 +- website/docs/topics/code-execution/.gitignore | 1 + .../user-defined-functions.ipynb | 156 ++------------ 30 files changed, 214 insertions(+), 312 deletions(-) create mode 100644 website/docs/topics/code-execution/.gitignore diff --git a/autogen/_pydantic.py b/autogen/_pydantic.py index 09d272508e..a06bf986e7 100644 --- a/autogen/_pydantic.py +++ b/autogen/_pydantic.py @@ -77,11 +77,11 @@ def type2schema(t: Any) -> JsonSchemaValue: return {"anyOf": [type2schema(tt) for tt in get_args(t)]} # we need to support both syntaxes for Tuple elif get_origin(t) in [Tuple, tuple]: - prefixItems = [type2schema(tt) for tt in get_args(t)] + prefix_items = [type2schema(tt) for tt in get_args(t)] return { - "maxItems": len(prefixItems), - "minItems": len(prefixItems), - "prefixItems": prefixItems, + "maxItems": len(prefix_items), + "minItems": len(prefix_items), + "prefixItems": prefix_items, "type": "array", } else: diff --git a/autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py 
b/autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py
index 33f594093e..ab793ac056 100644
--- a/autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py
+++ b/autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py
@@ -4,7 +4,10 @@
 # alternative api: https://rapidapi.com/omarmhaimdat/api/youtube-v2
-def get_youtube_caption(videoId):
+from typing import Any
+
+
+def get_youtube_caption(video_id: str) -> Any:
 """
 Retrieves the captions for a YouTube video.
@@ -21,13 +24,13 @@
 import requests
-    RAPID_API_KEY = os.environ["RAPID_API_KEY"]
-    video_url = f"https://www.youtube.com/watch?v={videoId}"
+    rapid_api_key = os.environ["RAPID_API_KEY"]
+    video_url = f"https://www.youtube.com/watch?v={video_id}"
 url = "https://youtube-transcript3.p.rapidapi.com/api/transcript-with-url"
 querystring = {"url": video_url, "lang": "en", "flat_text": "true"}
-    headers = {"X-RapidAPI-Key": RAPID_API_KEY, "X-RapidAPI-Host": "youtube-transcript3.p.rapidapi.com"}
+    headers = {"X-RapidAPI-Key": rapid_api_key, "X-RapidAPI-Host": "youtube-transcript3.p.rapidapi.com"}
 response = requests.get(url, headers=headers, params=querystring)
 response = response.json()
diff --git a/autogen/agentchat/contrib/llamaindex_conversable_agent.py b/autogen/agentchat/contrib/llamaindex_conversable_agent.py
index a9973f39e3..90814bd1d2 100644
--- a/autogen/agentchat/contrib/llamaindex_conversable_agent.py
+++ b/autogen/agentchat/contrib/llamaindex_conversable_agent.py
@@ -87,9 +87,9 @@
 """Generate a reply using autogen.oai."""
 user_message, history = self._extract_message_and_history(messages=messages, sender=sender)
-        chatResponse: AgentChatResponse = self._llama_index_agent.chat(message=user_message, chat_history=history)
+        chat_response: AgentChatResponse = self._llama_index_agent.chat(message=user_message, chat_history=history)
-        extracted_response = chatResponse.response
+        extracted_response = chat_response.response
 return (True, extracted_response)
@@ -102,11 +102,11 @@
 """Generate a reply using autogen.oai."""
 user_message, history = self._extract_message_and_history(messages=messages, sender=sender)
-        chatResponse: AgentChatResponse = await self._llama_index_agent.achat(
+        chat_response: AgentChatResponse = await self._llama_index_agent.achat(
 message=user_message, chat_history=history
 )
-        extracted_response = chatResponse.response
+        extracted_response = chat_response.response
 return (True, extracted_response)
diff --git a/autogen/agentchat/contrib/math_user_proxy_agent.py b/autogen/agentchat/contrib/math_user_proxy_agent.py
index c0851f4d29..45a58efc6b 100644
--- a/autogen/agentchat/contrib/math_user_proxy_agent.py
+++ b/autogen/agentchat/contrib/math_user_proxy_agent.py
@@ -402,6 +402,7 @@ class Config:
 extra = Extra.forbid
 @root_validator(skip_on_failure=True)
+    @classmethod
 def validate_environment(cls, values: dict) -> dict:
 """Validate that api key and python package exists in environment."""
 wolfram_alpha_appid = get_from_dict_or_env(values, "wolfram_alpha_appid", "WOLFRAM_ALPHA_APPID")
diff --git a/autogen/agentchat/contrib/retrieve_user_proxy_agent.py b/autogen/agentchat/contrib/retrieve_user_proxy_agent.py
index 81d6accd98..12801eb7dc 100644
--- a/autogen/agentchat/contrib/retrieve_user_proxy_agent.py
+++ b/autogen/agentchat/contrib/retrieve_user_proxy_agent.py
@@ -325,16 +325,16 @@ def _init_db(self):
 if not 
self._vector_db: return - IS_TO_CHUNK = False # whether to chunk the raw files + is_to_chunk = False # whether to chunk the raw files if self._new_docs: - IS_TO_CHUNK = True + is_to_chunk = True if not self._docs_path: try: self._vector_db.get_collection(self._collection_name) logger.warning(f"`docs_path` is not provided. Use the existing collection `{self._collection_name}`.") self._overwrite = False self._get_or_create = True - IS_TO_CHUNK = False + is_to_chunk = False except ValueError: raise ValueError( "`docs_path` is not provided. " @@ -346,16 +346,16 @@ def _init_db(self): self._vector_db.get_collection(self._collection_name) logger.info(f"Use the existing collection `{self._collection_name}`.", color="green") except ValueError: - IS_TO_CHUNK = True + is_to_chunk = True else: - IS_TO_CHUNK = True + is_to_chunk = True self._vector_db.active_collection = self._vector_db.create_collection( self._collection_name, overwrite=self._overwrite, get_or_create=self._get_or_create ) docs = None - if IS_TO_CHUNK: + if is_to_chunk: if self.custom_text_split_function is not None: chunks, sources = split_files_to_chunks( get_files_from_dir(self._docs_path, self._custom_text_types, self._recursive), diff --git a/autogen/agentchat/contrib/swarm_agent.py b/autogen/agentchat/contrib/swarm_agent.py index 3e4272b3b5..071e7fbf93 100644 --- a/autogen/agentchat/contrib/swarm_agent.py +++ b/autogen/agentchat/contrib/swarm_agent.py @@ -38,7 +38,7 @@ class AfterWorkOption(Enum): @dataclass -class AFTER_WORK: +class AFTER_WORK: # noqa: N801 """Handles the next step in the conversation when an agent doesn't suggest a tool call or a handoff Args: @@ -55,7 +55,7 @@ def __post_init__(self): @dataclass -class ON_CONDITION: +class ON_CONDITION: # noqa: N801 """Defines a condition for transitioning to another agent or nested chats Args: @@ -86,7 +86,7 @@ def __post_init__(self): @dataclass -class UPDATE_SYSTEM_MESSAGE: +class UPDATE_SYSTEM_MESSAGE: # noqa: N801 """Update the agent's system message before they reply Args: diff --git a/autogen/agentchat/realtime_agent/oai_realtime_client.py b/autogen/agentchat/realtime_agent/oai_realtime_client.py index b102c73656..4756aacb3e 100644 --- a/autogen/agentchat/realtime_agent/oai_realtime_client.py +++ b/autogen/agentchat/realtime_agent/oai_realtime_client.py @@ -335,8 +335,8 @@ async def read_events(self) -> AsyncGenerator[dict[str, Any], None]: logger = self.logger while True: try: - messageJSON = await self._websocket.receive_text() - message = json.loads(messageJSON) + message_json = await self._websocket.receive_text() + message = json.loads(message_json) if "function" in message["type"]: logger.info("Received function message", message) yield message diff --git a/autogen/exception_utils.py b/autogen/exception_utils.py index ac554d7a22..961f57f724 100644 --- a/autogen/exception_utils.py +++ b/autogen/exception_utils.py @@ -7,12 +7,12 @@ from typing import Any -class AgentNameConflict(Exception): +class AgentNameConflict(Exception): # noqa: N818 def __init__(self, msg: str = "Found multiple agents with the same name.", *args: Any, **kwargs: Any): super().__init__(msg, *args, **kwargs) -class NoEligibleSpeaker(Exception): +class NoEligibleSpeaker(Exception): # noqa: N818 """Exception raised for early termination of a GroupChat.""" def __init__(self, message: str = "No eligible speakers."): @@ -20,7 +20,7 @@ def __init__(self, message: str = "No eligible speakers."): super().__init__(self.message) -class SenderRequired(Exception): +class SenderRequired(Exception): # noqa: 
N818 """Exception raised when the sender is required but not provided.""" def __init__(self, message: str = "Sender is required but not provided."): @@ -28,7 +28,7 @@ def __init__(self, message: str = "Sender is required but not provided."): super().__init__(self.message) -class InvalidCarryOverType(Exception): +class InvalidCarryOverType(Exception): # noqa: N818 """Exception raised when the carryover type is invalid.""" def __init__( @@ -38,7 +38,7 @@ def __init__( super().__init__(self.message) -class UndefinedNextAgent(Exception): +class UndefinedNextAgent(Exception): # noqa: N818 """Exception raised when the provided next agents list does not overlap with agents in the group.""" def __init__(self, message: str = "The provided agents list does not overlap with agents in the group."): diff --git a/autogen/graph_utils.py b/autogen/graph_utils.py index a495fb4131..4af74ee9dc 100644 --- a/autogen/graph_utils.py +++ b/autogen/graph_utils.py @@ -129,18 +129,18 @@ def visualize_speaker_transitions_dict( logging.fatal("Failed to import networkx or matplotlib. Try running 'pip install autogen[graphs]'") raise e - G = nx.DiGraph() + g = nx.DiGraph() # Add nodes - G.add_nodes_from([agent.name for agent in agents]) + g.add_nodes_from([agent.name for agent in agents]) # Add edges for key, value in speaker_transitions_dict.items(): for agent in value: - G.add_edge(key.name, agent.name) + g.add_edge(key.name, agent.name) # Visualize - nx.draw(G, with_labels=True, font_weight="bold") + nx.draw(g, with_labels=True, font_weight="bold") if export_path is not None: plt.savefig(export_path) diff --git a/autogen/messages/base_message.py b/autogen/messages/base_message.py index 4f7eaba8ef..968183b456 100644 --- a/autogen/messages/base_message.py +++ b/autogen/messages/base_message.py @@ -71,11 +71,11 @@ def __init__(self, *args: Any, **data: Any): def print(self, f: Optional[Callable[..., Any]] = None) -> None: self.content.print(f) # type: ignore[attr-defined] - Wrapper = create_model(message_cls.__name__, __base__=WrapperBase) + wrapper_cls = create_model(message_cls.__name__, __base__=WrapperBase) - _message_classes[type_name] = Wrapper + _message_classes[type_name] = wrapper_cls - return Wrapper + return wrapper_cls def get_annotated_type_for_message_classes() -> type[Any]: diff --git a/autogen/oai/client.py b/autogen/oai/client.py index 3f923cd629..1b9d6121dd 100644 --- a/autogen/oai/client.py +++ b/autogen/oai/client.py @@ -34,7 +34,7 @@ else: # raises exception if openai>=1 is installed and something is wrong with imports from openai import APIError, APITimeoutError, AzureOpenAI, OpenAI - from openai import __version__ as OPENAIVERSION + from openai import __version__ as openai_version from openai.lib._parsing._completions import type_to_response_format_param from openai.resources import Completions from openai.types.chat import ChatCompletion @@ -63,7 +63,7 @@ cerebras_import_exception: Optional[ImportError] = None except ImportError as e: - cerebras_AuthenticationError = cerebras_InternalServerError = cerebras_RateLimitError = Exception + cerebras_AuthenticationError = cerebras_InternalServerError = cerebras_RateLimitError = Exception # noqa: N816 cerebras_import_exception = e try: @@ -76,7 +76,7 @@ gemini_import_exception: Optional[ImportError] = None except ImportError as e: - gemini_InternalServerError = gemini_ResourceExhausted = Exception + gemini_InternalServerError = gemini_ResourceExhausted = Exception # noqa: N816 gemini_import_exception = e try: @@ -89,7 +89,7 @@ anthropic_import_exception: 
Optional[ImportError] = None except ImportError as e: - anthorpic_InternalServerError = anthorpic_RateLimitError = Exception + anthorpic_InternalServerError = anthorpic_RateLimitError = Exception # noqa: N816 anthropic_import_exception = e try: @@ -102,7 +102,7 @@ mistral_import_exception: Optional[ImportError] = None except ImportError as e: - mistral_SDKError = mistral_HTTPValidationError = Exception + mistral_SDKError = mistral_HTTPValidationError = Exception # noqa: N816 mistral_import_exception = e try: @@ -112,7 +112,7 @@ together_import_exception: Optional[ImportError] = None except ImportError as e: - together_TogetherException = Exception + together_TogetherException = Exception # noqa: N816 together_import_exception = e try: @@ -126,7 +126,7 @@ groq_import_exception: Optional[ImportError] = None except ImportError as e: - groq_InternalServerError = groq_RateLimitError = groq_APIConnectionError = Exception + groq_InternalServerError = groq_RateLimitError = groq_APIConnectionError = Exception # noqa: N816 groq_import_exception = e try: @@ -140,7 +140,7 @@ cohere_import_exception: Optional[ImportError] = None except ImportError as e: - cohere_InternalServerError = cohere_TooManyRequestsError = cohere_ServiceUnavailableError = Exception + cohere_InternalServerError = cohere_TooManyRequestsError = cohere_ServiceUnavailableError = Exception # noqa: N816 cohere_import_exception = e try: @@ -153,7 +153,7 @@ ollama_import_exception: Optional[ImportError] = None except ImportError as e: - ollama_RequestError = ollama_ResponseError = Exception + ollama_RequestError = ollama_ResponseError = Exception # noqa: N816 ollama_import_exception = e try: @@ -166,7 +166,7 @@ bedrock_import_exception: Optional[ImportError] = None except ImportError as e: - bedrock_BotoCoreError = bedrock_ClientError = Exception + bedrock_BotoCoreError = bedrock_ClientError = Exception # noqa: N816 bedrock_import_exception = e logger = logging.getLogger(__name__) @@ -384,7 +384,7 @@ def _create_or_parse(*args, **kwargs): ), ) for i in range(len(response_contents)): - if OPENAIVERSION >= "1.5": # pragma: no cover + if openai_version >= "1.5": # pragma: no cover # OpenAI versions 1.5.0 and above choice = Choice( index=i, @@ -433,7 +433,7 @@ def cost(self, response: Union[ChatCompletion, Completion]) -> float: n_output_tokens = response.usage.completion_tokens if response.usage is not None else 0 # type: ignore [union-attr] if n_output_tokens is None: n_output_tokens = 0 - tmp_price1K = OAI_PRICE1K[model] + tmp_price1K = OAI_PRICE1K[model] # noqa: N806 # First value is input token rate, second value is output token rate if isinstance(tmp_price1K, tuple): return (tmp_price1K[0] * n_input_tokens + tmp_price1K[1] * n_output_tokens) / 1000 # type: ignore [no-any-return] diff --git a/autogen/oai/client_utils.py b/autogen/oai/client_utils.py index 6f417c90ba..beffe5183e 100644 --- a/autogen/oai/client_utils.py +++ b/autogen/oai/client_utils.py @@ -15,7 +15,7 @@ def validate_parameter( params: dict[str, Any], param_name: str, allowed_types: tuple, - allow_None: bool, + allow_None: bool, # noqa: N803 default_value: Any, numerical_bound: tuple, allowed_values: list, diff --git a/autogen/oai/completion.py b/autogen/oai/completion.py index 4f7d93497e..189141a987 100644 --- a/autogen/oai/completion.py +++ b/autogen/oai/completion.py @@ -40,12 +40,12 @@ RateLimitError, Timeout, ) - from openai import Completion as openai_Completion + from openai import Completion as OpenAICompletion ERROR = None assert openai.__version__ < "1" except 
diff --git a/autogen/oai/completion.py b/autogen/oai/completion.py
index 4f7d93497e..189141a987 100644
--- a/autogen/oai/completion.py
+++ b/autogen/oai/completion.py
@@ -40,12 +40,12 @@
         RateLimitError,
         Timeout,
     )
-    from openai import Completion as openai_Completion
+    from openai import Completion as OpenAICompletion

     ERROR = None
     assert openai.__version__ < "1"
 except (AssertionError, ImportError):
-    openai_Completion = object
+    OpenAICompletion = object

     # The autogen.Completion class requires openai<1
     ERROR = AssertionError("(Deprecated) The autogen.Completion class requires openai<1 and diskcache. ")
@@ -57,7 +57,7 @@
     logger.addHandler(_ch)


-class Completion(openai_Completion):
+class Completion(OpenAICompletion):
     """`(openai<1)` A class for OpenAI completion API.

     It also supports: ChatCompletion, Azure OpenAI API.
@@ -81,7 +81,7 @@ class Completion(openai_Completion):
     }

     # price per 1k tokens
-    price1K = {
+    price1K = {  # noqa: N815
         "text-ada-001": 0.0004,
         "text-babbage-001": 0.0005,
         "text-curie-001": 0.002,
@@ -1063,7 +1063,7 @@ def cost(cls, response: dict):
         usage = response["usage"]
         n_input_tokens = usage["prompt_tokens"]
         n_output_tokens = usage.get("completion_tokens", 0)
-        price1K = cls.price1K[model]
+        price1K = cls.price1K[model]  # noqa: N806
         if isinstance(price1K, tuple):
             return (price1K[0] * n_input_tokens + price1K[1] * n_output_tokens) / 1000
         return price1K * (n_input_tokens + n_output_tokens) / 1000
diff --git a/autogen/oai/gemini.py b/autogen/oai/gemini.py
index 1d24cebcaf..4561609e52 100644
--- a/autogen/oai/gemini.py
+++ b/autogen/oai/gemini.py
@@ -606,6 +606,7 @@ def _create_gemini_function_declaration_schema(json_data) -> Schema:

         return param_schema

+    @staticmethod
     def _create_gemini_function_parameters(function_parameter: dict[str, any]) -> dict[str, any]:
         """Convert function parameters to Gemini format, recursive"""
diff --git a/autogen/retrieve_utils.py b/autogen/retrieve_utils.py
index 6b9df68ff3..ac7dcc9039 100644
--- a/autogen/retrieve_utils.py
+++ b/autogen/retrieve_utils.py
@@ -19,7 +19,7 @@
 if chromadb.__version__ < "0.4.15":
     from chromadb.api import API
 else:
-    from chromadb.api import ClientAPI as API
+    from chromadb.api import ClientAPI as API  # noqa: N814
 import logging

 import chromadb.utils.embedding_functions as ef
diff --git a/autogen/token_count_utils.py b/autogen/token_count_utils.py
index defb163674..948bbfa003 100644
--- a/autogen/token_count_utils.py
+++ b/autogen/token_count_utils.py
@@ -232,9 +232,9 @@ def num_tokens_from_functions(functions, model="gpt-3.5-turbo-0613") -> int:
         if "parameters" in function:
             parameters = function["parameters"]
             if "properties" in parameters:
-                for propertiesKey in parameters["properties"]:
-                    function_tokens += len(encoding.encode(propertiesKey))
-                    v = parameters["properties"][propertiesKey]
+                for properties_key in parameters["properties"]:
+                    function_tokens += len(encoding.encode(properties_key))
+                    v = parameters["properties"][properties_key]
                     for field in v:
                         if field == "type":
                             function_tokens += 2
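`price1K` stays mixedCase above because it is a documented attribute of the deprecated `Completion` class; the patch silences N815 at the class-scope definition and N806 where a local variable mirrors it, rather than breaking the public surface. The same trade-off in miniature, with illustrative names:

```python
class LegacyPricing:
    # Public mixedCase attribute preserved for backwards compatibility,
    # mirroring Completion.price1K; N815 flags mixedCase class-scope names.
    price1K = {"text-ada-001": 0.0004}  # noqa: N815

    @classmethod
    def cost(cls, model: str, n_tokens: int) -> float:
        # The local intentionally mirrors the attribute name, so N806
        # ("variable in function should be lowercase") is silenced too.
        price1K = cls.price1K[model]  # noqa: N806
        return price1K * n_tokens / 1000


print(LegacyPricing.cost("text-ada-001", 1500))  # 0.0006
```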
diff --git a/autogen/tools/dependency_injection.py b/autogen/tools/dependency_injection.py
index 5a7ed2f7ff..e196fedcf3 100644
--- a/autogen/tools/dependency_injection.py
+++ b/autogen/tools/dependency_injection.py
@@ -42,7 +42,7 @@ class ChatContext(BaseContext):
     messages: list[str] = []


-def Depends(x: Any) -> Any:
+def Depends(x: Any) -> Any:  # noqa: N802
     """Creates a dependency for injection based on the provided context or type.

     Args:
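`Depends` keeps its CapWords name deliberately: it is a public helper modeled on FastAPI's `Depends`, so the patch adds `# noqa: N802` instead of renaming. For context, a short sketch of how it combines with `Annotated`, mirroring the tests later in this patch (the `AccountContext` type and `check_balance` function are illustrative):

```python
from typing import Annotated

from pydantic import BaseModel

from autogen.tools.dependency_injection import BaseContext, Depends


class AccountContext(BaseContext, BaseModel):
    balance: int


def check_balance(
    amount: int,
    ctx: Annotated[AccountContext, Depends(AccountContext(balance=100))],
) -> int:
    # `ctx` is resolved from the declared dependency at call time, so a
    # tool schema derived from this function only exposes `amount`.
    return ctx.balance - amount
```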
diff --git a/notebook/agentchat_databricks_dbrx.ipynb b/notebook/agentchat_databricks_dbrx.ipynb
index 2946ce1532..f5eafcd6de 100644
--- a/notebook/agentchat_databricks_dbrx.ipynb
+++ b/notebook/agentchat_databricks_dbrx.ipynb
@@ -507,7 +507,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "class Databricks_AutoGenLogger:\n",
+    "class DatabricksAutoGenLogger:\n",
     "    def __init__(self):\n",
     "        from pyspark.sql import SparkSession\n",
     "\n",
@@ -636,7 +636,7 @@
     "user_proxy = autogen.UserProxyAgent(name=\"user\", code_execution_config=False)\n",
     "\n",
     "# Before initiating chat, start logging:\n",
-    "logs = Databricks_AutoGenLogger()\n",
+    "logs = DatabricksAutoGenLogger()\n",
     "logs.start()\n",
     "try:\n",
     "    user_proxy.initiate_chat(assistant, message=\"What is MLflow?\", max_turns=1)\n",
diff --git a/notebook/agentchat_websockets.ipynb b/notebook/agentchat_websockets.ipynb
index b3f7e87879..165b58c9b0 100644
--- a/notebook/agentchat_websockets.ipynb
+++ b/notebook/agentchat_websockets.ipynb
@@ -429,7 +429,7 @@
     "    def __init__(self, *args, **kwargs):\n",
     "        super().__init__(*args, directory=temp_dir, **kwargs)\n",
     "\n",
-    "    def do_GET(self):\n",
+    "    def do_GET(self):  # noqa: N802\n",
     "        if self.path == \"/\":\n",
     "            self.path = \"/chat.html\"\n",
     "        return SimpleHTTPRequestHandler.do_GET(self)\n",
@@ -465,7 +465,7 @@
   ]
  },
  "kernelspec": {
-   "display_name": "venv",
+   "display_name": ".venv-3.9",
   "language": "python",
   "name": "python3"
  },
@@ -479,7 +479,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-   "version": "3.10.12"
+   "version": "3.9.20"
  }
 },
 "nbformat": 4,
diff --git a/pyproject.toml b/pyproject.toml
index 724182cc44..4e9b27f10d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -253,7 +253,7 @@ select = [
     "E",     # pycodestyle errors     https://docs.astral.sh/ruff/rules/#error-e
     "W",     # pycodestyle warnings   https://docs.astral.sh/ruff/rules/#warning-w
     "C90",   # mccabe                 https://docs.astral.sh/ruff/rules/#mccabe-c90
-#    "N",     # pep8-naming            https://docs.astral.sh/ruff/rules/#pep8-naming-n
+    "N",     # pep8-naming            https://docs.astral.sh/ruff/rules/#pep8-naming-n
 #    "D",     # pydocstyle             https://docs.astral.sh/ruff/rules/#pydocstyle-d
     "I",     # isort                  https://docs.astral.sh/ruff/rules/#isort-i
     "F",     # pyflakes               https://docs.astral.sh/ruff/rules/#pyflakes-f
@@ -358,8 +358,3 @@ warn_unused_ignores = false
 disallow_incomplete_defs = true
 disallow_untyped_decorators = true
 disallow_any_unimported = true
-
-[tool.codespell]
-#skip = "./venv*,./docs/site/*,./htmlcov,./examples/openapi/whatsapp_openapi_complete.json"
-skip = "paul_graham_essay.txt,attractions.json,website,notebook,dotnet"
-ignore-words = ".codespell-whitelist.txt"
diff --git a/test/agentchat/contrib/test_llamaindex_conversable_agent.py b/test/agentchat/contrib/test_llamaindex_conversable_agent.py
index 2f8a3fa900..6b993f01da 100644
--- a/test/agentchat/contrib/test_llamaindex_conversable_agent.py
+++ b/test/agentchat/contrib/test_llamaindex_conversable_agent.py
@@ -30,7 +30,7 @@
     pass


-openaiKey = MOCK_OPEN_AI_API_KEY
+openai_key = MOCK_OPEN_AI_API_KEY


 @pytest.mark.skipif(skip_for_dependencies, reason=skip_reason)
@@ -44,7 +44,7 @@ def test_group_chat_with_llama_index_conversable_agent(chat_mock: MagicMock) ->
     llm = OpenAI(
         model="gpt-4o",
         temperature=0.0,
-        api_key=openaiKey,
+        api_key=openai_key,
     )

     chat_mock.return_value = AgentChatResponse(
diff --git a/test/agentchat/realtime_agent/test_e2e.py b/test/agentchat/realtime_agent/test_e2e.py
index b3c58c2d23..a8b8ad56ac 100644
--- a/test/agentchat/realtime_agent/test_e2e.py
+++ b/test/agentchat/realtime_agent/test_e2e.py
@@ -102,7 +102,7 @@ async def test_e2e(self, credentials_gpt_4o_realtime: Credentials) -> None:
         """
         i = 0
-        count = 3
+        count = 5
         while True:
             try:
                 await self._test_e2e(credentials_gpt_4o_realtime=credentials_gpt_4o_realtime)
diff --git a/test/agentchat/test_conversable_agent.py b/test/agentchat/test_conversable_agent.py
index 83b557a675..e225d95605 100755
--- a/test/agentchat/test_conversable_agent.py
+++ b/test/agentchat/test_conversable_agent.py
@@ -575,7 +575,7 @@ def exec_sh(script: str) -> None:


 def test__wrap_function_sync():
-    CurrencySymbol = Literal["USD", "EUR"]
+    CurrencySymbol = Literal["USD", "EUR"]  # noqa: N806

     class Currency(BaseModel):
         currency: CurrencySymbol = Field(description="Currency code")
@@ -613,7 +613,7 @@ def currency_calculator(


 @pytest.mark.asyncio
 async def test__wrap_function_async():
-    CurrencySymbol = Literal["USD", "EUR"]
+    CurrencySymbol = Literal["USD", "EUR"]  # noqa: N806

     class Currency(BaseModel):
         currency: CurrencySymbol = Field(description="Currency code")
@@ -812,7 +812,7 @@ def exec_python(cell: Annotated[str, "Valid Python cell to execute."]) -> str:
     assert exec_python.description == "Execute a Python cell."


-def test_register_for_llm_without_LLM():
+def test_register_for_llm_without_LLM():  # noqa: N802
     agent = ConversableAgent(name="agent", llm_config=None)
     with pytest.raises(
         AssertionError,
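The `do_GET` suppression in the websockets notebook and the `test_register_for_llm_without_LLM` suppression above share a rationale: the names are fixed by an external contract (stdlib method dispatch, an established test ID), so the rule is silenced at the line instead of renaming. A sketch of the stdlib case, assuming a throwaway handler class:

```python
from http.server import SimpleHTTPRequestHandler


class ChatRequestHandler(SimpleHTTPRequestHandler):
    # `http.server` dispatches requests to `do_<METHOD>`, so this name
    # cannot follow snake_case; the lint rule is silenced at the line.
    def do_GET(self):  # noqa: N802
        # Serve chat.html for the root path, defer to the stdlib otherwise.
        if self.path == "/":
            self.path = "/chat.html"
        return super().do_GET()
```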
diff --git a/test/agentchat/test_dependancy_injection.py b/test/agentchat/test_dependancy_injection.py
index f91d963eb9..76ac0f1c5b 100644
--- a/test/agentchat/test_dependancy_injection.py
+++ b/test/agentchat/test_dependancy_injection.py
@@ -15,10 +15,109 @@
 from ..conftest import Credentials, reason, skip_openai  # noqa: E402


-class TestDependencyInjection:
-    class MyContext(BaseContext, BaseModel):
-        b: int
+class MyContext(BaseContext, BaseModel):
+    b: int
+
+
+def f_with_annotated(
+    a: int,
+    ctx: Annotated[MyContext, Depends(MyContext(b=2))],
+    c: Annotated[int, "c description"] = 3,
+) -> int:
+    return a + ctx.b + c
+
+
+async def f_with_annotated_async(
+    a: int,
+    ctx: Annotated[MyContext, Depends(MyContext(b=2))],
+    c: Annotated[int, "c description"] = 3,
+) -> int:
+    return a + ctx.b + c
+
+
+def f_without_annotated(
+    a: int,
+    ctx: MyContext = Depends(MyContext(b=3)),
+    c: Annotated[int, "c description"] = 3,
+) -> int:
+    return a + ctx.b + c
+
+
+async def f_without_annotated_async(
+    a: int,
+    ctx: MyContext = Depends(MyContext(b=3)),
+    c: Annotated[int, "c description"] = 3,
+) -> int:
+    return a + ctx.b + c
+
+
+def f_with_annotated_and_depends(
+    a: int,
+    ctx: MyContext = MyContext(b=4),
+    c: Annotated[int, "c description"] = 3,
+) -> int:
+    return a + ctx.b + c
+
+
+async def f_with_annotated_and_depends_async(
+    a: int,
+    ctx: MyContext = MyContext(b=4),
+    c: Annotated[int, "c description"] = 3,
+) -> int:
+    return a + ctx.b + c
+
+
+def f_with_multiple_depends(
+    a: int,
+    ctx: Annotated[MyContext, Depends(MyContext(b=2))],
+    ctx2: Annotated[MyContext, Depends(MyContext(b=3))],
+    c: Annotated[int, "c description"] = 3,
+) -> int:
+    return a + ctx.b + ctx2.b + c
+
+
+async def f_with_multiple_depends_async(
+    a: int,
+    ctx: Annotated[MyContext, Depends(MyContext(b=2))],
+    ctx2: Annotated[MyContext, Depends(MyContext(b=3))],
+    c: Annotated[int, "c description"] = 3,
+) -> int:
+    return a + ctx.b + ctx2.b + c
+
+
+def f_wihout_base_context(
+    a: int,
+    ctx: Annotated[int, Depends(lambda a: a + 2)],
+    c: Annotated[int, "c description"] = 3,
+) -> int:
+    return a + ctx + c
+
+
+async def f_wihout_base_context_async(
+    a: int,
+    ctx: Annotated[int, Depends(lambda a: a + 2)],
+    c: Annotated[int, "c description"] = 3,
+) -> int:
+    return a + ctx + c
+
+
+def f_with_default_depends(
+    a: int,
+    ctx: int = Depends(lambda a: a + 2),
+    c: Annotated[int, "c description"] = 3,
+) -> int:
+    return a + ctx + c
+
+
+async def f_with_default_depends_async(
+    a: int,
+    ctx: int = Depends(lambda a: a + 2),
+    c: Annotated[int, "c description"] = 3,
+) -> int:
+    return a + ctx + c
+

+class TestDependencyInjection:
     @pytest.fixture()
     def expected_tools(self) -> list[dict[str, Any]]:
         return [
@@ -39,92 +138,6 @@ def expected_tools(self) -> list[dict[str, Any]]:
             }
         ]

-    def f_with_annotated(
-        a: int,
-        ctx: Annotated[MyContext, Depends(MyContext(b=2))],
-        c: Annotated[int, "c description"] = 3,
-    ) -> int:
-        return a + ctx.b + c
-
-    async def f_with_annotated_async(
-        a: int,
-        ctx: Annotated[MyContext, Depends(MyContext(b=2))],
-        c: Annotated[int, "c description"] = 3,
-    ) -> int:
-        return a + ctx.b + c
-
-    def f_without_annotated(
-        a: int,
-        ctx: MyContext = Depends(MyContext(b=3)),
-        c: Annotated[int, "c description"] = 3,
-    ) -> int:
-        return a + ctx.b + c
-
-    async def f_without_annotated_async(
-        a: int,
-        ctx: MyContext = Depends(MyContext(b=3)),
-        c: Annotated[int, "c description"] = 3,
-    ) -> int:
-        return a + ctx.b + c
-
-    def f_with_annotated_and_depends(
-        a: int,
-        ctx: MyContext = MyContext(b=4),
-        c: Annotated[int, "c description"] = 3,
-    ) -> int:
-        return a + ctx.b + c
-
-    async def f_with_annotated_and_depends_async(
-        a: int,
-        ctx: MyContext = MyContext(b=4),
-        c: Annotated[int, "c description"] = 3,
-    ) -> int:
-        return a + ctx.b + c
-
-    def f_with_multiple_depends(
-        a: int,
-        ctx: Annotated[MyContext, Depends(MyContext(b=2))],
-        ctx2: Annotated[MyContext, Depends(MyContext(b=3))],
-        c: Annotated[int, "c description"] = 3,
-    ) -> int:
-        return a + ctx.b + ctx2.b + c
-
-    async def f_with_multiple_depends_async(
-        a: int,
-        ctx: Annotated[MyContext, Depends(MyContext(b=2))],
-        ctx2: Annotated[MyContext, Depends(MyContext(b=3))],
-        c: Annotated[int, "c description"] = 3,
-    ) -> int:
-        return a + ctx.b + ctx2.b + c
-
-    def f_wihout_base_context(
-        a: int,
-        ctx: Annotated[int, Depends(lambda a: a + 2)],
-        c: Annotated[int, "c description"] = 3,
-    ) -> int:
-        return a + ctx + c
-
-    async def f_wihout_base_context_async(
-        a: int,
-        ctx: Annotated[int, Depends(lambda a: a + 2)],
-        c: Annotated[int, "c description"] = 3,
-    ) -> int:
-        return a + ctx + c
-
-    def f_with_default_depends(
-        a: int,
-        ctx: int = Depends(lambda a: a + 2),
-        c: Annotated[int, "c description"] = 3,
-    ) -> int:
-        return a + ctx + c
-
-    async def f_with_default_depends_async(
-        a: int,
-        ctx: int = Depends(lambda a: a + 2),
-        c: Annotated[int, "c description"] = 3,
-    ) -> int:
-        return a + ctx + c
-
     @pytest.mark.parametrize(
         ("func", "func_name", "is_async", "expected"),
         [
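Moving these helpers to module level is not purely stylistic: defined inside a test class without `self`, each one is treated as a method, so pep8-naming's N805 ("first argument of a method should be named `self`") fires on every definition, as the `test/tools` variant further below still shows with its per-line suppressions. The difference in miniature:

```python
class TestTools:
    # Inside a class body this is treated as a method, so N805 expects
    # `self` as the first argument and has to be silenced.
    def helper(a: int) -> int:  # noqa: N805
        return a + 1


# At module level the same helper needs no suppression at all.
def helper(a: int) -> int:
    return a + 1
```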
diff --git a/test/messages/test_base_message.py b/test/messages/test_base_message.py
index 1933af8e70..6edbfaebbf 100644
--- a/test/messages/test_base_message.py
+++ b/test/messages/test_base_message.py
@@ -19,7 +19,7 @@


 @pytest.fixture()
-def TestMessage() -> Generator[type[BaseMessage], None, None]:
+def TestMessage() -> Generator[type[BaseMessage], None, None]:  # noqa: N802
     org_message_classes = _message_classes.copy()
     try:

@@ -36,7 +36,7 @@ class TestMessage(BaseMessage):


 class TestBaseMessage:
-    def test_model_dump_validate(self, TestMessage: type[BaseModel], uuid: UUID) -> None:
+    def test_model_dump_validate(self, TestMessage: type[BaseModel], uuid: UUID) -> None:  # noqa: N803
         # print(f"{TestMessage=}")

         message = TestMessage(uuid=uuid, sender="sender", receiver="receiver", content="Hello, World!")
diff --git a/test/oai/test_custom_client.py b/test/oai/test_custom_client.py
index 0e7fea224d..72ac75a333 100644
--- a/test/oai/test_custom_client.py
+++ b/test/oai/test_custom_client.py
@@ -18,15 +18,15 @@
 else:
     skip = False

+TEST_COST = 20000000
+TEST_CUSTOM_RESPONSE = "This is a custom response."
+TEST_DEVICE = "cpu"
+TEST_LOCAL_MODEL_NAME = "local_model_name"
+TEST_OTHER_PARAMS_VAL = "other_params"
+TEST_MAX_LENGTH = 1000

-def test_custom_model_client():
-    TEST_COST = 20000000
-    TEST_CUSTOM_RESPONSE = "This is a custom response."
-    TEST_DEVICE = "cpu"
-    TEST_LOCAL_MODEL_NAME = "local_model_name"
-    TEST_OTHER_PARAMS_VAL = "other_params"
-    TEST_MAX_LENGTH = 1000

+def test_custom_model_client():
     class CustomModel:
         def __init__(self, config: dict, test_hook):
             self.test_hook = test_hook
diff --git a/test/test_notebook.py b/test/test_notebook.py
index 533df8b623..f0aaca5446 100755
--- a/test/test_notebook.py
+++ b/test/test_notebook.py
@@ -98,7 +98,7 @@ def test_agentchat_function_call_async(save=False):
     skip or not sys.version.startswith("3.12"),
     reason="do not run if openai is not installed or py!=3.12",
 )
-def _test_agentchat_MathChat(save=False):
+def _test_agentchat_MathChat(save=False):  # noqa: N802
     run_notebook("agentchat_MathChat.ipynb", save=save)
diff --git a/test/tools/test_dependency_injection.py b/test/tools/test_dependency_injection.py
index e504c810ff..f2613e7c9b 100644
--- a/test/tools/test_dependency_injection.py
+++ b/test/tools/test_dependency_injection.py
@@ -24,13 +24,13 @@ class MyContext(BaseContext, BaseModel):
         b: int

     def f_with_annotated(  # type: ignore[misc]
-        a: int,
+        a: int,  # noqa: N805
         ctx: Annotated[MyContext, Depends(MyContext(b=2))],
     ) -> int:
         return a + ctx.b

     async def f_with_annotated_async(  # type: ignore[misc]
-        a: int,
+        a: int,  # noqa: N805
         ctx: Annotated[MyContext, Depends(MyContext(b=2))],
     ) -> int:
         return a + ctx.b
@@ -64,14 +64,14 @@ async def f_without_annotated_and_depends_async(
         return a + ctx.b

     @staticmethod
-    def f_without_MyContext(
+    def f_without_MyContext(  # noqa: N802
         a: int,
         ctx: Annotated[int, Depends(lambda a: a + 2)],
     ) -> int:
         return a + ctx

     @staticmethod
-    def f_without_MyContext_async(
+    def f_without_MyContext_async(  # noqa: N802
         a: int,
         ctx: Annotated[int, Depends(lambda a: a + 2)],
     ) -> int:
diff --git a/website/docs/topics/code-execution/.gitignore b/website/docs/topics/code-execution/.gitignore
new file mode 100644
index 0000000000..e0230e42a3
--- /dev/null
+++ b/website/docs/topics/code-execution/.gitignore
@@ -0,0 +1 @@
+coding
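Hoisting the `TEST_*` constants in `test_custom_client.py` follows from the same rule set: N806 flags any non-lowercase binding inside a function, while UPPER_CASE constants at module scope are exactly what the convention expects. In miniature:

```python
TEST_COST = 20000000  # module scope: UPPER_CASE is the expected constant style


def test_cost_is_positive():
    # An UPPER_CASE binding placed inside the function body would instead
    # trip N806 ("variable in function should be lowercase"), which is why
    # the constants above were moved out of test_custom_model_client().
    assert TEST_COST > 0
```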
diff --git a/website/docs/topics/code-execution/user-defined-functions.ipynb b/website/docs/topics/code-execution/user-defined-functions.ipynb
index ade02dfc08..0c05a9b287 100644
--- a/website/docs/topics/code-execution/user-defined-functions.ipynb
+++ b/website/docs/topics/code-execution/user-defined-functions.ipynb
@@ -63,7 +63,7 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
@@ -96,13 +96,13 @@
  },
  {
   "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "from pandas import DataFrame\n",
-    "from pandas import DataFrame as df\n",
+    "from pandas import DataFrame as Df\n",
    "\n",
    "from autogen.coding.func_with_reqs import Alias, ImportFromModule, with_requirements\n",
    "\n",
@@ -115,8 +115,8 @@
    "def some_func2() -> DataFrame: ...\n",
    "\n",
    "\n",
-    "@with_requirements(python_packages=[\"pandas\"], global_imports=[ImportFromModule(\"pandas\", Alias(\"DataFrame\", \"df\"))])\n",
-    "def some_func3() -> df: ..."
+    "@with_requirements(python_packages=[\"pandas\"], global_imports=[ImportFromModule(\"pandas\", Alias(\"DataFrame\", \"Df\"))])\n",
+    "def some_func3() -> Df: ..."
   ]
  },
  {
@@ -130,7 +130,7 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
@@ -153,20 +153,12 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": null,
   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "exit_code=0 output='3\\n' code_file='/Users/jackgerrits/w/autogen/website/docs/topics/code-execution/coding/tmp_code_1958fe3aea3e8e3c6e907fe951b5f6ab.py'\n"
-     ]
-    }
-   ],
+   "outputs": [],
   "source": [
    "code = f\"\"\"\n",
-    "from {LocalCommandLineCodeExecutor.FUNCTIONS_MODULE} import add_two_numbers\n",
+    "from {LocalCommandLineCodeExecutor.functions_module} import add_two_numbers\n",
    "\n",
    "print(add_two_numbers(1, 2))\n",
    "\"\"\"\n",
@@ -189,25 +181,12 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": null,
   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "     name  location  age\n",
-      "0    John  New York   24\n",
-      "1    Anna     Paris   13\n",
-      "2   Peter    Berlin   53\n",
-      "3   Linda    London   33\n",
-      "\n"
-     ]
-    }
-   ],
+   "outputs": [],
   "source": [
    "code = f\"\"\"\n",
-    "from {LocalCommandLineCodeExecutor.FUNCTIONS_MODULE} import load_data\n",
+    "from {LocalCommandLineCodeExecutor.functions_module} import load_data\n",
    "\n",
    "print(load_data())\n",
    "\"\"\"\n",
@@ -241,40 +220,9 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": null,
   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\n",
-      "You have been given coding capability to solve tasks using Python code.\n",
-      "In the following cases, suggest python code (in a python coding block) or shell script (in a sh coding block) for the user to execute.\n",
-      "    1. When you need to collect info, use the code to output the info you need, for example, browse or search the web, download/read a file, print the content of a webpage or a file, get the current date/time, check the operating system. After sufficient info is printed and the task is ready to be solved based on your language skill, you can solve the task by yourself.\n",
-      "    2. When you need to perform some task with code, use the code to perform the task and output the result. Finish the task smartly.\n",
-      "Solve the task step by step if you need to. If a plan is not provided, explain your plan first. Be clear which step uses code, and which step uses your language skill.\n",
-      "When using code, you must indicate the script type in the code block. The user cannot provide any other feedback or perform any other action beyond executing the code you suggest. The user can't modify your code. So do not suggest incomplete code which requires users to modify. Don't use a code block if it's not intended to be executed by the user.\n",
-      "If you want the user to save the code in a file before executing it, put # filename: <filename> inside the code block as the first line. Don't include multiple code blocks in one response. Do not ask users to copy and paste the result. Instead, use 'print' function for the output when relevant. Check the execution result returned by the user.\n",
-      "You have access to the following user defined functions. They can be accessed from the module called `functions` by their function names.\n",
-      "\n",
-      "For example, if there was a function called `foo` you could import it by writing `from functions import foo`\n",
-      "\n",
-      "def add_two_numbers(a: int, b: int) -> int:\n",
-      "    \"\"\"Add two numbers together.\"\"\"\n",
-      "    ...\n",
-      "\n",
-      "def load_data() -> pandas.core.frame.DataFrame:\n",
-      "    \"\"\"Load some sample data.\n",
-      "\n",
-      "    Returns:\n",
-      "        pandas.DataFrame: A DataFrame with the following columns: name(str), location(str), age(int)\n",
-      "    \"\"\"\n",
-      "    ...\n",
-      "\n"
-     ]
-    }
-   ],
+   "outputs": [],
   "source": [
    "nlnl = \"\\n\\n\"\n",
    "code_writer_system_message = \"\"\"\n",
@@ -302,7 +250,7 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
@@ -329,7 +277,7 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
@@ -352,61 +300,9 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 13,
+   "execution_count": null,
   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\u001b[33mcode_executor_agent\u001b[0m (to code_writer):\n",
-      "\n",
-      "Please use the load_data function to load the data and please calculate the average age of all people.\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n",
-      "\u001b[33mcode_writer\u001b[0m (to code_executor_agent):\n",
-      "\n",
-      "Below is the python code to load the data using the `load_data()` function and calculate the average age of all people. \n",
-      "\n",
-      "```python\n",
-      "# python code\n",
-      "from functions import load_data\n",
-      "import numpy as np\n",
-      "\n",
-      "# Load the data\n",
-      "df = load_data()\n",
-      "\n",
-      "# Calculate the average age\n",
-      "avg_age = np.mean(df['age'])\n",
-      "\n",
-      "print(\"The average age is\", avg_age)\n",
-      "```\n",
-      "\n",
-      "This code starts by importing the `load_data()` function. It then uses this function to load the data into a variable `df`. Afterwards, it calculates the average (mean) of the 'age' column in the DataFrame, before printing the result.\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n",
-      "\u001b[31m\n",
-      ">>>>>>>> EXECUTING CODE BLOCK (inferred language is python)...\u001b[0m\n",
-      "\u001b[33mcode_executor_agent\u001b[0m (to code_writer):\n",
-      "\n",
-      "exitcode: 0 (execution succeeded)\n",
-      "Code output: The average age is 30.75\n",
-      "\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n",
-      "\u001b[33mcode_writer\u001b[0m (to code_executor_agent):\n",
-      "\n",
-      "Great! The code worked fine. So, the average age of all people in the dataset is 30.75 years.\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n",
-      "\u001b[33mcode_executor_agent\u001b[0m (to code_writer):\n",
-      "\n",
-      "\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n"
-     ]
-    }
-   ],
+   "outputs": [],
   "source": [
    "chat_result = code_executor_agent.initiate_chat(\n",
    "    code_writer_agent,\n",
@@ -424,17 +320,9 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 14,
+   "execution_count": null,
   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "The average age of all people in the dataset is 30.75 years.\n"
-     ]
-    }
-   ],
+   "outputs": [],
   "source": [
    "print(chat_result.summary)"
   ]
@@ -442,7 +330,7 @@
 ],
 "metadata": {
  "kernelspec": {
-   "display_name": "autogen",
+   "display_name": ".venv-3.9",
   "language": "python",
   "name": "python3"
  },
@@ -456,7 +344,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-   "version": "3.11.8"
+   "version": "3.9.20"
  }
 },
 "nbformat": 4,