From b6342b2932567fa854254c070183bd142a1aa549 Mon Sep 17 00:00:00 2001 From: Xuchen Pan <32844285+pan-x-c@users.noreply.github.com> Date: Fri, 29 Mar 2024 17:03:02 +0800 Subject: [PATCH] Add distributed search example (#121) --- .pre-commit-config.yaml | 1 + README.md | 10 ++- README_ZH.md | 5 +- examples/distributed_basic/README.md | 25 ++++++ .../configs/model_configs.json | 13 ++- .../distributed_dialog.py | 4 +- .../README.md | 30 +------ .../configs/debate_agent_configs.json | 0 .../configs/model_configs.json | 21 +++++ .../distributed_debate.py | 0 .../user_proxy_agent.py | 0 examples/distributed_search/README.md | 83 +++++++++++++++++ examples/distributed_search/answerer_agent.py | 60 +++++++++++++ .../configs/model_configs.json | 12 +++ examples/distributed_search/main.py | 77 ++++++++++++++++ examples/distributed_search/searcher_agent.py | 88 +++++++++++++++++++ 16 files changed, 387 insertions(+), 42 deletions(-) create mode 100644 examples/distributed_basic/README.md rename examples/{distributed => distributed_basic}/configs/model_configs.json (64%) rename examples/{distributed => distributed_basic}/distributed_dialog.py (96%) rename examples/{distributed => distributed_debate}/README.md (66%) rename examples/{distributed => distributed_debate}/configs/debate_agent_configs.json (100%) create mode 100644 examples/distributed_debate/configs/model_configs.json rename examples/{distributed => distributed_debate}/distributed_debate.py (100%) rename examples/{distributed => distributed_debate}/user_proxy_agent.py (100%) create mode 100644 examples/distributed_search/README.md create mode 100644 examples/distributed_search/answerer_agent.py create mode 100644 examples/distributed_search/configs/model_configs.json create mode 100644 examples/distributed_search/main.py create mode 100644 examples/distributed_search/searcher_agent.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8ba726ab7..4de2695be 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -40,6 +40,7 @@ repos: --disable-error-code=import-untyped, --disable-error-code=truthy-function, --follow-imports=skip, + --explicit-package-bases, ] # - repo: https://github.com/numpy/numpydoc # rev: v1.6.0 diff --git a/README.md b/README.md index 2c68bb324..91367b4cc 100644 --- a/README.md +++ b/README.md @@ -114,8 +114,9 @@ the following libraries. - [Werewolf](./examples/game_werewolf) - Distribution - - [Distributed Conversation](./examples/distribution_conversation) - - [Distributed Debate](./examples/distribution_debate) + - [Distributed Conversation](./examples/distributed_basic) + - [Distributed Debate](./examples/distributed_debate) + - [Distributed Search](./examples/distributed_search) More models, services and examples are coming soon! @@ -245,22 +246,25 @@ AgentScope provides an easy-to-use runtime user interface capable of displaying multimodal output on the front end, including text, images, audio and video. To start a studio, you should install the `full` version of AgentScope. + ``` # On windows pip install -e .[full] # On mac pip install -e .\[full\] ``` + Once installed, you can just run + ``` as_studio path/to/your/script.py ``` + Then the studio will be launched at `localhost:xxxx`, and you can see the UI similar to the following: ![](https://gw.alicdn.com/imgextra/i3/O1CN01X673v81WaHV1oCxEN_!!6000000002804-0-tps-2992-1498.jpg) To be able to use the `as_studio` functionality, please implement the `main` function in your code. 
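A minimal sketch of such a script follows (the script name, the `my_model` config name, and the `"exit"` convention are illustrative assumptions, not part of this repository):

```python
# hypothetical my_script.py; launch with: as_studio my_script.py
import agentscope
from agentscope.agents import DialogAgent
from agentscope.agents.user_agent import UserAgent


def main() -> None:
    """Entry point expected by `as_studio`."""
    agentscope.init(model_configs="configs/model_configs.json")
    assistant = DialogAgent(
        name="Assistant",
        sys_prompt="You are a helpful assistant.",
        model_config_name="my_model",  # assumed config_name in the JSON
    )
    user = UserAgent()
    msg = user()
    while msg.content != "exit":
        msg = assistant(msg)
        msg = user(msg)


if __name__ == "__main__":
    main()
```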
More detail can be found in [src/agentscope/web/README.md](src/agentscope/web/README.md).
-
 ## Tutorial

 - [Getting Started](https://modelscope.github.io/agentscope/en/tutorial/quick_start.html)
diff --git a/README_ZH.md b/README_ZH.md
index 53a542c25..44f6fb5d2 100644
--- a/README_ZH.md
+++ b/README_ZH.md
@@ -102,8 +102,9 @@ AgentScope支持使用以下库快速部署本地模型服务。
 - [狼人杀](./examples/game_werewolf)
 - 分布式
-  - [分布式对话](./examples/distribution_conversation)
-  - [分布式辩论](./examples/distribution_debate)
+  - [分布式对话](./examples/distributed_basic)
+  - [分布式辩论](./examples/distributed_debate)
+  - [分布式搜索](./examples/distributed_search)

 更多模型API、服务和示例即将推出!
diff --git a/examples/distributed_basic/README.md b/examples/distributed_basic/README.md
new file mode 100644
index 000000000..5a095e0e2
--- /dev/null
+++ b/examples/distributed_basic/README.md
@@ -0,0 +1,25 @@
+# Distributed Basic
+
+This example runs an assistant agent and a user agent as separate processes and uses RPC to communicate between them.
+
+Before running the example, please install the distributed version of AgentScope, fill in your model configuration correctly in `configs/model_configs.json`, and modify the `model_config_name` field in `distributed_dialog.py` accordingly.
+
+Then, use the following command to start the assistant agent.
+
+```
+cd examples/distributed_basic
+python distributed_dialog.py --role assistant --assistant-host localhost --assistant-port 12010
+# Please make sure the port is available.
+# If the assistant agent and the user agent are started on different machines,
+# please fill in the IP address of the assistant agent in the host field.
+```
+
+Then, run the user agent.
+
+```
+python distributed_dialog.py --role user --assistant-host localhost --assistant-port 12010
+# If the assistant agent is started on another machine,
+# please fill in the IP address of the assistant agent in the host field.
+```
+
+Now, you can chat with the assistant agent using the command line.
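Under the hood, the two commands exercise the two halves of `distributed_dialog.py` (see the diff below): the assistant process starts an RPC agent server, while the user process builds a `DialogAgent` proxy with `to_dist` that forwards every call to that server. A condensed sketch of the client side, assuming the server from the first command is already running (extra connection flags may be needed depending on your AgentScope version):

```python
# client-side sketch, mirroring run_main_process in distributed_dialog.py
import agentscope
from agentscope.agents import DialogAgent
from agentscope.agents.user_agent import UserAgent

agentscope.init(model_configs="configs/model_configs.json")

assistant_agent = DialogAgent(
    name="Assistant",
    sys_prompt="You are a helpful assistant.",
    model_config_name="qwen",
    use_memory=True,
).to_dist(
    host="localhost",  # address of the assistant agent server
    port=12010,        # port chosen when starting the server
)

user_agent = UserAgent(name="User")
msg = user_agent()
while msg.content != "exit":
    msg = assistant_agent(msg)  # executed remotely in the server process
    msg = user_agent(msg)
```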
diff --git a/examples/distributed/configs/model_configs.json b/examples/distributed_basic/configs/model_configs.json
similarity index 64%
rename from examples/distributed/configs/model_configs.json
rename to examples/distributed_basic/configs/model_configs.json
index fa8ebbf7b..3700b6e49 100644
--- a/examples/distributed/configs/model_configs.json
+++ b/examples/distributed_basic/configs/model_configs.json
@@ -1,20 +1,19 @@
 [
     {
-        "config_name": "gpt-3.5-turbo",
+        "config_name": "gpt-4",
         "model_type": "openai",
-        "model_name": "gpt-3.5-turbo",
+        "model_name": "gpt-4",
         "api_key": "xxx",
         "organization": "xxx",
         "generate_args": {
-            "temperature": 0.0
+            "temperature": 0.5
         }
     },
     {
-        "config_name": "gpt-4",
-        "model_type": "openai",
-        "model_name": "gpt-4",
+        "config_name": "qwen",
+        "model_type": "dashscope_chat",
+        "model_name": "qwen-max",
         "api_key": "xxx",
-        "organization": "xxx",
         "generate_args": {
             "temperature": 0.5
         }
diff --git a/examples/distributed/distributed_dialog.py b/examples/distributed_basic/distributed_dialog.py
similarity index 96%
rename from examples/distributed/distributed_dialog.py
rename to examples/distributed_basic/distributed_dialog.py
index cc210d325..d3c99cfa5 100644
--- a/examples/distributed/distributed_dialog.py
+++ b/examples/distributed_basic/distributed_dialog.py
@@ -41,7 +41,7 @@ def setup_assistant_server(assistant_host: str, assistant_port: int) -> None:
         agent_kwargs={
             "name": "Assitant",
             "sys_prompt": "You are a helpful assistant.",
-            "model_config_name": "gpt-3.5-turbo",
+            "model_config_name": "qwen",
             "use_memory": True,
         },
         host=assistant_host,
@@ -59,7 +59,7 @@ def run_main_process(assistant_host: str, assistant_port: int) -> None:
     assistant_agent = DialogAgent(
         name="Assistant",
         sys_prompt="You are a helpful assistant.",
-        model_config_name="gpt-3.5-turbo",
+        model_config_name="qwen",
         use_memory=True,
     ).to_dist(
         host=assistant_host,
diff --git a/examples/distributed/README.md b/examples/distributed_debate/README.md
similarity index 66%
rename from examples/distributed/README.md
rename to examples/distributed_debate/README.md
index 1e1610894..6b43a3422 100644
--- a/examples/distributed/README.md
+++ b/examples/distributed_debate/README.md
@@ -1,30 +1,4 @@
-# Distributed multi-agent example
-
-## Distributed dialogue (`distributed_dialog.py`)
-
-This example run a assistant agent and a user agent as seperate processes and use rpc to communicate between them.
-
-First, use the following command to start the assistant agent.
-
-```
-cd examples/distributed
-python distributed_dialog.py --role assistant --assistant-host localhost --assistant-port 12010
-# please make sure the port is available
-# if the assistant agent and the user agent are started on different machines
-# please fill in the ip address of the assistant agent in the host field
-```
-
-Then, run the user agent.
-
-```
-python distributed_dialog.py --role user --assistant-host localhost --assistant-port 12010
-# if the assistant agent is started on another machine
-# please fill in the ip address of the assistant agent in the host field
-```
-
-Now, you can chat with the assistant agent using the command line.
-
-## Distributed debate competition (`distributed_debate.py`)
+# Distributed debate competition

 This example simulates a debate competition with three participant agents, including the affirmative side (**Pro**), the negative side (**Con**), and the adjudicator (**Judge**).
**You can join in the debate as Pro or Con or both.**

@@ -36,7 +10,7 @@ Each agent is an independent process and can run on different machines.

 You can join the debate as Pro or Con by providing the `--is-human` argument.
 Messages generated by any agent can be observed by the other agents in the debate.

-> Due to role restrictions, DashScope APIs (e.g. Qwen) are currently unable to execute this example.
+Before running the example, please install the distributed version of AgentScope, fill in your model configuration correctly in `configs/model_configs.json`, and modify the `model_config_name` field in `configs/debate_agent_configs.json` accordingly.

 ### Step 1: setup Pro, Con agent servers

diff --git a/examples/distributed/configs/debate_agent_configs.json b/examples/distributed_debate/configs/debate_agent_configs.json
similarity index 100%
rename from examples/distributed/configs/debate_agent_configs.json
rename to examples/distributed_debate/configs/debate_agent_configs.json
diff --git a/examples/distributed_debate/configs/model_configs.json b/examples/distributed_debate/configs/model_configs.json
new file mode 100644
index 000000000..3700b6e49
--- /dev/null
+++ b/examples/distributed_debate/configs/model_configs.json
@@ -0,0 +1,21 @@
+[
+    {
+        "config_name": "gpt-4",
+        "model_type": "openai",
+        "model_name": "gpt-4",
+        "api_key": "xxx",
+        "organization": "xxx",
+        "generate_args": {
+            "temperature": 0.5
+        }
+    },
+    {
+        "config_name": "qwen",
+        "model_type": "dashscope_chat",
+        "model_name": "qwen-max",
+        "api_key": "xxx",
+        "generate_args": {
+            "temperature": 0.5
+        }
+    }
+]
\ No newline at end of file
diff --git a/examples/distributed/distributed_debate.py b/examples/distributed_debate/distributed_debate.py
similarity index 100%
rename from examples/distributed/distributed_debate.py
rename to examples/distributed_debate/distributed_debate.py
diff --git a/examples/distributed/user_proxy_agent.py b/examples/distributed_debate/user_proxy_agent.py
similarity index 100%
rename from examples/distributed/user_proxy_agent.py
rename to examples/distributed_debate/user_proxy_agent.py
diff --git a/examples/distributed_search/README.md b/examples/distributed_search/README.md
new file mode 100644
index 000000000..57a66e04b
--- /dev/null
+++ b/examples/distributed_search/README.md
@@ -0,0 +1,83 @@
+# Multi-Agent Copilot Search
+
+## Introduction
+
+This example application converts the user's question into keywords to query a search engine, and then retrieves a series of web pages to find the answer. It involves three types of agents: the UserAgent for the user, the SearcherAgent responsible for searching, and the AnswererAgent responsible for retrieving answers from web pages.
+
+The search engine returns many web page links. To improve performance, multiple instances of AnswererAgent need to run together. However, in the traditional single-process mode, even if there are multiple AnswererAgent instances, they can only fetch web pages and answer questions one by one on a single CPU.
+
+With AgentScope's distributed mode, these AnswererAgent instances run at the same time automatically, improving performance.
+
+From this example, you can learn:
+
+- how to run multiple agents in different processes,
+- how to make multiple agents run in parallel automatically,
+- how to convert a single-process AgentScope application into a multi-process one (see the sketch below).
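As a concrete taste of that last point, here is a condensed sketch adapted from `main.py` later in this patch; the `use_dist` variable stands in for the `--use-dist` command-line switch:

```python
# single-process vs. distributed: the only difference is the to_dist call
import agentscope
from answerer_agent import AnswererAgent

agentscope.init(model_configs="configs/model_configs.json")
use_dist = True  # stands in for the --use-dist flag parsed in main.py

answerers = []
for i in range(10):
    answerer = AnswererAgent(
        name=f"Answerer-{i}",
        model_config_name="my_model",
    )
    if use_dist:
        # each agent now runs in its own process and is called over RPC
        answerer = answerer.to_dist(lazy_launch=False)
    answerers.append(answerer)
```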
+
+## How to Run
+
+### Step 0: Install AgentScope distributed version
+
+This example requires the distributed version of AgentScope.
+
+```bash
+# On Windows
+pip install -e .[distribute]
+# On macOS / Linux
+pip install -e .\[distribute\]
+```
+
+### Step 1: Prepare your model and search engine API configuration
+
+For the model configuration, please fill in your model configuration in `configs/model_configs.json`.
+Here is an example.
+
+> DashScope models (e.g. qwen-max) and OpenAI models (e.g. gpt-3.5-turbo and gpt-4) have been tested with this example.
+> Other models may require certain modifications to the code.
+
+```json
+[
+    {
+        "config_name": "my_model",
+        "model_type": "dashscope_chat",
+        "model_name": "qwen-max",
+        "api_key": "your_api_key",
+        "generate_args": {
+            "temperature": 0.5
+        },
+        "messages_key": "input"
+    }
+]
+```
+
+This example supports two search engines, Google and Bing. Their configuration items are as follows:
+
+- google
+  - `api-key`
+  - `cse-id`
+- bing
+  - `api-key`
+
+### Step 2: Run the example
+
+Use the `main.py` script to run the example. The script accepts the following parameters:
+
+- `--num-workers`: The number of AnswererAgent instances.
+- `--use-dist`: Enable distributed mode.
+- `--search-engine`: The search engine to use, currently `google` or `bing`.
+- `--api-key`: API key for Google or Bing.
+- `--cse-id`: CSE id for Google (if you use Bing, ignore this parameter).
+
+For example, to start the example application in distributed mode with 10 AnswererAgents and the Bing search engine, use the following command:
+
+```shell
+python main.py --num-workers 10 --search-engine bing --api-key xxxxx --use-dist
+```
+
+To run the same case in traditional single-process mode, use the following command:
+
+```shell
+python main.py --num-workers 10 --search-engine bing --api-key xxxxx
+```
+
+You can ask the same question in both modes to compare the runtime. For example, answering a question with 10 workers takes only 13.2s in distributed mode, while it takes 51.3s in single-process mode.
diff --git a/examples/distributed_search/answerer_agent.py b/examples/distributed_search/answerer_agent.py
new file mode 100644
index 000000000..5e0982080
--- /dev/null
+++ b/examples/distributed_search/answerer_agent.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+"""Answerer Agent."""
+
+from agentscope.message import Msg
+from agentscope.agents import AgentBase
+from agentscope.service.web_search.web_digest import load_web
+
+
+class AnswererAgent(AgentBase):
+    """An agent with web digest tool."""
+
+    def __init__(
+        self,
+        name: str,
+        model_config_name: str = None,
+    ) -> None:
+        super().__init__(
+            name=name,
+            sys_prompt="You are an AI assistant. You need to find answers to "
+            "user questions based on specified web content.",
+            model_config_name=model_config_name,
+            use_memory=False,
+        )
+
+    def reply(self, x: dict = None) -> dict:
+        # fetch and parse the web page assigned to this agent
+        response = load_web(
+            url=x.url,
+            keep_raw=False,
+            html_selected_tags=["p", "div", "h1", "li"],
+            timeout=5,
+        ).content
+        if (
+            "html_to_text" not in response
+            or len(response["html_to_text"]) == 0
+        ):
+            return Msg(
+                self.name,
+                content=f"Unable to load web page [{x.url}].",
+                url=x.url,
+            )
+        # prepare prompt
+        prompt = self.model.format(
+            Msg(name="system", role="system", content=self.sys_prompt),
+            Msg(
+                name="user",
+                role="user",
+                content="Please answer my question based on the content of"
+                " the following web page:\n\n"
+                f"{response['html_to_text']}"
+                "\n\nBased on the above web page,"
+                f" please answer my question:\n{x.query}",
+            ),
+        )
+        # call llm and generate response
+        response = self.model(prompt).text
+        msg = Msg(self.name, content=response, url=x.url)
+
+        self.speak(msg)
+
+        return msg
diff --git a/examples/distributed_search/configs/model_configs.json b/examples/distributed_search/configs/model_configs.json
new file mode 100644
index 000000000..9b53b2594
--- /dev/null
+++ b/examples/distributed_search/configs/model_configs.json
@@ -0,0 +1,12 @@
+[
+    {
+        "model_type": "dashscope_chat",
+        "config_name": "my_model",
+        "model_name": "qwen-max",
+        "api_key": "your_api_key",
+        "generate_args": {
+            "temperature": 0.5
+        },
+        "messages_key": "input"
+    }
+]
\ No newline at end of file
diff --git a/examples/distributed_search/main.py b/examples/distributed_search/main.py
new file mode 100644
index 000000000..e03c713f3
--- /dev/null
+++ b/examples/distributed_search/main.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+"""An example using multiple agents to search the Internet for answers."""
+import time
+import argparse
+from loguru import logger
+from searcher_agent import SearcherAgent
+from answerer_agent import AnswererAgent
+
+import agentscope
+from agentscope.agents.user_agent import UserAgent
+from agentscope.message import Msg
+
+
+def parse_args() -> argparse.Namespace:
+    """Parse arguments"""
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--num-workers", type=int, default=5)
+    parser.add_argument("--use-dist", action="store_true")
+    parser.add_argument(
+        "--search-engine",
+        type=str,
+        choices=["google", "bing"],
+        default="google",
+    )
+    parser.add_argument(
+        "--api-key",
+        type=str,
+    )
+    parser.add_argument("--cse-id", type=str, default=None)
+    return parser.parse_args()
+
+
+if __name__ == "__main__":
+    args = parse_args()
+    agentscope.init(
+        model_configs="configs/model_configs.json",
+    )
+
+    searcher = SearcherAgent(
+        name="Searcher",
+        model_config_name="my_model",
+        result_num=args.num_workers,
+        search_engine_type=args.search_engine,
+        api_key=args.api_key,
+        cse_id=args.cse_id,
+    )
+    answerers = []
+    for i in range(args.num_workers):
+        answerer = AnswererAgent(
+            name=f"Answerer-{i}",
+            model_config_name="my_model",
+        )
+        if args.use_dist:
+            # each AnswererAgent becomes an independent process
+            answerer = answerer.to_dist(lazy_launch=False)
+        answerers.append(answerer)
+
+    user_agent = UserAgent()
+
+    msg = user_agent()
+    while msg.content != "exit":
+        start_time = time.time()
+        msg = searcher(msg)
+        results = []
+        # dispatch one web page to each answerer
+        for page, worker in zip(msg.content, answerers):
+            results.append(worker(Msg(**page)))
+        for result in results:
+            logger.chat(result)
+        end_time = time.time()
+        logger.chat(
+            Msg(
+                name="system",
+                role="system",
+                content=f"Completed in [{end_time - start_time:.2f}]s",
+ ), + ) + msg = user_agent() diff --git a/examples/distributed_search/searcher_agent.py b/examples/distributed_search/searcher_agent.py new file mode 100644 index 000000000..127acf13e --- /dev/null +++ b/examples/distributed_search/searcher_agent.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- +"""Searcher agent.""" + +from functools import partial +from agentscope.message import Msg +from agentscope.agents import AgentBase +from agentscope.service.web_search.search import google_search, bing_search + + +class SearcherAgent(AgentBase): + """An agent with search tool.""" + + def __init__( + self, + name: str, + model_config_name: str = None, + result_num: int = 10, + search_engine_type: str = "google", + api_key: str = None, + cse_id: str = None, + ) -> None: + """Init a SearcherAgent. + + Args: + name (`str`): the name of this agent. + model_config_name (`str`, optional): The name of model + configuration for this agent. Defaults to None. + result_num (`int`, optional): The number of return results. + Defaults to 10. + search_engine_type (`str`, optional): the search engine to use. + Defaults to "google". + api_key (`str`, optional): api key for the search engine. Defaults + to None. + cse_id (`str`, optional): cse_id for the search engine. Defaults to + None. + """ + super().__init__( + name=name, + sys_prompt="You are an AI assistant who optimizes search" + " keywords. You need to transform users' questions into a series " + "of efficient search keywords.", + model_config_name=model_config_name, + use_memory=False, + ) + self.result_num = result_num + if search_engine_type == "google": + assert (api_key is not None) and ( + cse_id is not None + ), "google search requires 'api_key' and 'cse_id'" + self.search = partial( + google_search, + api_key=api_key, + cse_id=cse_id, + ) + elif search_engine_type == "bing": + assert api_key is not None, "bing search requires 'api_key'" + self.search = partial(bing_search, api_key=api_key) + + def reply(self, x: dict = None) -> dict: + prompt = self.model.format( + Msg(name="system", role="system", content=self.sys_prompt), + x, + Msg( + name="user", + role="user", + content="Please convert the question into keywords. The return" + " format is:\nKeyword1 Keyword2...", + ), + ) + query = self.model(prompt).text + self.speak(query) + results = self.search( + question=query, + num_results=self.result_num, + ).content + msg = Msg( + self.name, + content=[ + Msg( + name=self.name, + content=result, + url=result["link"], + query=x.content, + ) + for result in results + ], + ) + return msg