diff --git a/README.md b/README.md
index 9f56eca78..d33a9833c 100644
--- a/README.md
+++ b/README.md
@@ -28,7 +28,6 @@ Welcome to join our community on
|---------|----------|--------|
| | | |
-
----
## News
@@ -40,6 +39,7 @@ Table of Contents
=================
- [AgentScope](#agentscope)
+ - [News](#news)
- [Table of Contents](#table-of-contents)
- [Installation](#installation)
- [From source](#from-source)
@@ -48,6 +48,7 @@ Table of Contents
- [Basic Usage](#basic-usage)
- [Step 1: Prepare Model Configs](#step-1-prepare-model-configs)
- [OpenAI API Config](#openai-api-config)
+ - [DashScope API Config](#dashscope-api-config)
- [Post Request API Config](#post-request-api-config)
- [Step 2: Create Agents](#step-2-create-agents)
- [Step 3: Construct Conversation](#step-3-construct-conversation)
@@ -124,6 +125,7 @@ AgentScope supports the following model API services:
| OpenAI Chat API | `openai` | Standard OpenAI Chat API, FastChat, and vLLM |
| OpenAI DALL-E API | `openai_dall_e` | Standard DALL-E API |
| OpenAI Embedding API | `openai_embedding` | OpenAI embedding API |
+| DashScope Chat API | `dashscope_chat` | DashScope chat API, including the Qwen series |
| Post API | `post_api` | Huggingface/ModelScope inference API, and customized post API |
##### OpenAI API Config
@@ -144,6 +146,21 @@ For OpenAI APIs, you need to prepare a dict of model config with the following f
}
```
+##### DashScope API Config
+
+For DashScope APIs, you need to prepare a dict of model config with the following fields:
+
+```
+{
+ "config_name": "{config name}", # The name to identify the config
+ "model_type": "dashscope_chat" | "dashscope_text_embedding" | "dashscope_image_synthesis",
+ "model_name": "{model name, e.g. qwen-max}", # The model in dashscope API
+ "api_key": "xxx", # The API key for DashScope API.
+}
+```
+
+> Note: DashScope APIs may impose strict requirements on the `role` field in messages. Please use with caution.
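+
+A minimal sketch of registering such a config at init time (the config name `qwen_config` is illustrative, and we assume `agentscope.init` accepts inline config dicts in addition to the JSON-file path used elsewhere in the examples):
+
+```python
+import agentscope
+
+# Register a DashScope chat model config inline; agents can then refer
+# to it by its config_name ("qwen_config" is an illustrative value).
+agentscope.init(
+    model_configs=[
+        {
+            "config_name": "qwen_config",
+            "model_type": "dashscope_chat",
+            "model_name": "qwen-max",
+            "api_key": "xxx",  # your DashScope API key
+        },
+    ],
+)
+```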
+
##### Post Request API Config
For post request APIs, the config contains the following fields.
diff --git a/README_ZH.md b/README_ZH.md
index 3e8e3c079..04f915ddc 100644
--- a/README_ZH.md
+++ b/README_ZH.md
@@ -38,6 +38,7 @@ AgentScope是一款全新的Multi-Agent框架,专为应用开发者打造,
- [Basic Usage](#基础使用)
- [Step 1: Prepare Model Configs](#第1步准备model-configs)
- [OpenAI API Configs](#openai-api-configs)
+ - [DashScope API Config](#dashscope-api-config)
- [Post Request API Config](#post-request-api-config)
- [Step 2: Create Agents](#第2步创建agent)
- [Step 3: Construct Conversation](#第3步构造对话)
@@ -120,6 +121,7 @@ AgentScope支持以下模型API服务:
| OpenAI Chat API | `openai` | Standard OpenAI Chat API, FastChat, and vLLM |
| OpenAI DALL-E API | `openai_dall_e` | Standard DALL-E API |
| OpenAI Embedding API | `openai_embedding` | OpenAI embedding API |
+| DashScope Chat API | `dashscope_chat` | DashScope chat API, including the Qwen (Tongyi Qianwen) series |
| Post API | `post_api` | Huggingface/ModelScope inference API, and customized post API |
##### OpenAI API Configs
@@ -137,6 +139,21 @@ AgentScope支持以下模型API服务:
}
```
+##### DashScope API Config
+
+For DashScope APIs, you need to prepare a dict of model config with the following fields:
+
+```
+{
+    "config_name": "{config name}", # The name to identify the config
+    "model_type": "dashscope_chat" | "dashscope_text_embedding" | "dashscope_image_synthesis",
+    "model_name": "{model name, e.g. qwen-max}", # The model in DashScope API
+    "api_key": "xxx", # The API key for DashScope API.
+}
+```
+
+> Note: DashScope APIs may impose strict requirements on the `role` field in messages. Please use with caution.
+
##### Post Request API Config
For post request APIs, the config contains the following fields.
diff --git a/examples/distributed/README.md b/examples/distributed/README.md
index 8cb4169c8..1e1610894 100644
--- a/examples/distributed/README.md
+++ b/examples/distributed/README.md
@@ -27,39 +27,58 @@ Now, you can chat with the assistant agent using the command line.
## Distributed debate competition (`distributed_debate.py`)
This example simulates a debate competition with three participant agents: the affirmative side (**Pro**), the negative side (**Con**), and the adjudicator (**Judge**).
+**You can join the debate as Pro, as Con, or as both.**
-Pro believes that AGI can be achieved using the GPT model framework, while Con contests it. Judge listens to both sides' arguments and provides an analytical judgment on which side presented a more compelling and reasonable case.
+Pro believes that AGI can be achieved using the GPT model framework, while Con contests it.
+Judge listens to both sides' arguments and provides an analytical judgment on which side presented a more compelling and reasonable case.
Each agent is an independent process and can run on different machines.
+You can join the debate as Pro or Con by providing the `--is-human` argument.
Messages generated by any agent can be observed by the other agents in the debate.
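+
+Under the hood, this observability comes from AgentScope's `msghub`: every message broadcast inside the hub is delivered to all participants. A minimal sketch of the pattern (agent construction omitted; see `distributed_debate.py` for the full setup):
+
+```python
+from agentscope.msghub import msghub
+from agentscope.message import Msg
+
+# Every participant observes every message produced inside the hub, so
+# each agent can be called without passing the previous reply explicitly.
+with msghub(participants=[pro_agent, con_agent, judge_agent]) as hub:
+    hub.broadcast(Msg(name="system", content="Round 1: Pro goes first."))
+    pro_resp = pro_agent()  # replies based on what it has observed so far
+    con_resp = con_agent()  # has already observed Pro's reply
+```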
-```
-# step 1: setup Pro, Con, Judge agent server separately
+> Due to role restrictions, DashScope APIs (e.g. Qwen) cannot currently run this example.
-# please make sure the ports are available and the ip addresses are accessible, here we use localhost as an example.
-# if you run all agent servers on the same machine, you can ignore the host field, it will use localhost by default.
+### Step 1: Set up the Pro and Con agent servers
-# setup Pro
+```shell
cd examples/distributed
+# setup LLM-based Pro
python distributed_debate.py --role pro --pro-host localhost --pro-port 12011
+# or join the debate as Pro by yourself
+# python distributed_debate.py --role pro --pro-host localhost --pro-port 12011 --is-human
+```
-# setup Con
+```shell
cd examples/distributed
+# setup LLM-based Con
python distributed_debate.py --role con --con-host localhost --con-port 12012
+# or join the debate as Con by yourself
+# python distributed_debate.py --role con --con-host localhost --con-port 12012 --is-human
+```
-# setup Judge
-cd examples/distributed
-python distributed_debate.py --role judge --judge-host localhost --judge-port 12013
-
+> Please make sure the ports are available and the IP addresses are accessible; here we use localhost as an example.
+> If you run all agent servers on the same machine, you can omit the host fields; they default to localhost.
-# step 2: run the main process
+### Step 2: Run the main process
+```shell
+# set up the main process (the Judge agent runs inside it)
cd examples/distributed
python distributed_debate.py --role main \
--pro-host localhost --pro-port 12011 \
- --con-host localhost --con-port 12012 \
- --judge-host localhost --judge-port 12013
+ --con-host localhost --con-port 12012
+```
+
+### Step 3: Watch or join the debate in your terminal
+
+Suppose you join the debate as Con; you will see the following in your terminal.
+
+```text
+System: Welcome to the debate on whether Artificial General Intelligence (AGI) can be achieved
+...
+Pro: Thank you. I argue that AGI can be achieved using the GPT model framework.
+...
-# step 3: watch the debate process in the terminal of the main process.
+User Input:
```
diff --git a/examples/distributed/configs/debate_agent_configs.json b/examples/distributed/configs/debate_agent_configs.json
index 819caaed1..05da11aa2 100644
--- a/examples/distributed/configs/debate_agent_configs.json
+++ b/examples/distributed/configs/debate_agent_configs.json
@@ -1,6 +1,6 @@
[
{
- "class": "DictDialogAgent",
+ "class": "DialogAgent",
"args": {
"name": "Pro",
"sys_prompt": "Assume the role of a debater who is arguing in favor of the proposition that AGI (Artificial General Intelligence) can be achieved using the GPT model framework. Construct a coherent and persuasive argument, including scientific, technological, and theoretical evidence, to support the statement that GPT models are a viable path to AGI. Highlight the advancements in language understanding, adaptability, and scalability of GPT models as key factors in progressing towards AGI.",
@@ -9,7 +9,7 @@
}
},
{
- "class": "DictDialogAgent",
+ "class": "DialogAgent",
"args": {
"name": "Con",
"sys_prompt": "Assume the role of a debater who is arguing against the proposition that AGI can be achieved using the GPT model framework. Construct a coherent and persuasive argument, including scientific, technological, and theoretical evidence, to support the statement that GPT models, while impressive, are insufficient for reaching AGI. Discuss the limitations of GPT models such as lack of understanding, consciousness, ethical reasoning, and general problem-solving abilities that are essential for true AGI.",
@@ -18,7 +18,7 @@
}
},
{
- "class": "DictDialogAgent",
+ "class": "DialogAgent",
"args": {
"name": "Judge",
"sys_prompt": "Assume the role of an impartial judge in a debate where the affirmative side argues that AGI can be achieved using the GPT model framework, and the negative side contests this. Listen to both sides' arguments and provide an analytical judgment on which side presented a more compelling and reasonable case. Consider the strength of the evidence, the persuasiveness of the reasoning, and the overall coherence of the arguments presented by each side.",
diff --git a/examples/distributed/distributed_debate.py b/examples/distributed/distributed_debate.py
index 669b05304..a5564c657 100644
--- a/examples/distributed/distributed_debate.py
+++ b/examples/distributed/distributed_debate.py
@@ -4,6 +4,8 @@
import argparse
import json
+from user_proxy_agent import UserProxyAgent
+
import agentscope
from agentscope.msghub import msghub
from agentscope.agents.dialog_agent import DialogAgent
@@ -11,7 +13,7 @@
from agentscope.message import Msg
from agentscope.utils.logging_utils import logger
-ANNOUNCEMENT = """
+FIRST_ROUND = """
Welcome to the debate on whether Artificial General Intelligence (AGI) can be achieved using the GPT model framework. This debate will consist of three rounds. In each round, the affirmative side will present their argument first, followed by the negative side. After both sides have presented, the adjudicator will summarize the key points and analyze the strengths of the arguments.
The rules are as follows:
@@ -24,15 +26,28 @@
Let us begin the first round. The affirmative side: please present your argument for why AGI can be achieved using the GPT model framework.
""" # noqa
+SECOND_ROUND = """
+Let us begin the second round. It's your turn, affirmative side.
+"""
+
+THIRD_ROUND = """
+Next is the final round.
+"""
+
+END = """
+Judge, please declare the overall winner now.
+"""
+
def parse_args() -> argparse.Namespace:
"""Parse arguments"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--role",
- choices=["pro", "con", "judge", "main"],
+ choices=["pro", "con", "main"],
default="main",
)
+ parser.add_argument("--is-human", action="store_true")
parser.add_argument("--pro-host", type=str, default="localhost")
parser.add_argument(
"--pro-port",
@@ -59,28 +74,34 @@ def setup_server(parsed_args: argparse.Namespace) -> None:
agentscope.init(
model_configs="configs/model_configs.json",
)
- with open(
- "configs/debate_agent_configs.json",
- "r",
- encoding="utf-8",
- ) as f:
- configs = json.load(f)
- configs = {
- "pro": configs[0]["args"],
- "con": configs[1]["args"],
- "judge": configs[2]["args"],
- }
- config = configs[parsed_args.role]
- host = getattr(parsed_args, f"{parsed_args.role}_host")
- port = getattr(parsed_args, f"{parsed_args.role}_port")
- server_launcher = RpcAgentServerLauncher(
- agent_class=DialogAgent,
- agent_kwargs=config,
- host=host,
- port=port,
- )
- server_launcher.launch()
- server_launcher.wait_until_terminate()
+ host = getattr(parsed_args, f"{parsed_args.role}_host")
+ port = getattr(parsed_args, f"{parsed_args.role}_port")
+ if parsed_args.is_human:
+ agent_class = UserProxyAgent
+ config = {"name": parsed_args.role}
+ else:
+ with open(
+ "configs/debate_agent_configs.json",
+ "r",
+ encoding="utf-8",
+ ) as f:
+ configs = json.load(f)
+ configs = {
+ "pro": configs[0]["args"],
+ "con": configs[1]["args"],
+ "judge": configs[2]["args"],
+ }
+ config = configs[parsed_args.role]
+ agent_class = DialogAgent
+
+ server_launcher = RpcAgentServerLauncher(
+ agent_class=agent_class,
+ agent_kwargs=config,
+ host=host,
+ port=port,
+ )
+ server_launcher.launch(in_subprocess=False)
+ server_launcher.wait_until_terminate()
def run_main_process(parsed_args: argparse.Namespace) -> None:
@@ -99,24 +120,23 @@ def run_main_process(parsed_args: argparse.Namespace) -> None:
port=parsed_args.con_port,
launch_server=False,
)
- judge_agent = judge_agent.to_dist(
- host=parsed_args.judge_host,
- port=parsed_args.judge_port,
- launch_server=False,
- )
participants = [pro_agent, con_agent, judge_agent]
- hint = Msg(name="System", content=ANNOUNCEMENT)
- x = None
- with msghub(participants=participants, announcement=hint):
- for _ in range(3):
- pro_resp = pro_agent(x)
+ announcements = [
+ Msg(name="system", content=FIRST_ROUND),
+ Msg(name="system", content=SECOND_ROUND),
+ Msg(name="system", content=THIRD_ROUND),
+ ]
+ end = Msg(name="system", content=END)
+ with msghub(participants=participants) as hub:
+ for i in range(3):
+ hub.broadcast(announcements[i])
+ pro_resp = pro_agent()
logger.chat(pro_resp)
- con_resp = con_agent(pro_resp)
+ con_resp = con_agent()
logger.chat(con_resp)
- x = judge_agent(con_resp)
- logger.chat(x)
- x = judge_agent(x)
- logger.chat(x)
+ judge_agent()
+ hub.broadcast(end)
+ judge_agent()
if __name__ == "__main__":
diff --git a/examples/distributed/user_proxy_agent.py b/examples/distributed/user_proxy_agent.py
new file mode 100644
index 000000000..6c47bac23
--- /dev/null
+++ b/examples/distributed/user_proxy_agent.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+"""User Proxy Agent class for distributed usage"""
+from typing import Optional, Sequence, Union
+
+from agentscope.agents import UserAgent
+
+
+class UserProxyAgent(UserAgent):
+    """A UserAgent variant for distributed usage: it echoes incoming
+    messages to the terminal before prompting for user input."""
+
+    def reply(
+        self,
+        x: dict = None,
+        required_keys: Optional[Union[list[str], str]] = None,
+    ) -> dict:
+        """Show the incoming message, then collect the user's reply."""
+        if x is not None:
+            self.speak(x)
+        return super().reply(x, required_keys)
+
+    def observe(self, x: Union[dict, Sequence[dict]]) -> None:
+        """Show the observed message(s) and record them in memory."""
+        if x is not None:
+            self.speak(x)  # type: ignore[arg-type]
+        self.memory.add(x)
diff --git a/src/agentscope/agents/rpc_agent.py b/src/agentscope/agents/rpc_agent.py
index 6edf1e808..f863fbca7 100644
--- a/src/agentscope/agents/rpc_agent.py
+++ b/src/agentscope/agents/rpc_agent.py
@@ -397,7 +397,13 @@ def _launch_in_sub(self) -> None:
)
def launch(self, in_subprocess: bool = True) -> None:
- """launch a local rpc agent server."""
+    """Launch an RPC agent server.
+
+    Args:
+        in_subprocess (bool, optional): Whether to launch the server in a
+            subprocess. Defaults to True. For agents that need to obtain
+            command line input, such as UserAgent, set this value to False.
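+
+    Example:
+        A minimal sketch (the host and port values are illustrative,
+        mirroring the distributed debate example)::
+
+            launcher = RpcAgentServerLauncher(
+                agent_class=UserAgent,
+                agent_kwargs={"name": "user"},
+                host="localhost",
+                port=12011,
+            )
+            launcher.launch(in_subprocess=False)
+            launcher.wait_until_terminate()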
+ """
if in_subprocess:
self._launch_in_sub()
else:
diff --git a/src/agentscope/web/studio/utils.py b/src/agentscope/web/studio/utils.py
index 503e35516..ebf753d81 100644
--- a/src/agentscope/web/studio/utils.py
+++ b/src/agentscope/web/studio/utils.py
@@ -180,12 +180,12 @@ def audio2text(audio_path: str) -> str:
return " ".join([s["text"] for s in result["output"]["sentence"]])
-def user_input() -> str:
+def user_input(prefix: str = "User input: ") -> str:
"""get user input"""
if hasattr(thread_local_data, "uid"):
content = get_player_input(
uid=thread_local_data.uid,
)
else:
- content = input("User input: ")
+ content = input(prefix)
return content