From 0b4bf85481d3cde1f4a5f16d6faeb6e9c9d2a49c Mon Sep 17 00:00:00 2001 From: DavdGao Date: Tue, 12 Mar 2024 14:43:15 +0800 Subject: [PATCH] [Ready] Translate tutorial into Chinese (#49) Co-authored-by: ZiTao-Li Co-authored-by: rayrayraykk Co-authored-by: panxuchen.pxc Co-authored-by: qbc --- .github/workflows/sphinx_docs.yml | 2 +- .pre-commit-config.yaml | 1 + docs/sphinx_doc/Makefile | 42 ++- docs/sphinx_doc/assets/redirect.html | 12 + docs/sphinx_doc/en/source/_static/custom.css | 4 + .../source/_templates/language_selector.html | 5 + .../en/source/_templates/layout.html | 3 + .../{ => en}/source/agentscope.agents.rst | 0 .../{ => en}/source/agentscope.memory.rst | 0 .../{ => en}/source/agentscope.models.rst | 0 .../{ => en}/source/agentscope.pipelines.rst | 0 .../{ => en}/source/agentscope.rpc.rst | 0 .../sphinx_doc/{ => en}/source/agentscope.rst | 0 .../agentscope.service.execute_code.rst | 0 .../source/agentscope.service.file.rst | 0 .../source/agentscope.service.retrieval.rst | 0 .../{ => en}/source/agentscope.service.rst | 0 .../source/agentscope.service.sql_query.rst | 0 .../agentscope.service.text_processing.rst | 0 .../source/agentscope.service.web_search.rst | 0 .../{ => en}/source/agentscope.utils.rst | 0 .../{ => en}/source/agentscope.web.rst | 3 +- docs/sphinx_doc/{ => en}/source/conf.py | 6 + docs/sphinx_doc/{ => en}/source/index.rst | 0 .../en/source/tutorial/101-agentscope.md | 116 +++++++ .../source/tutorial/102-installation.md} | 16 +- .../{ => en}/source/tutorial/103-example.md | 43 ++- .../{ => en}/source/tutorial/104-usecase.md | 18 +- .../{ => en}/source/tutorial/105-logging.md | 5 +- .../{ => en}/source/tutorial/201-agent.md | 6 +- .../{ => en}/source/tutorial/202-pipeline.md | 8 +- .../en/source/tutorial/203-model.md | 220 ++++++++++++ .../en/source/tutorial/204-service.md | 260 ++++++++++++++ .../{ => en}/source/tutorial/205-memory.md | 171 +++++++--- .../{ => en}/source/tutorial/206-prompt.md | 17 +- .../{ => 
en}/source/tutorial/207-monitor.md | 4 +- .../en/source/tutorial/208-distribute.md | 156 +++++++++ .../{ => en}/source/tutorial/301-community.md | 4 +- .../source/tutorial/302-contribute.md | 4 +- .../{ => en}/source/tutorial/advance.rst | 0 .../{ => en}/source/tutorial/contribute.rst | 0 .../{ => en}/source/tutorial/main.md | 30 +- .../{ => en}/source/tutorial/quick_start.rst | 4 +- docs/sphinx_doc/make.bat | 35 -- .../source/tutorial/102-concepts.md | 46 --- docs/sphinx_doc/source/tutorial/203-model.md | 256 -------------- .../sphinx_doc/source/tutorial/204-service.md | 135 -------- .../source/tutorial/208-distribute.md | 153 --------- .../zh_CN/source/_static/custom.css | 4 + .../source/_templates/language_selector.html | 5 + .../zh_CN/source/_templates/layout.html | 3 + .../zh_CN/source/agentscope.agents.rst | 59 ++++ .../zh_CN/source/agentscope.memory.rst | 20 ++ .../zh_CN/source/agentscope.models.rst | 42 +++ .../zh_CN/source/agentscope.pipelines.rst | 18 + .../zh_CN/source/agentscope.rpc.rst | 20 ++ docs/sphinx_doc/zh_CN/source/agentscope.rst | 48 +++ .../agentscope.service.execute_code.rst | 11 + .../zh_CN/source/agentscope.service.file.rst | 28 ++ .../source/agentscope.service.retrieval.rst | 20 ++ .../zh_CN/source/agentscope.service.rst | 33 ++ .../source/agentscope.service.sql_query.rst | 27 ++ .../agentscope.service.text_processing.rst | 12 + .../source/agentscope.service.web_search.rst | 11 + .../zh_CN/source/agentscope.utils.rst | 43 +++ .../zh_CN/source/agentscope.web.rst | 10 + docs/sphinx_doc/zh_CN/source/conf.py | 79 +++++ docs/sphinx_doc/zh_CN/source/index.rst | 46 +++ .../source/tutorial_zh/101-agentscope.md | 93 +++++ .../source/tutorial_zh/102-installation.md | 69 ++++ .../zh_CN/source/tutorial_zh/103-example.md | 117 +++++++ .../zh_CN/source/tutorial_zh/104-usecase.md | 319 ++++++++++++++++++ .../zh_CN/source/tutorial_zh/105-logging.md | 95 ++++++ .../zh_CN/source/tutorial_zh/201-agent.md | 174 ++++++++++ 
.../zh_CN/source/tutorial_zh/202-pipeline.md | 302 +++++++++++++++++ .../zh_CN/source/tutorial_zh/203-model.md | 209 ++++++++++++ .../zh_CN/source/tutorial_zh/204-service.md | 238 +++++++++++++ .../zh_CN/source/tutorial_zh/205-memory.md | 214 ++++++++++++ .../zh_CN/source/tutorial_zh/206-prompt.md | 69 ++++ .../zh_CN/source/tutorial_zh/207-monitor.md | 172 ++++++++++ .../source/tutorial_zh/208-distribute.md | 157 +++++++++ .../zh_CN/source/tutorial_zh/301-community.md | 34 ++ .../source/tutorial_zh/302-contribute.md | 70 ++++ .../zh_CN/source/tutorial_zh/advance.rst | 14 + .../zh_CN/source/tutorial_zh/contribute.rst | 8 + .../zh_CN/source/tutorial_zh/main.md | 35 ++ .../zh_CN/source/tutorial_zh/quick_start.rst | 11 + 87 files changed, 3960 insertions(+), 764 deletions(-) create mode 100644 docs/sphinx_doc/assets/redirect.html create mode 100644 docs/sphinx_doc/en/source/_static/custom.css create mode 100644 docs/sphinx_doc/en/source/_templates/language_selector.html create mode 100644 docs/sphinx_doc/en/source/_templates/layout.html rename docs/sphinx_doc/{ => en}/source/agentscope.agents.rst (100%) rename docs/sphinx_doc/{ => en}/source/agentscope.memory.rst (100%) rename docs/sphinx_doc/{ => en}/source/agentscope.models.rst (100%) rename docs/sphinx_doc/{ => en}/source/agentscope.pipelines.rst (100%) rename docs/sphinx_doc/{ => en}/source/agentscope.rpc.rst (100%) rename docs/sphinx_doc/{ => en}/source/agentscope.rst (100%) rename docs/sphinx_doc/{ => en}/source/agentscope.service.execute_code.rst (100%) rename docs/sphinx_doc/{ => en}/source/agentscope.service.file.rst (100%) rename docs/sphinx_doc/{ => en}/source/agentscope.service.retrieval.rst (100%) rename docs/sphinx_doc/{ => en}/source/agentscope.service.rst (100%) rename docs/sphinx_doc/{ => en}/source/agentscope.service.sql_query.rst (100%) rename docs/sphinx_doc/{ => en}/source/agentscope.service.text_processing.rst (100%) rename docs/sphinx_doc/{ => en}/source/agentscope.service.web_search.rst (100%) 
rename docs/sphinx_doc/{ => en}/source/agentscope.utils.rst (100%) rename docs/sphinx_doc/{ => en}/source/agentscope.web.rst (63%) rename docs/sphinx_doc/{ => en}/source/conf.py (97%) rename docs/sphinx_doc/{ => en}/source/index.rst (100%) create mode 100644 docs/sphinx_doc/en/source/tutorial/101-agentscope.md rename docs/sphinx_doc/{source/tutorial/101-installation.md => en/source/tutorial/102-installation.md} (90%) rename docs/sphinx_doc/{ => en}/source/tutorial/103-example.md (65%) rename docs/sphinx_doc/{ => en}/source/tutorial/104-usecase.md (94%) rename docs/sphinx_doc/{ => en}/source/tutorial/105-logging.md (98%) rename docs/sphinx_doc/{ => en}/source/tutorial/201-agent.md (98%) rename docs/sphinx_doc/{ => en}/source/tutorial/202-pipeline.md (97%) create mode 100644 docs/sphinx_doc/en/source/tutorial/203-model.md create mode 100644 docs/sphinx_doc/en/source/tutorial/204-service.md rename docs/sphinx_doc/{ => en}/source/tutorial/205-memory.md (51%) rename docs/sphinx_doc/{ => en}/source/tutorial/206-prompt.md (80%) rename docs/sphinx_doc/{ => en}/source/tutorial/207-monitor.md (98%) create mode 100644 docs/sphinx_doc/en/source/tutorial/208-distribute.md rename docs/sphinx_doc/{ => en}/source/tutorial/301-community.md (96%) rename docs/sphinx_doc/{ => en}/source/tutorial/302-contribute.md (97%) rename docs/sphinx_doc/{ => en}/source/tutorial/advance.rst (100%) rename docs/sphinx_doc/{ => en}/source/tutorial/contribute.rst (100%) rename docs/sphinx_doc/{ => en}/source/tutorial/main.md (59%) rename docs/sphinx_doc/{ => en}/source/tutorial/quick_start.rst (72%) delete mode 100644 docs/sphinx_doc/make.bat delete mode 100644 docs/sphinx_doc/source/tutorial/102-concepts.md delete mode 100644 docs/sphinx_doc/source/tutorial/203-model.md delete mode 100644 docs/sphinx_doc/source/tutorial/204-service.md delete mode 100644 docs/sphinx_doc/source/tutorial/208-distribute.md create mode 100644 docs/sphinx_doc/zh_CN/source/_static/custom.css create mode 100644 
docs/sphinx_doc/zh_CN/source/_templates/language_selector.html create mode 100644 docs/sphinx_doc/zh_CN/source/_templates/layout.html create mode 100644 docs/sphinx_doc/zh_CN/source/agentscope.agents.rst create mode 100644 docs/sphinx_doc/zh_CN/source/agentscope.memory.rst create mode 100644 docs/sphinx_doc/zh_CN/source/agentscope.models.rst create mode 100644 docs/sphinx_doc/zh_CN/source/agentscope.pipelines.rst create mode 100644 docs/sphinx_doc/zh_CN/source/agentscope.rpc.rst create mode 100644 docs/sphinx_doc/zh_CN/source/agentscope.rst create mode 100644 docs/sphinx_doc/zh_CN/source/agentscope.service.execute_code.rst create mode 100644 docs/sphinx_doc/zh_CN/source/agentscope.service.file.rst create mode 100644 docs/sphinx_doc/zh_CN/source/agentscope.service.retrieval.rst create mode 100644 docs/sphinx_doc/zh_CN/source/agentscope.service.rst create mode 100644 docs/sphinx_doc/zh_CN/source/agentscope.service.sql_query.rst create mode 100644 docs/sphinx_doc/zh_CN/source/agentscope.service.text_processing.rst create mode 100644 docs/sphinx_doc/zh_CN/source/agentscope.service.web_search.rst create mode 100644 docs/sphinx_doc/zh_CN/source/agentscope.utils.rst create mode 100644 docs/sphinx_doc/zh_CN/source/agentscope.web.rst create mode 100644 docs/sphinx_doc/zh_CN/source/conf.py create mode 100644 docs/sphinx_doc/zh_CN/source/index.rst create mode 100644 docs/sphinx_doc/zh_CN/source/tutorial_zh/101-agentscope.md create mode 100644 docs/sphinx_doc/zh_CN/source/tutorial_zh/102-installation.md create mode 100644 docs/sphinx_doc/zh_CN/source/tutorial_zh/103-example.md create mode 100644 docs/sphinx_doc/zh_CN/source/tutorial_zh/104-usecase.md create mode 100644 docs/sphinx_doc/zh_CN/source/tutorial_zh/105-logging.md create mode 100644 docs/sphinx_doc/zh_CN/source/tutorial_zh/201-agent.md create mode 100644 docs/sphinx_doc/zh_CN/source/tutorial_zh/202-pipeline.md create mode 100644 docs/sphinx_doc/zh_CN/source/tutorial_zh/203-model.md create mode 100644 
docs/sphinx_doc/zh_CN/source/tutorial_zh/204-service.md create mode 100644 docs/sphinx_doc/zh_CN/source/tutorial_zh/205-memory.md create mode 100644 docs/sphinx_doc/zh_CN/source/tutorial_zh/206-prompt.md create mode 100644 docs/sphinx_doc/zh_CN/source/tutorial_zh/207-monitor.md create mode 100644 docs/sphinx_doc/zh_CN/source/tutorial_zh/208-distribute.md create mode 100644 docs/sphinx_doc/zh_CN/source/tutorial_zh/301-community.md create mode 100644 docs/sphinx_doc/zh_CN/source/tutorial_zh/302-contribute.md create mode 100644 docs/sphinx_doc/zh_CN/source/tutorial_zh/advance.rst create mode 100644 docs/sphinx_doc/zh_CN/source/tutorial_zh/contribute.rst create mode 100644 docs/sphinx_doc/zh_CN/source/tutorial_zh/main.md create mode 100644 docs/sphinx_doc/zh_CN/source/tutorial_zh/quick_start.rst diff --git a/.github/workflows/sphinx_docs.yml b/.github/workflows/sphinx_docs.yml index 86750643b..ccf993dff 100644 --- a/.github/workflows/sphinx_docs.yml +++ b/.github/workflows/sphinx_docs.yml @@ -33,7 +33,7 @@ jobs: name: Build Documentation run: | cd docs/sphinx_doc - make clean html + make clean all - name: Upload Documentation uses: actions/upload-artifact@v3 with: diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 35f778a37..8ba726ab7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -28,6 +28,7 @@ repos: (?x)( pb2\.py$ | grpc\.py$ + | ^docs ) args: [ --disallow-untyped-defs, --disallow-incomplete-defs, diff --git a/docs/sphinx_doc/Makefile b/docs/sphinx_doc/Makefile index d0c3cbf10..a90845ebf 100644 --- a/docs/sphinx_doc/Makefile +++ b/docs/sphinx_doc/Makefile @@ -1,20 +1,32 @@ -# Minimal makefile for Sphinx documentation -# +# Makefile -# You can set these variables from the command line, and also -# from the environment for the first two. 
-SPHINXOPTS ?= -SPHINXBUILD ?= sphinx-build -SOURCEDIR = source -BUILDDIR = build +SPHINXBUILD = sphinx-build +SPHINXPROJ = AgentScope-Doc +ASSETSDIR = assets +BUILDDIR = build/html +SOURCEDIR_EN = en/source +BUILDDIR_EN = build/html/en +SOURCEDIR_ZH = zh_CN/source +BUILDDIR_ZH = build/html/zh_CN -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) +# English document +en: + @$(SPHINXBUILD) -b html "$(SOURCEDIR_EN)" "$(BUILDDIR_EN)" + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR_EN)" -.PHONY: help Makefile +# Chinese document +zh_CN: + @$(SPHINXBUILD) -b html "$(SOURCEDIR_ZH)" "$(BUILDDIR_ZH)" + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR_ZH)" + +index: + @cp "$(ASSETSDIR)/redirect.html" "$(BUILDDIR)/index.html" -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR_EN)" "$(BUILDDIR_EN)" $(O) + +all: en zh_CN index + +.PHONY: all en zh_CN index \ No newline at end of file diff --git a/docs/sphinx_doc/assets/redirect.html b/docs/sphinx_doc/assets/redirect.html new file mode 100644 index 000000000..1b7980de8 --- /dev/null +++ b/docs/sphinx_doc/assets/redirect.html @@ -0,0 +1,12 @@ + + + + + + AgentScope Documentation + + +

Redirecting to English documentation...

+

If you are not redirected, click here.

+ + diff --git a/docs/sphinx_doc/en/source/_static/custom.css b/docs/sphinx_doc/en/source/_static/custom.css new file mode 100644 index 000000000..68f11ceed --- /dev/null +++ b/docs/sphinx_doc/en/source/_static/custom.css @@ -0,0 +1,4 @@ +.language-selector a { + color: white; + width: 20px; +} \ No newline at end of file diff --git a/docs/sphinx_doc/en/source/_templates/language_selector.html b/docs/sphinx_doc/en/source/_templates/language_selector.html new file mode 100644 index 000000000..cd289bf7e --- /dev/null +++ b/docs/sphinx_doc/en/source/_templates/language_selector.html @@ -0,0 +1,5 @@ + +
+ English | + 中文 +
diff --git a/docs/sphinx_doc/en/source/_templates/layout.html b/docs/sphinx_doc/en/source/_templates/layout.html new file mode 100644 index 000000000..1d182d309 --- /dev/null +++ b/docs/sphinx_doc/en/source/_templates/layout.html @@ -0,0 +1,3 @@ + +{% extends "!layout.html" %} {% block sidebartitle %} {{ super() }} {% include +"language_selector.html" %} {% endblock %} diff --git a/docs/sphinx_doc/source/agentscope.agents.rst b/docs/sphinx_doc/en/source/agentscope.agents.rst similarity index 100% rename from docs/sphinx_doc/source/agentscope.agents.rst rename to docs/sphinx_doc/en/source/agentscope.agents.rst diff --git a/docs/sphinx_doc/source/agentscope.memory.rst b/docs/sphinx_doc/en/source/agentscope.memory.rst similarity index 100% rename from docs/sphinx_doc/source/agentscope.memory.rst rename to docs/sphinx_doc/en/source/agentscope.memory.rst diff --git a/docs/sphinx_doc/source/agentscope.models.rst b/docs/sphinx_doc/en/source/agentscope.models.rst similarity index 100% rename from docs/sphinx_doc/source/agentscope.models.rst rename to docs/sphinx_doc/en/source/agentscope.models.rst diff --git a/docs/sphinx_doc/source/agentscope.pipelines.rst b/docs/sphinx_doc/en/source/agentscope.pipelines.rst similarity index 100% rename from docs/sphinx_doc/source/agentscope.pipelines.rst rename to docs/sphinx_doc/en/source/agentscope.pipelines.rst diff --git a/docs/sphinx_doc/source/agentscope.rpc.rst b/docs/sphinx_doc/en/source/agentscope.rpc.rst similarity index 100% rename from docs/sphinx_doc/source/agentscope.rpc.rst rename to docs/sphinx_doc/en/source/agentscope.rpc.rst diff --git a/docs/sphinx_doc/source/agentscope.rst b/docs/sphinx_doc/en/source/agentscope.rst similarity index 100% rename from docs/sphinx_doc/source/agentscope.rst rename to docs/sphinx_doc/en/source/agentscope.rst diff --git a/docs/sphinx_doc/source/agentscope.service.execute_code.rst b/docs/sphinx_doc/en/source/agentscope.service.execute_code.rst similarity index 100% rename from 
docs/sphinx_doc/source/agentscope.service.execute_code.rst rename to docs/sphinx_doc/en/source/agentscope.service.execute_code.rst diff --git a/docs/sphinx_doc/source/agentscope.service.file.rst b/docs/sphinx_doc/en/source/agentscope.service.file.rst similarity index 100% rename from docs/sphinx_doc/source/agentscope.service.file.rst rename to docs/sphinx_doc/en/source/agentscope.service.file.rst diff --git a/docs/sphinx_doc/source/agentscope.service.retrieval.rst b/docs/sphinx_doc/en/source/agentscope.service.retrieval.rst similarity index 100% rename from docs/sphinx_doc/source/agentscope.service.retrieval.rst rename to docs/sphinx_doc/en/source/agentscope.service.retrieval.rst diff --git a/docs/sphinx_doc/source/agentscope.service.rst b/docs/sphinx_doc/en/source/agentscope.service.rst similarity index 100% rename from docs/sphinx_doc/source/agentscope.service.rst rename to docs/sphinx_doc/en/source/agentscope.service.rst diff --git a/docs/sphinx_doc/source/agentscope.service.sql_query.rst b/docs/sphinx_doc/en/source/agentscope.service.sql_query.rst similarity index 100% rename from docs/sphinx_doc/source/agentscope.service.sql_query.rst rename to docs/sphinx_doc/en/source/agentscope.service.sql_query.rst diff --git a/docs/sphinx_doc/source/agentscope.service.text_processing.rst b/docs/sphinx_doc/en/source/agentscope.service.text_processing.rst similarity index 100% rename from docs/sphinx_doc/source/agentscope.service.text_processing.rst rename to docs/sphinx_doc/en/source/agentscope.service.text_processing.rst diff --git a/docs/sphinx_doc/source/agentscope.service.web_search.rst b/docs/sphinx_doc/en/source/agentscope.service.web_search.rst similarity index 100% rename from docs/sphinx_doc/source/agentscope.service.web_search.rst rename to docs/sphinx_doc/en/source/agentscope.service.web_search.rst diff --git a/docs/sphinx_doc/source/agentscope.utils.rst b/docs/sphinx_doc/en/source/agentscope.utils.rst similarity index 100% rename from 
docs/sphinx_doc/source/agentscope.utils.rst rename to docs/sphinx_doc/en/source/agentscope.utils.rst diff --git a/docs/sphinx_doc/source/agentscope.web.rst b/docs/sphinx_doc/en/source/agentscope.web.rst similarity index 63% rename from docs/sphinx_doc/source/agentscope.web.rst rename to docs/sphinx_doc/en/source/agentscope.web.rst index 3fc7529bc..250189741 100644 --- a/docs/sphinx_doc/source/agentscope.web.rst +++ b/docs/sphinx_doc/en/source/agentscope.web.rst @@ -4,8 +4,7 @@ Web UI package app module ----------------------------- -.. automodule:: agentscope.web.app +.. automodule:: agentscope.web._app :members: - :undoc-members: gradio_groupchat :show-inheritance: diff --git a/docs/sphinx_doc/source/conf.py b/docs/sphinx_doc/en/source/conf.py similarity index 97% rename from docs/sphinx_doc/source/conf.py rename to docs/sphinx_doc/en/source/conf.py index 003a56492..3f998c348 100644 --- a/docs/sphinx_doc/source/conf.py +++ b/docs/sphinx_doc/en/source/conf.py @@ -19,6 +19,8 @@ # -- Project information ----------------------------------------------------- +language = "en" + project = "AgentScope" copyright = "2024, Alibaba Tongyi Lab" author = "SysML team of Alibaba Tongyi Lab" @@ -71,3 +73,7 @@ ".rst": "restructuredtext", ".md": "markdown", } + +html_css_files = [ + "custom.css", +] diff --git a/docs/sphinx_doc/source/index.rst b/docs/sphinx_doc/en/source/index.rst similarity index 100% rename from docs/sphinx_doc/source/index.rst rename to docs/sphinx_doc/en/source/index.rst diff --git a/docs/sphinx_doc/en/source/tutorial/101-agentscope.md b/docs/sphinx_doc/en/source/tutorial/101-agentscope.md new file mode 100644 index 000000000..e535a5fee --- /dev/null +++ b/docs/sphinx_doc/en/source/tutorial/101-agentscope.md @@ -0,0 +1,116 @@ +(101-agentscope-en)= + +# About AgentScope + +In this tutorial, we will provide an overview of AgentScope by answering +several questions, including what's AgentScope, what can AgentScope provide, +and why we should choose AgentScope. 
Let's get started! + +## What is AgentScope? + +AgentScope is a developer-centric multi-agent platform, which enables +developers to build their LLM-empowered multi-agent applications with less +effort. + +With the advance of large language models, developers are able to build +diverse applications. +In order to connect LLMs to data and services and solve complex tasks, +AgentScope provides a series of development tools and components for ease of +development. +It features + +- **usability**, +- **robustness**, and +- **the support of multi-modal data** and +- **distributed deployment**. + +## Key Concepts + +### Message + +Message is a carrier of information (e.g. instructions, multi-modal +data, and dialogue). In AgentScope, message is a Python dict subclass +with `name` and `content` as necessary fields, and `url` as an optional +field referring to additional resources. + +### Agent + +Agent is an autonomous entity capable of interacting with environment and +agents, and taking actions to change the environment. In AgentScope, an +agent takes message as input and generates corresponding response message. + +### Service + +Service refers to the functional APIs that enable agents to perform +specific tasks. In AgentScope, services are categorized into model API +services, which are channels to use the LLMs, and general API services, +which provide a variety of tool functions. + +### Workflow + +Workflow represents ordered sequences of agent executions and message +exchanges between agents, analogous to computational graphs in TensorFlow, +but with the flexibility to accommodate non-DAG structures. + +## Why AgentScope? + +**Exceptional usability for developers.** +AgentScope provides high usability for developers with flexible syntactic +sugars, ready-to-use components, and pre-built examples. 
+ +**Robust fault tolerance for diverse models and APIs.** +AgentScope ensures robust fault tolerance for diverse models, APIs, and +allows developers to build customized fault-tolerant strategies. + +**Extensive compatibility for multi-modal application.** +AgentScope supports multi-modal data (e.g., files, images, audio and videos) +in both dialog presentation, message transmission and data storage. + +**Optimized efficiency for distributed multi-agent operations.** AgentScope +introduces an actor-based distributed mechanism that enables centralized +programming of complex distributed workflows, and automatic parallel +optimization. + +## How is AgentScope designed? + +The architecture of AgentScope comprises three hierarchical layers. The +layers provide supports for multi-agent applications from different levels, +including elementary and advanced functionalities of a single agent +(**utility layer**), resources and runtime management (**manager and wrapper +layer**), and agent-level to workflow-level programming interfaces (**agent +layer**). AgentScope introduces intuitive abstractions designed to fulfill +the diverse functionalities inherent to each layer and simplify the +complicated interlayer dependencies when building multi-agent systems. +Furthermore, we offer programming interfaces and default mechanisms to +strengthen the resilience of multi-agent systems against faults within +different layers. + +## AgentScope Code Structure + +```bash +AgentScope +├── src +│ ├── agentscope +│ | ├── agents # Core components and implementations pertaining to agents. +│ | ├── memory # Structures for agent memory. +│ | ├── models # Interfaces for integrating diverse model APIs. +│ | ├── pipeline # Fundamental components and implementations for running pipelines. +│ | ├── rpc # Rpc module for agent distributed deployment. +│ | ├── service # Services offering functions independent of memory and state. +| | ├── web # WebUI used to show dialogs. 
+│ | ├── utils # Auxiliary utilities and helper functions. +│ | ├── message.py # Definitions and implementations of messaging between agents. +│ | ├── prompt.py # Prompt engineering module for model input. +│ | ├── ... .. +│ | ├── ... .. +├── scripts # Scripts for launching local Model API +├── examples # Pre-built examples of different applications. +├── docs # Documentation tool for API reference. +├── tests # Unittest modules for continuous integration. +├── LICENSE # The official licensing agreement for AgentScope usage. +└── setup.py # Setup script for installing. +├── ... .. +└── ... .. +``` + +[[Return to the top]](#101-agentscope) diff --git a/docs/sphinx_doc/source/tutorial/101-installation.md b/docs/sphinx_doc/en/source/tutorial/102-installation.md similarity index 90% rename from docs/sphinx_doc/source/tutorial/101-installation.md rename to docs/sphinx_doc/en/source/tutorial/102-installation.md index 633c3cec1..51473c659 100644 --- a/docs/sphinx_doc/source/tutorial/101-installation.md +++ b/docs/sphinx_doc/en/source/tutorial/102-installation.md @@ -1,12 +1,12 @@ -(101-installation)= +(102-installation-en)= # Installation To install AgentScope, you need to have Python 3.9 or higher installed. 
We recommend setting up a new virtual environment specifically for AgentScope: -### Create a Virtual Environment +## Create a Virtual Environment -#### Using Conda +### Using Conda If you're using Conda as your package and environment management tool, you can create a new virtual environment with Python 3.9 using the following commands: @@ -18,7 +18,7 @@ conda create -n agentscope python=3.9 conda activate agentscope ``` -#### Using Virtualenv +### Using Virtualenv Alternatively, if you prefer `virtualenv`, you can install it first (if it's not already installed) and then create a new virtual environment as shown: @@ -33,9 +33,9 @@ virtualenv agentscope --python=python3.9 source agentscope/bin/activate # On Windows use `agentscope\Scripts\activate` ``` -### Installing AgentScope +## Installing AgentScope -#### Install with Pip +### Install with Pip If you prefer to install AgentScope from Pypi, you can do so easily using `pip`: @@ -46,7 +46,7 @@ pip install agentscope pip install agentscope[distribute] # On Mac use `pip install agentscope\[distribute\]` ``` -#### Install from Source +### Install from Source For users who prefer to install AgentScope directly from the source code, follow these steps to clone the repository and install the platform in editable mode: @@ -65,4 +65,4 @@ pip install -e .[distribute] # On Mac use `pip install -e .\[distribute\]` **Note**: The `[distribute]` option installs additional dependencies required for distributed applications. Remember to activate your virtual environment before running these commands. 
-[[Return to the top]](#installation) +[[Return to the top]](#102-installation-en) diff --git a/docs/sphinx_doc/source/tutorial/103-example.md b/docs/sphinx_doc/en/source/tutorial/103-example.md similarity index 65% rename from docs/sphinx_doc/source/tutorial/103-example.md rename to docs/sphinx_doc/en/source/tutorial/103-example.md index 08ead7cf5..64e1e0af5 100644 --- a/docs/sphinx_doc/source/tutorial/103-example.md +++ b/docs/sphinx_doc/en/source/tutorial/103-example.md @@ -1,19 +1,34 @@ -(103-example)= +(103-start-en)= -# Getting Started with a Simple Example +# Quick Start -AgentScope is a versatile platform for building and running multi-agent applications. We provide various pre-built examples that will help you quickly understand how to create and use multi-agent for various applications. In this tutorial, you will learn how to set up a **simple agent-based interaction**. +AgentScope is designed with a flexible communication mechanism. +In this tutorial, we will introduce the basic usage of AgentScope via a +simple standalone conversation between two agents (e.g. user and assistant +agents). -## Step1: Prepare Model Configs +## Step1: Prepare Model -Agent is the basic composition and communication unit in AgentScope. To initialize a model-based agent, you need to prepare your configs for avaliable models. AgentScope supports a variety of APIs for pre-trained models. Here is a table outlining the supported APIs and the type of arguments required for each: +AgentScope decouples the deployment and invocation of models to better build multi-agent applications. 
-| Model Usage | Model Type Argument in AgentScope | Supported APIs | -| --------------------------- | --------------------------------- |-----------------------------------------------------------------------------| -| Text generation | `openai` | Standard *OpenAI* chat API, FastChat and vllm | -| Image generation | `openai_dall_e` | *DALL-E* API for generating images | -| Embedding | `openai_embedding` | API for text embeddings | -| General usages in POST | `post_api` | *Huggingface* and *ModelScope* Inference API, and other customized post API | +In terms of model deployment, users can use third-party model services such +as OpenAI API, HuggingFace/ModelScope Inference API, and can also quickly +deploy local open-source model services through the [scripts] +() in +the repository. Currently, we support building basic model services quickly +using Flask with Transformers (or ModelScope), and also support deploying +local model services through FastChat and vllm inference engines. + +While in terms of model invocation, AgentScope provides a `ModelWrapper` class to encapsulate OpenAI API and RESTful Post Request calls. +Currently, the supported OpenAI APIs include Chat, Image generation, and Embedding. +Users can specify the model service by setting different model configs. + +| Model Usage | Supported APIs | +| --------------------------- |-----------------------------------------------------------------------------| +| Text generation | Standard *OpenAI* chat API, FastChat and vllm | +| Image generation | *DALL-E* API for generating images | +| Embedding | API for text embeddings | +| General usages in POST | *Huggingface* and *ModelScope* Inference API, and other customized post API | Each API has its specific configuration requirements. 
For example, to configure an OpenAI API, you would need to fill out the following fields in the model config in a dict, a yaml file or a json file: @@ -27,9 +42,9 @@ model_config = { } ``` -For open-source models, we support integration with various model interfaces such as HuggingFace, ModelScope, FastChat, and vllm. You can find scripts on deploying these services in the `scripts` directory, and we defer the detailed instructions to [[Using Different Model Sources with Model API]](203-model). +For open-source models, we support integration with various model interfaces such as HuggingFace, ModelScope, FastChat, and vllm. You can find scripts on deploying these services in the `scripts` directory, and we defer the detailed instructions to [[Using Different Model Sources with Model API]](#203-model). -You can register your configuration by calling AgentScope's initilization method as follow. Besides, you can also load more than one config by calling init mutliple times. +You can register your configuration by calling AgentScope's initialization method as follow. Besides, you can also load more than one config by calling init multiple times. ```python import agentscope @@ -99,4 +114,4 @@ while x is None or x.content != "exit": For more details about how to utilize pipelines for complex agent interactions, please refer to [[Agent Interactions: Dive deeper into Pipelines and Message Hub]](202-pipeline). 
-[[Return to the top]](#getting-started-with-a-simple-example) +[[Return to the top]](#103-start-en) diff --git a/docs/sphinx_doc/source/tutorial/104-usecase.md b/docs/sphinx_doc/en/source/tutorial/104-usecase.md similarity index 94% rename from docs/sphinx_doc/source/tutorial/104-usecase.md rename to docs/sphinx_doc/en/source/tutorial/104-usecase.md index 8fc6f1f7e..5c894cc65 100644 --- a/docs/sphinx_doc/source/tutorial/104-usecase.md +++ b/docs/sphinx_doc/en/source/tutorial/104-usecase.md @@ -1,4 +1,4 @@ -(104-usecase)= +(104-usecase-en)= # Crafting Your First Application @@ -12,19 +12,7 @@ Let the adventure begin to unlock the potential of multi-agent applications with ## Getting Started -Firstly, ensure that you have installed and configured AgentScope properly. Besides, we will involve the basic concepts of `Model API`, `Agent`, `Msg`, and `Pipeline,` as described in [Tutorial-Concept](102-concepts). The overview of this tutorial is shown below: - -- [Crafting Your First Application](#crafting-your-first-application) - - [Getting Started](#getting-started) - - [Step 1: Prepare Model API and Set Model Configs](#step-1-prepare-model-api-and-set-model-configs) - - [Step 2: Define the Roles of Each Agent](#step-2-define-the-roles-of-each-agent) - - [Step 3: Initialize AgentScope and the Agents](#step-3-initialize-agentscope-and-the-agents) - - [Step 4: Set Up the Game Logic](#step-4-set-up-the-game-logic) - - [Leverage Pipeline and MsgHub](#leverage-pipeline-and-msghub) - - [Implement Werewolf Pipeline](#implement-werewolf-pipeline) - - [Step 5: Run the Application](#step-5-run-the-application) - - [Next step](#next-step) - - [Other Example Applications](#other-example-applications) +Firstly, ensure that you have installed and configured AgentScope properly. Besides, we will involve the basic concepts of `Model API`, `Agent`, `Msg`, and `Pipeline,` as described in [Tutorial-Concept](101-agentscope). 
**Note**: all the configurations and code for this tutorial can be found in `examples/werewolf`. @@ -327,4 +315,4 @@ Now you've grasped how to conveniently set up a multi-agent application with Age - Example of Distributed Agents: [examples/Distributed Agents](https://github.com/modelscope/agentscope/tree/main/examples/distributed_agents/README.md) - ... -[[Return to the top]](#crafting-your-first-application) +[[Return to the top]](#104-usecase-en) diff --git a/docs/sphinx_doc/source/tutorial/105-logging.md b/docs/sphinx_doc/en/source/tutorial/105-logging.md similarity index 98% rename from docs/sphinx_doc/source/tutorial/105-logging.md rename to docs/sphinx_doc/en/source/tutorial/105-logging.md index 913fc92cc..98f872a8b 100644 --- a/docs/sphinx_doc/source/tutorial/105-logging.md +++ b/docs/sphinx_doc/en/source/tutorial/105-logging.md @@ -1,4 +1,4 @@ -(105-logging)= +(105-logging-en)= # Logging and WebUI @@ -88,7 +88,8 @@ By clicking a running instance, we can observe more details. ![The running details](https://img.alicdn.com/imgextra/i2/O1CN01AZtsf31MIHm4FmjjO_!!6000000001411-0-tps-3104-1849.jpg) ### Note + The WebUI is still under development. We will provide more features and better user experience in the future. 
-[[Return to the top]](#logging-and-webui) +[[Return to the top]](#105-logging-en) diff --git a/docs/sphinx_doc/source/tutorial/201-agent.md b/docs/sphinx_doc/en/source/tutorial/201-agent.md similarity index 98% rename from docs/sphinx_doc/source/tutorial/201-agent.md rename to docs/sphinx_doc/en/source/tutorial/201-agent.md index 76b6400b6..2ff5a5245 100644 --- a/docs/sphinx_doc/source/tutorial/201-agent.md +++ b/docs/sphinx_doc/en/source/tutorial/201-agent.md @@ -1,4 +1,4 @@ -(201-agent)= +(201-agent-en)= # Customizing Your Own Agent @@ -12,7 +12,7 @@ Each AgentBase derivative is composed of several key characteristics: * `memory`: This attribute enables agents to retain and recall past interactions, allowing them to maintain context in ongoing conversations. For more details about `memory`, we defer to [Memory and Message Management](205-memory). -* `model`: The model is the computational engine of the agent, responsible for making a response given existing memory and input. For more details about `model`, we defer to [Using Different Model Sources with Model API](203-model). +* `model`: The model is the computational engine of the agent, responsible for making a response given existing memory and input. For more details about `model`, we defer to [Using Different Model Sources with Model API](#203-model). * `sys_prompt` & `engine`: The system prompt acts as predefined instructions that guide the agent in its interactions; and the `engine` is used to dynamically generate a suitable prompt. For more details about them, we defer to [Prompt Engine](206-prompt). 
@@ -170,4 +170,4 @@ user_agent_config = { user_proxy_agent = UserAgent(**user_agent_config) ``` -[[Return to the top]](#customizing-your-own-agent) +[[Return to the top]](#201-agent-en) diff --git a/docs/sphinx_doc/source/tutorial/202-pipeline.md b/docs/sphinx_doc/en/source/tutorial/202-pipeline.md similarity index 97% rename from docs/sphinx_doc/source/tutorial/202-pipeline.md rename to docs/sphinx_doc/en/source/tutorial/202-pipeline.md index d61313fe5..00841bd0e 100644 --- a/docs/sphinx_doc/source/tutorial/202-pipeline.md +++ b/docs/sphinx_doc/en/source/tutorial/202-pipeline.md @@ -1,4 +1,4 @@ -(202-pipeline)= +(202-pipeline-en)= # Agent Interactions: Dive deeper into Pipelines and Message Hub @@ -216,10 +216,10 @@ This section illustrates how pipelines can simplify the implementation of logic It's worth noting that AgentScope supports the combination of pipelines to create complex interactions. For example, we can create a pipeline that executes a sequence of agents in order, and then executes another pipeline that executes a sequence of agents in condition. 
```python -from agentscope.pipelines import SequentialPipeline, ParallelPipeline +from agentscope.pipelines import SequentialPipeline, IfElsePipeline # Create a pipeline that executes agents in order pipe1 = SequentialPipeline([agent1, agent2, agent3]) -# Create a pipeline that executes agents in parallel +# Create a pipeline that executes agents conditionally with IfElsePipeline pipe2 = IfElsePipeline(condition, agent4, agent5) # Create a pipeline that executes pipe1 and pipe2 in order pipe3 = SequentialPipeline([pipe1, pipe2]) @@ -298,4 +298,4 @@ hub.add(new_agent) hub.delete(existing_agent) ``` -[[Return to the top]](#agent-interactions-dive-deeper-into-pipelines-and-message-hub) +[[Return to the top]](#202-pipeline-en) diff --git a/docs/sphinx_doc/en/source/tutorial/203-model.md b/docs/sphinx_doc/en/source/tutorial/203-model.md new file mode 100644 index 000000000..f1a6d7af9 --- /dev/null +++ b/docs/sphinx_doc/en/source/tutorial/203-model.md @@ -0,0 +1,220 @@ +(203-model-en)= + +# Model Service + +In AgentScope, the model deployment and invocation are decoupled by `ModelWrapper`. +Developers can specify their own model by providing model configurations, +and AgentScope also provides scripts to support developers to customize +model services. + +## Supported Models + +Currently, AgentScope supports the following model service APIs: + +- OpenAI API, including Chat, image generation (DALL-E), and Embedding. +- Post Request API, model inference services based on Post + requests, including Huggingface/ModelScope Inference API and various + post request based model APIs. + +## Configuration + +In AgentScope, users specify the model configuration through the +`model_configs` parameter in the `agentscope.init` interface. +`model_configs` can be a **dictionary**, **a list of dictionaries**, or a +**path** to model configuration file. 
+ +```python +import agentscope + +agentscope.init(model_configs=MODEL_CONFIG_OR_PATH) +``` + +An example of `model_configs` is as follows: + +```python +model_configs = [ + { + "config_name": "gpt-4-temperature-0.0", + "model_type": "openai", + "model_name": "gpt-4", + "api_key": "xxx", + "organization": "xxx", + "generate_args": { + "temperature": 0.0 + } + }, + { + "config_name": "dall-e-3-size-1024x1024", + "model_type": "openai_dall_e", + "model_name": "dall-e-3", + "api_key": "xxx", + "organization": "xxx", + "generate_args": { + "size": "1024x1024" + } + }, + # Additional models can be configured here +] +``` + +### Configuration Format + +In AgentScope the model configuration is a dictionary used to specify the type of model and set the call parameters. +We divide the fields in the model configuration into two categories: _basic parameters_ and _detailed parameters_. +Among them, the basic parameters include `config_name` and `model_type`, which are used to distinguish different model configurations and specific `ModelWrapper` types. + +```python +{ + # Basic parameters + "config_name": "gpt-4-temperature-0.0", # Model configuration name + "model_type": "openai", # Correspond to `ModelWrapper` type + + # Detailed parameters + # ... +} +``` + +#### Basic Parameters + +In basic parameters, `config_name` is the identifier of the model configuration, +which we will use to specify the model service when initializing an agent. + +`model_type` corresponds to the type of `ModelWrapper` and is used to specify the type of model service. +It corresponds to the `model_type` field in the `ModelWrapper` class in the source code. + +```python +class OpenAIChatWrapper(OpenAIWrapper): + """The model wrapper for OpenAI's chat API.""" + + model_type: str = "openai" + # ... 
+``` + +In the current AgentScope, the supported `model_type` types, the corresponding +`ModelWrapper` classes, and the supported APIs are as follows: + +| Task | model_type | ModelWrapper | Supported APIs | +|------------------|--------------------|--------------------------|------------------------------------------------------------| +| Text generation | `openai` | `OpenAIChatWrapper` | Standard OpenAI chat API, FastChat and vllm | +| Image generation | `openai_dall_e` | `OpenAIDALLEWrapper` | DALL-E API for generating images | +| Embedding | `openai_embedding` | `OpenAIEmbeddingWrapper` | API for text embeddings | +| Post Request | `post_api` | `PostAPIModelWrapperBase` | Huggingface/ModelScope Inference API, and customized post API | + +#### Detailed Parameters + +According to the different `ModelWrapper`, the parameters contained in the +detailed parameters are different. However, all detailed parameters will be +used to initialize the instance of the `ModelWrapper` class. Therefore, more +detailed parameter descriptions can be viewed according to the constructor of +their `ModelWrapper` classes. + +- For OpenAI APIs including text generation, image generation, and text embedding, the model configuration parameters are as follows: + +```python +{ + # basic parameters + "config_name": "gpt-4_temperature-0.0", + "model_type": "openai", + + # detailed parameters + # required parameters + "model_name": "gpt-4", # OpenAI model name + + # optional + "api_key": "xxx", # OpenAI API Key, if not provided, it will be read from the environment variable + "organization": "xxx", # Organization name, if not provided, it will be read from the environment variable + "client_args": { # Parameters for initializing the OpenAI API Client + # e.g. "max_retries": 3, + }, + "generate_args": { # Parameters passed to the model when calling + # e.g. 
"temperature": 0.0 + }, + "budget": 100.0 # API budget +} +``` + +- For post request API, the model configuration parameters are as follows: + +```python +{ + # Basic parameters + "config_name": "gpt-4_temperature-0.0", + "model_type": "post_api", + + # Detailed parameters + "api_url": "http://xxx.png", + "headers": { + # e.g. "Authorization": "Bearer xxx", + }, + + # Optional parameters, need to be configured according to the requirements of the Post request API + "json_args": { + # e.g. "temperature": 0.0 + } + # ... +} +``` + +## Build Model Service from Scratch + +For developers who need to build their own model services, AgentScope +provides some scripts to help developers quickly build model services. +You can find these scripts and instructions in the [scripts](https://github.com/modelscope/agentscope/tree/main/scripts) +directory. + +Specifically, AgentScope provides the following model service scripts: + +- Model service based on **Flask + HuggingFace** +- Model service based on **Flask + ModelScope** +- **FastChat** inference engine +- **vllm** inference engine + +Taking the Flask + Huggingface model service as an example, we will introduce how to use the model service script of AgentScope. +More model service scripts can be found in [scripts](https://github.com/modelscope/agentscope/blob/main/scripts/) directory. + +### Flask-based Model API Serving + +[Flask](https://github.com/pallets/flask) is a lightweight web application framework. It is easy to build a local model API service with Flask. + +#### Using transformers library + +##### Install Libraries and Set up Serving + +Install Flask and Transformers by following the command. + +```bash +pip install Flask transformers +``` + +Taking model `meta-llama/Llama-2-7b-chat-hf` and port `8000` as an example, set up the model API service by running the following command. 
+ +```bash +python flask_transformers/setup_hf_service.py + --model_name_or_path meta-llama/Llama-2-7b-chat-hf + --device "cuda:0" # or "cpu" + --port 8000 +``` + +You can replace `meta-llama/Llama-2-7b-chat-hf` with any model card in the huggingface model hub. + +##### Use in AgentScope + +In AgentScope, you can load the model with the following model configs: [./flask_transformers/model_config.json](https://github.com/modelscope/agentscope/blob/main/scripts/flask_transformers/model_config.json). + +```json +{ + "model_type": "post_api", + "config_name": "flask_llama2-7b-chat", + "api_url": "http://127.0.0.1:8000/llm/", + "json_args": { + "max_length": 4096, + "temperature": 0.5 + } +} +``` + +##### Note + +In this model serving, the messages from post requests should be in **STRING** format. You can use [templates for chat model](https://huggingface.co/docs/transformers/main/chat_templating) from _transformers_ with a little modification based on [`./flask_transformers/setup_hf_service.py`](https://github.com/modelscope/agentscope/blob/main/scripts/flask_transformers/setup_hf_service.py). + +[[Return to Top]](#203-model-en) diff --git a/docs/sphinx_doc/en/source/tutorial/204-service.md b/docs/sphinx_doc/en/source/tutorial/204-service.md new file mode 100644 index 000000000..d77fa674c --- /dev/null +++ b/docs/sphinx_doc/en/source/tutorial/204-service.md @@ -0,0 +1,260 @@ +(204-service-en)= + +# About Service + +Service function is a set of multi-functional utility tools that can be +used to enhance the capabilities of agents, such as executing Python code, +web search, file operations, and more. +This tutorial provides an overview of the service functions available in +AgentScope and how to use them to enhance the capabilities of your agents. + +## Built-in Service Functions + +The following table outlines the various Service functions by type. These functions can be called using `agentscope.service.{function_name}`. 
+ +| Service Scene | Service Function Name | Description | +| -------------- | --------------------- | ------------------------------------------------------------ | +| Code | `execute_python_code` | Execute a piece of Python code, optionally inside a Docker container. | +| Retrieval | `retrieve_from_list` | Retrieve a specific item from a list based on given criteria. | +| SQL Query | `query_mysql` | Execute SQL queries on a MySQL database and return results. | +| | `query_sqlite` | Execute SQL queries on a SQLite database and return results. | +| | `query_mongodb` | Perform queries or operations on a MongoDB collection. | +| Text Processing | `summarization` | Summarize a piece of text using a large language model to highlight its main points. | +| Web Search | `web_search` | Perform a web search using a specified search engine (currently supports Google and Bing). | +| File | `create_file` | Create a new file at a specified path, optionally with initial content. | +| | `delete_file` | Delete a file specified by a file path. | +| | `move_file` | Move or rename a file from one path to another. | +| | `create_directory` | Create a new directory at a specified path. | +| | `delete_directory` | Delete a directory and all its contents. | +| | `move_directory` | Move or rename a directory from one path to another. | +| | `read_text_file` | Read and return the content of a text file. | +| | `write_text_file` | Write text content to a file at a specified path. | +| | `read_json_file` | Read and parse the content of a JSON file. | +| | `write_json_file` | Serialize a Python object to JSON and write to a file. | +| *More services coming soon* | | More service functions are in development and will be added to AgentScope to further enhance its capabilities. | + +About each service function, you can find detailed information in the +[API document](https://modelscope.github.io/agentscope/). 
+ +## How to use Service Functions + +AgentScope provides two service classes for Service functions, +`ServiceFactory` and `ServiceResponse`. + +- `ServiceFactory` is mainly used to convert general Python functions into + a form that can be directly used by large-scale models, and automatically + generate function descriptions in JSON schema format. +- `ServiceResponse` is a subclass of a dictionary, providing a unified call + result interface for all Service functions. + +### About Service Factory + +The tools used by agents are generally of the function type. Developers +need to prepare functions that can be called directly by large models, and +provide descriptions of the functions. However, general functions often +require developers to provide some parameters (such as keys, usernames, +specific URLs, etc.), and then the large model can use them. At the same +time, it is also a tedious task to generate specific format descriptions +for multiple functions. + +To tackle the above problems, AgentScope introduces `ServiceFactory`. For a +given Service function, it allows developers to specify some parameters, +generate a function that can be called directly by large models, and +automatically generate function descriptions based on the Docstring. Take +the Bing web search function as an example. + +```python +def bing_search( + question: str, + api_key: str, + num_results: int = 10, + **kwargs: Any, +) -> ServiceResponse: + """ + Search question in Bing Search API and return the searching results + + Args: + question (`str`): + The search query string. + api_key (`str`): + The API key provided for authenticating with the Bing Search API. + num_results (`int`, defaults to `10`): + The number of search results to return. + **kwargs (`Any`): + Additional keyword arguments to be included in the search query. 
+ For more details, please refer to + https://learn.microsoft.com/en-us/bing/search-apis/bing-web-search/reference/query-parameters + + [omitted for brevity] + """ +``` + +In the above function, `question` is the field filled in by the large model, +while `api_key` and `num_results` are the parameters that the developer needs to provide. +We use the `get` function of `ServiceFactory` to process it: + +```python +from agentscope.service import ServiceFactory + +func, func_intro = ServiceFactory.get( + bing_search, + api_key="xxx", + num_results=3) +``` + +In the above code, the `func` generated by ServiceFactory is equivalent to the following function: + +```python +def bing_search(question: str) -> ServiceResponse: + """ + Search question in Bing Search API and return the searching results + + Args: + question (`str`): + The search query string. + """ + return bing_search(question, api_key="xxx", num_results=3) +``` + +The generated JSON schema format is as follows, which can be directly used +in the `tools` field of the OpenAI API. + +```python +# print(func_intro) +{ + "type": "function", + "function": { + "name": "bing_search", + "description": "Search question in Bing Search API and return the searching results", + "parameters": { + "type": "object", + "properties": { + "question": { + "type": "string", + "description": "The search query string." + } + }, + "required": [ + "question" + ] + } + } +} +``` + +**Note**: +The description of the function and arguments are extracted from +its docstring automatically, which should be well-formatted in +**Google style**. Otherwise, their descriptions in the returned +dictionary will be empty. + +**Suggestions**: + +1. The name of the service function should be self-explanatory, +so that the agent can understand the function and use it properly. +2. The typing of the arguments should be provided when defining +the function (e.g. `def func(a: int, b: str, c: bool)`), so that +the agent can specify the arguments properly. 
+ +### About ServiceResponse + +`ServiceResponse` is a wrapper for the execution results of the services, +containing two fields, `status` and `content`. When the Service function +runs to completion normally, `status` is `ServiceExecStatus.SUCCESS`, and +`content` is the return value of the function. When an error occurs during +execution, `status` is `ServiceExecStatus.Error`, and `content` contains +the error message. + +```python +class ServiceResponse(dict): + """Used to wrap the execution results of the services""" + + __setattr__ = dict.__setitem__ + __getattr__ = dict.__getitem__ + + def __init__( + self, + status: ServiceExecStatus, + content: Any, + ): + """Constructor of ServiceResponse + + Args: + status (`ServiceExeStatus`): + The execution status of the service. + content (`Any`) + If the argument`status` is `SUCCESS`, `content` is the + response. We use `object` here to support various objects, + e.g. str, dict, image, video, etc. + Otherwise, `content` is the error message. + """ + self.status = status + self.content = content + + # [omitted for brevity] +``` + +## Example + +```python +import json +import inspect +from agentscope.service import ServiceResponse +from agentscope.agents import AgentBase + + +def create_file(file_path: str, content: str = "") -> ServiceResponse: + """ + Create a file and write content to it. + + Args: + file_path (str): The path to the file to be created. + content (str): The content to be written to the file. + + Returns: + ServiceResponse: A boolean indicating success or failure, and a + string containing any error message (if any), including the error type. + """ + # ... [omitted for brevity] + + +class YourAgent(AgentBase): + # ... [omitted for brevity] + + def reply(self, x: dict = None) -> dict: + # ... 
[omitted for brevity] + + # construct a prompt to ask the agent to provide the parameters in JSON format + prompt = ( + f"To complete the user request\n```{x['content']}```\n" + "Please provide the necessary parameters in JSON format for the " + "function:\n" + f"Function: {create_file.__name__}\n" + "Description: Create a file and write content to it.\n" + ) + + # add detailed information about the function parameters + sig = inspect.signature(create_file) + parameters = sig.parameters.items() + params_prompt = "\n".join( + f"- {name} ({param.annotation.__name__}): " + f"{'(default: ' + json.dumps(param.default) + ')'if param.default is not inspect.Parameter.empty else ''}" + for name, param in parameters + ) + prompt += params_prompt + + # get the model response + model_response = self.model(prompt).text + + # parse the model response and call the create_file function + try: + kwargs = json.loads(model_response) + create_file(**kwargs) + except: + # Error handling + pass + + # ... [omitted for brevity] +``` + +[[Return to Top]](#204-service-en) diff --git a/docs/sphinx_doc/source/tutorial/205-memory.md b/docs/sphinx_doc/en/source/tutorial/205-memory.md similarity index 51% rename from docs/sphinx_doc/source/tutorial/205-memory.md rename to docs/sphinx_doc/en/source/tutorial/205-memory.md index 728bc1f29..baa95589a 100644 --- a/docs/sphinx_doc/source/tutorial/205-memory.md +++ b/docs/sphinx_doc/en/source/tutorial/205-memory.md @@ -1,17 +1,48 @@ -(205-memory)= +(205-memory-en)= + +# About Memory + +In AgentScope, memory is used to store historical information, allowing the +agent to provide more coherent and natural responses based on context. +This tutorial will first introduce the carrier of information in memory, +message, and then introduce the functions and usage of the memory module in +AgentScope. 
+ +## About Message + +### `MessageBase` Class + +In AgentScope, the message base class is a subclass of Python dictionary, +consisting of two required fields (`name` and `content`) and an optional +field (`url`). +Specifically, the `name` field represents the originator of the message, +the `content` field represents the content of the message, and the `url` +field represents the data link attached to the message, which can be a +local link to multi-modal data or a web link. +As a dictionary type, developers can also add other fields +as needed. When a message is created, a unique ID is automatically +generated to identify the message. The creation time of the message is also +automatically recorded in the form of a timestamp. + +In the specific implementation, AgentScope first provides a `MessageBase` +base class to define the basic properties and usage of messages. +Unlike general dictionary types, the instantiated objects of `MessageBase` +can access attribute values through `object_name.{attribute_name}` or +`object_name['attribute_name']`. +The key attributes of the `MessageBase` class are as follows: -# Memory and Message Management - -**Message** represents individual pieces of information or interactions flowing between/within agents. **Memory** refers to the storage and retrieval of historical information and serves as the storage and management system for the messages. This allows the agent to remember past interactions, maintain context, and provide more coherent and relevant responses. - -## Understanding `MessageBase` and its subclasses - -### `MessageBase` - -`MessageBase` is designed to organize attributes of a message, like the agent's name, the content, and associated media URLs. It provides a structure that can be extended to create specific types of messages. +- **`name`**: This attribute denotes the originator of the message. It's a critical piece of metadata, useful in scenarios where distinguishing between different speakers is necessary. 
+- **`content`**: The substance of the message itself. It can include text, structured data, or any other form of content that is relevant to the interaction and requires processing by the agent. +- **`url`**: An optional attribute that allows the message to be linked to external resources. These can be direct links to files, multi-modal data, or web pages. +- **`timestamp`**: A timestamp indicating when the message was created. +- **`id`**: Each message is assigned a unique identifier (ID) upon creation. ```python class MessageBase(dict): + """Base Message class, which is used to maintain information for dialog, + memory and used to construct prompt. + """ + def __init__( self, name: str, @@ -20,68 +51,114 @@ class MessageBase(dict): timestamp: Optional[str] = None, **kwargs: Any, ) -> None: + """Initialize the message object + + Args: + name (`str`): + The name of who send the message. It's often used in + role-playing scenario to tell the name of the sender. + However, you can also only use `role` when calling openai api. + The usage of `name` refers to + https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models. + content (`Any`): + The content of the message. + url (`Optional[Union[list[str], str]]`, defaults to None): + A url to file, image, video, audio or website. + timestamp (`Optional[str]`, defaults to None): + The timestamp of the message, if None, it will be set to + current time. + **kwargs (`Any`): + Other attributes of the message. For OpenAI API, you should + add "role" from `["system", "user", "assistant", "function"]`. + When calling OpenAI API, `"role": "assistant"` will be added + to the messages that don't have "role" attribute. 
+ + """ + # id and timestamp will be added to the object as its attributes + # rather than items in dict self.id = uuid4().hex - self.timestamp = timestamp or _get_timestamp() + if timestamp is None: + self.timestamp = _get_timestamp() + else: + self.timestamp = timestamp + self.name = name self.content = content - self.url = url + + if url: + self.url = url + self.update(kwargs) + def __getattr__(self, key: Any) -> Any: + try: + return self[key] + except KeyError as e: + raise AttributeError(f"no attribute '{key}'") from e + + def __setattr__(self, key: Any, value: Any) -> None: + self[key] = value + + def __delattr__(self, key: Any) -> None: + try: + del self[key] + except KeyError as e: + raise AttributeError(f"no attribute '{key}'") from e + def to_str(self) -> str: + """Return the string representation of the message""" raise NotImplementedError def serialize(self) -> str: + """Return the serialized message.""" raise NotImplementedError - # ... [code omitted for brevity] + # ... [code omitted for brevity] ``` -Here are the key attributes managed by the `MessageBase` class: - -- **`name`**: This attribute denotes the originator of the message. It's a critical piece of metadata, useful in scenarios where distinguishing between different speakers is necessary. -- **`content`**: The substance of the message itself. It can include text, structured data, or any other form of content that is relevant to the interaction and requires processing by the agent. -- **`url`**: An optional attribute that allows the message to be linked to external resources. These can be direct links to files, multi-modal data, or web pages. -- **`timestamp`**: A timestamp indicating when the message was created. -- **`id`**: Each message is assigned a unique identifier (ID) upon creation. - -### `Msg` 
`Msg` provides concrete definitions for the `to_str` and `serialize` methods to enable string representation and serialization suitable for the agent's operational context. +`Msg` class extends `MessageBase` and represents a standard *message*. +`Msg` provides concrete definitions for the `to_str` and `serialize` +methods to enable string representation and serialization suitable for the +agent's operational context. +Within an `Agent` class, its `reply` function typically returns an instance of +`Msg` to facilitate message passing within AgentScope. ```python class Msg(MessageBase): - # ... [code omitted for brevity] + """The Message class.""" + + def __init__( + self, + name: str, + content: Any, + url: Optional[Union[Sequence[str], str]] = None, + timestamp: Optional[str] = None, + echo: bool = False, + **kwargs: Any, + ) -> None: + super().__init__( + name=name, + content=content, + url=url, + timestamp=timestamp, + **kwargs, + ) + if echo: + logger.chat(self) def to_str(self) -> str: + """Return the string representation of the message""" return f"{self.name}: {self.content}" def serialize(self) -> str: return json.dumps({"__type": "Msg", **self}) - -# `Msg` logs ->> Someone: I should ... -``` - -### `Tht` - -The `Tht` ("Thought") subclass is a specialized form of `MessageBase` used for encapsulating processes of an agent's internal thought. The thought is not sent outwardly but is instead used internally by the agent. As with `Msg`, specific implementations of `Tht` will define `to_str` and `serialize` methods to handle the unique requirements of representing and serializing an agent's thoughts. - -```python -class Tht(MessageBase): - # ... [code omitted for brevity] - - def to_str(self) -> str: - return f"{self.name} thought: {self.content}" - - def serialize(self) -> str: - return json.dumps({"__type": "Tht", **self}) - ->> Someone thought: I should ... 
``` -## Understanding `MemoryBase` and its subclasses +## About Memory -### `MemoryBase` +### `MemoryBase` Class `MemoryBase` is an abstract class that handles an agent's memory in a structured way. It defines operations for storing, retrieving, deleting, and manipulating *message*'s content. @@ -143,4 +220,4 @@ The `TemporaryMemory` class is a concrete implementation of `MemoryBase`, provid For more details about the usage of `Memory` and `Msg`, please refer to the API references. -[[Return to the top]](#memory-and-message-management) +[[Return to the top]](#205-memory-en) diff --git a/docs/sphinx_doc/source/tutorial/206-prompt.md b/docs/sphinx_doc/en/source/tutorial/206-prompt.md similarity index 80% rename from docs/sphinx_doc/source/tutorial/206-prompt.md rename to docs/sphinx_doc/en/source/tutorial/206-prompt.md index de580de47..570b5c7bd 100644 --- a/docs/sphinx_doc/source/tutorial/206-prompt.md +++ b/docs/sphinx_doc/en/source/tutorial/206-prompt.md @@ -1,17 +1,24 @@ -(206-prompt)= +(206-prompt-en)= # Prompt Engine -**Prompt** is a crucial component in interacting with language models, especially when seeking to generate specific types of outputs or guide the model toward desired behaviors. This tutorial will guide you through the use of the `PromptEngine` class, which simplifies the process of crafting prompts for LLMs. +**Prompt** is a crucial component in interacting with language models, +especially when seeking to generate specific types of outputs or guide the +model toward desired behaviors. +AgentScope allows developers to customize prompts according to their needs, +and provides the `PromptEngine` class to simplify the process of crafting +prompts for large language models (LLMs). +This tutorial will guide you through the +use of the `PromptEngine` class, which simplifies the process of crafting +prompts for LLMs. 
-## Understanding the `PromptEngine` Class +## About `PromptEngine` Class The `PromptEngine` class provides a structured way to combine different components of a prompt, such as instructions, hints, dialogue history, and user inputs, into a format that is suitable for the underlying language model. ### Key Features of PromptEngine - **Model Compatibility**: It works with any `ModelWrapperBase` subclass. -- **Shrink Policy**: It offers two policies for handling prompts that exceed the maximum length: `ShrinkPolicy.TRUNCATE` to simply truncate the prompt, and `ShrinkPolicy.SUMMARIZE` to summarize part of the dialog history to save space. - **Prompt Type**: It supports both string and list-style prompts, aligning with the model's preferred input format. ### Initialization @@ -63,4 +70,4 @@ hint_prompt = "Find the weather in {location}." prompt = engine.join(system_prompt, user_input, hint_prompt, format_map=variables) ``` -[[Return to the top]](#prompt-engine) +[[Return to the top]](#206-prompt-en) diff --git a/docs/sphinx_doc/source/tutorial/207-monitor.md b/docs/sphinx_doc/en/source/tutorial/207-monitor.md similarity index 98% rename from docs/sphinx_doc/source/tutorial/207-monitor.md rename to docs/sphinx_doc/en/source/tutorial/207-monitor.md index d3dc08f2c..e43f67b4f 100644 --- a/docs/sphinx_doc/source/tutorial/207-monitor.md +++ b/docs/sphinx_doc/en/source/tutorial/207-monitor.md @@ -1,4 +1,4 @@ -(207-monitor)= +(207-monitor-en)= # Monitor @@ -169,4 +169,4 @@ except QuotaExceededError as e: > **Note:** This feature is still in the experimental stage and only supports some specified APIs, which are listed in `agentscope.utils.monitor._get_pricing`. 
-[[Return to the top]](#monitoring-and-logging) +[[Return to the top]](#207-monitor-en) diff --git a/docs/sphinx_doc/en/source/tutorial/208-distribute.md b/docs/sphinx_doc/en/source/tutorial/208-distribute.md new file mode 100644 index 000000000..29273fd46 --- /dev/null +++ b/docs/sphinx_doc/en/source/tutorial/208-distribute.md @@ -0,0 +1,156 @@ +(208-distribute-en)= + +# About Distribution + +AgentScope implements an Actor-based distributed deployment and parallel optimization, providing the following features: + +- **Automatic Parallel Optimization**: Automatically optimize the application for parallelism at runtime without additional optimization costs; +- **Centralized Application Writing**: Easily orchestrate distributed application flow without distributed background knowledge; +- **Zero-Cost Automatic Migration**: Centralized Multi-Agent applications can be easily converted to distributed mode + +This tutorial will introduce the implementation and usage of AgentScope distributed in detail. + +## Usage + +In AgentScope, the process that runs the application flow is called the "main process", and all agents will run in separate processes. +According to the different relationships between the main process and the agent process, AgentScope supports two distributed modes: Master-Slave and Peer-to-Peer mode. +In the Master-Slave mode, developers can start all agent processes from the main process, while in the Peer-to-Peer mode, the agent process is independent of the main process and developers need to start the agent service on the corresponding machine. + +The above concepts may seem complex, but don't worry, for application developers, they only have minor differences when creating agents. Below we introduce how to create distributed agents. + +### Step 1: Create a Distributed Agent + +First, the developer's agent must inherit the `agentscope.agents.AgentBase` class. `AgentBase` provides the `to_dist` method to convert the agent into its distributed version. 
`to_dist` mainly relies on the following parameters to implement the distributed deployment of the agent: + +- `host`: the hostname or IP address of the machine where the agent runs, defaults to `localhost`. +- `port`: the port of this agent's RPC server, defaults to `80`. +- `launch_server`: whether to launch an RPC server locally, defaults to `True`. + +Suppose there are two agent classes `AgentA` and `AgentB`, both of which inherit from `AgentBase`. + +#### Master-Slave Mode + +In the Master-Slave mode, since all agent processes depend on the main process, all processes actually run on the same machine. +We can start all agent processes from the main process, that is, the default parameters `launch_server=True` and `host="localhost"`, and we can omit the `port` parameter. AgentScope will automatically find an available local port for the agent process. + +```python +a = AgentA( + name="A" + # ... +).to_dist() +``` + +#### Peer-to-Peer Mode + +In the Peer-to-Peer mode, we need to start the service of the corresponding agent on the target machine first. For example, deploy an instance of `AgentA` on the machine with IP `a.b.c.d`, and its corresponding port is 12001. Run the following code on this target machine: + +```python +from agentscope.agents import RpcAgentServerLauncher + +# Create an agent service process +server_a = RpcAgentServerLauncher( + agent_class=AgentA, + agent_kwargs={ + "name": "A" + ... + }, + host="a.b.c.d", + port=12001, +) + +# Start the service +server_a.launch() +server_a.wait_until_terminate() +``` + +Then, we can connect to the agent service in the main process with the following code. At this time, the object `a` created in the main process can be used as a local proxy for the agent, allowing developers to write the application flow in a centralized way in the main process. + +```python +a = AgentA( + name="A", + # ... 
+).to_dist( + host="a.b.c.d", + port=12001, + launch_server=False, +) +``` + +### Step 2: Orchestrate Distributed Application Flow + +In AgentScope, the orchestration of distributed application flow is exactly the same as non-distributed programs, and developers can write the entire application flow in a centralized way. +At the same time, AgentScope allows the use of a mixture of locally and distributed deployed agents, and developers do not need to distinguish which agents are local and which are distributed. + +The following is the complete code for two agents to communicate with each other in different modes. It can be seen that AgentScope supports zero-cost migration of distributed application flow from centralized to distributed. + +- All agents are centralized: + +```python +# Create agent objects +a = AgentA( + name="A", + # ... +) + +b = AgentB( + name="B", + # ... +) + +# Application flow orchestration +x = None +while x is None or x.content != "exit": + x = a(x) + x = b(x) +``` + +- Agents are deployed in a distributed manner (Master-Slave mode): + +```python +# Create agent objects +a = AgentA( + name="A", + # ... +).to_dist() + +b = AgentB( + name="B", + # ... +).to_dist() + +# Application flow orchestration +x = None +while x is None or x.content != "exit": + x = a(x) + x = b(x) +``` + +### About Implementation + +#### Actor Model + +[The Actor model](https://en.wikipedia.org/wiki/Actor_model) is a widely used programming paradigm in large-scale distributed systems, and it is also applied in the distributed design of the AgentScope platform. + +In the distributed mode of AgentScope, each Agent is an Actor and interacts with other Agents through messages. The flow of messages implies the execution order of the Agents. Each Agent has a `reply` method, which consumes a message and generates another message, and the generated message can be sent to other Agents. For example, the following chart shows the workflow of multiple Agents. 
`A`~`F` are all Agents, and the arrows represent messages. + +```{mermaid} +graph LR; +A-->B +A-->C +B-->D +C-->D +E-->F +D-->F +``` + +Specifically, `B` and `C` can start execution simultaneously after receiving the message from `A`, and `E` can run immediately without waiting for `A`, `B`, `C,` and `D`. +By implementing each Agent as an Actor, an Agent will automatically wait for its input `Msg` before starting to execute the `reply` method, and multiple Agents can also automatically execute `reply` at the same time if their input messages are ready, which avoids complex parallel control and makes things simple. + +#### PlaceHolder + +Meanwhile, to support centralized application orchestration, AgentScope introduces the concept of Placeholder. A Placeholder is a special message that contains the address and port number of the agent that generated the Placeholder, which is used to indicate that the input message of the Agent is not ready yet. +When the input message of the Agent is ready, the Placeholder will be replaced by the real message, and then the actual `reply` method will be executed. + +About more detailed technical implementation solutions, please refer to our [paper](https://arxiv.org/abs/2402.14034). + +[[Back to the top]](#208-distribute-en) diff --git a/docs/sphinx_doc/source/tutorial/301-community.md b/docs/sphinx_doc/en/source/tutorial/301-community.md similarity index 96% rename from docs/sphinx_doc/source/tutorial/301-community.md rename to docs/sphinx_doc/en/source/tutorial/301-community.md index 38c0c0140..438bbd49a 100644 --- a/docs/sphinx_doc/source/tutorial/301-community.md +++ b/docs/sphinx_doc/en/source/tutorial/301-community.md @@ -1,4 +1,4 @@ -(301-community)= +(301-community-en)= # Joining The AgentScope Community @@ -31,4 +31,4 @@ Scan the QR code below on Wechat to join: NUL 2>NUL -if errorlevel 9009 ( - echo. - echo.The 'sphinx-build' command was not found. 
Make sure you have Sphinx - echo.installed, then set the SPHINXBUILD environment variable to point - echo.to the full path of the 'sphinx-build' executable. Alternatively you - echo.may add the Sphinx directory to PATH. - echo. - echo.If you don't have Sphinx installed, grab it from - echo.https://www.sphinx-doc.org/ - exit /b 1 -) - -if "%1" == "" goto help - -%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% -goto end - -:help -%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% - -:end -popd diff --git a/docs/sphinx_doc/source/tutorial/102-concepts.md b/docs/sphinx_doc/source/tutorial/102-concepts.md deleted file mode 100644 index 745c2cc50..000000000 --- a/docs/sphinx_doc/source/tutorial/102-concepts.md +++ /dev/null @@ -1,46 +0,0 @@ -(102-concepts)= - -# Fundamental Concepts - -In this tutorial, you'll have an initial understanding of the **fundamental concepts** of AgentScope. We will focus on how a multi-agent application runs based on our platform and familiarize you with the essential terms. Let's get started! - -![Concepts](https://img.alicdn.com/imgextra/i1/O1CN01ELiTw41KGKqTmWZua_!!6000000001136-2-tps-756-598.png) - -## Essential Terms and Concepts - -* **Agent** refers to an autonomous entity capable of performing actions to achieve specific objectives (probably powered by LLMs). In AgentScope, an agent takes the message as input and generates a corresponding response message. Agents can interact with each other to simulate human-like behaviors (e.g., discussion or debate) and cooperate to finish complicated tasks (e.g., generate runnable and reliable code). -* **Message** is a carrier of communication information among agents. It encapsulates information that needs to be conveyed, such as instructions, multi-modal data, or status updates. 
In AgentScope, a message is a subclass of Python's dict with additional features for inter-agent communication, including fields such as `name` and `content` for identification and payload delivery. -* **Memory** refers to the structures (e.g., list-like memory, database-based memory) used to store and manage `Msg` (Message) that agents need to remember and store. This can include chat history, knowledge, or other data that informs the agent's future actions. -* **Service** is a collection of functionality tools (e.g., web search, code interpreter, file processing) that provide specific capabilities or processes that are independent of an agent's memory state. Services can be invoked by agents or other components and designed to be reusable across different scenarios. -* **Pipeline** refers to the interaction order or pattern of agents in a task. AgentScope provides built-in `pipelines` to streamline the process of collaboration across multiple agents, such as `SequentialPipeline` and `ForLoopPipeline`. When a `Pipeline` is executed, the *message* passes from predecessors to successors with intermediate results for the task. - -## Code Structure - -```bash -AgentScope -├── src -│ ├── agentscope -│ | ├── agents # Core components and implementations pertaining to agents. -│ | ├── configs # Configurations that can be customized for the application's needs. -│ | ├── memory # Structures for agent memory. -│ | ├── models # Interfaces for integrating diverse model APIs. -│ | ├── pipeline # Fundamental components and implementations for running pipelines. -│ | ├── rpc # Rpc module for agent distributed deployment. -│ | ├── service # Services offering functions independent of memory and state. -│ | ├── utils # Auxiliary utilities and helper functions. -│ | ├── message.py # Definitions and implementations of messaging between agents. -| | ├── web # WebUI used to show dialogs. -│ | ├── prompt.py # Prompt engineering module for model input. -│ | ├── ... .. -│ | ├── ... .. 
-├── scripts # Scripts for launching local Model API -├── examples # Pre-built examples of different applications. -├── docs # Documentation tool for API reference. -├── tests # Unittest modules for continuous integration. -├── LICENSE # The official licensing agreement for AgentScope usage. -└── setup.py # Setup script for installing. -├── ... .. -└── ... .. -``` - -[[Return to the top]](#fundamental-concepts) diff --git a/docs/sphinx_doc/source/tutorial/203-model.md b/docs/sphinx_doc/source/tutorial/203-model.md deleted file mode 100644 index dc83ff758..000000000 --- a/docs/sphinx_doc/source/tutorial/203-model.md +++ /dev/null @@ -1,256 +0,0 @@ -(203-model)= - -# Using Different Model Sources with Model API - -AgentScope allows for the integration of multi-modal models from various sources. The core step is the initialization process, where once initialized with a certain config, all agent instances globally select the appropriate model APIs based on the model name specified (e.g., `model='gpt-4'`): - -```python -import agentscope - -agentscope.init(model_configs=PATH_TO_MODEL_CONFIG) -``` - -where the model configs could be a list of dict: - -```json -[ - { - "config_name": "gpt-4-temperature-0.0", - "model_type": "openai", - "model_name": "gpt-4", - "api_key": "xxx", - "organization": "xxx", - "generate_args": { - "temperature": 0.0 - } - }, - { - "config_name": "dall-e-3-size-1024x1024", - "model_type": "openai_dall_e", - "model_name": "dall-e-3", - "api_key": "xxx", - "organization": "xxx", - "generate_args": { - "size": "1024x1024" - } - }, - // Additional models can be configured here -] -``` - -This allows users to configure the model once, enabling shared use across all agents within the multi-agent application. 
Here is a table outlining the supported APIs and the type of arguments required for each: - -| Model Usage | Type Argument in AgentScope | Supported APIs | -| -------------------- | ------------------ | ------------------------------------------------------------ | -| Text generation | `openai` | Standard OpenAI chat API, FastChat and vllm | -| Image generation | `openai_dall_e` | DALL-E API for generating images | -| Embedding | `openai_embedding` | API for text embeddings | -| General usages in POST | `post_api` | Huggingface/ModelScope Inference API, and customized post API | - -## Standard OpenAI API - -Our configuration is fully compatible with the Standard OpenAI API. For specific parameter configuration and usage guides, we recommend visiting their official website: [https://platform.openai.com/docs/api-reference/introduction](https://platform.openai.com/docs/api-reference/introduction). - -## Self-host Model API - -In AgentScope, in addition to OpenAI API, we also support open-source models with post-request API. In this document, we will introduce how to fast set up local model API serving with different inference engines. - -### Flask-based Model API Serving - -[Flask](https://github.com/pallets/flask) is a lightweight web application framework. It is easy to build a local model API serving with Flask. - -Here we provide two Flask examples with Transformers and ModelScope libraries, respectively. You can build your own model API serving with a few modifications. - -#### With Transformers Library - -##### Install Libraries and Set up Serving - -Install Flask and Transformers by following the command. - -```bash -pip install Flask, transformers -``` - -Taking model `meta-llama/Llama-2-7b-chat-hf` and port `8000` as an example, set up the model API serving by running the following command. 
- -```bash -python flask_transformers/setup_hf_service.py - --model_name_or_path meta-llama/Llama-2-7b-chat-hf - --device "cuda:0" # or "cpu" - --port 8000 -``` - -You can replace `meta-llama/Llama-2-7b-chat-hf` with any model card in the huggingface model hub. - -##### How to use in AgentScope - -In AgentScope, you can load the model with the following model configs: `./flask_transformers/model_config.json`. - -```json -{ - "model_type": "post_api", - "config_name": "flask_llama2-7b-chat", - "api_url": "http://127.0.0.1:8000/llm/", - "json_args": { - "max_length": 4096, - "temperature": 0.5 - } -} -``` - -##### Note - -In this model serving, the messages from post requests should be in **STRING** format. You can use [templates for chat model](https://huggingface.co/docs/transformers/main/chat_templating) from *transformers* with a little modification based on `./flask_transformers/setup_hf_service.py`. - -#### With ModelScope Library - -##### Install Libraries and Set up Serving - -Install Flask and modelscope by following the command. - -```bash -pip install Flask, modelscope -``` - -Taking model `modelscope/Llama-2-7b-ms` and port `8000` as an example, to set up the model API serving, run the following command. - -```bash -python flask_modelscope/setup_ms_service.py - --model_name_or_path modelscope/Llama-2-7b-ms - --device "cuda:0" # or "cpu" - --port 8000 -``` - -You can replace `modelscope/Llama-2-7b-ms` with any model card in modelscope model hub. - -##### How to use AgentScope - -In AgentScope, you can load the model with the following model configs: `flask_modelscope/model_config.json`. - -```json -{ - "model_type": "post_api", - "config_name": "flask_llama2-7b-ms", - "api_url": "http://127.0.0.1:8000/llm/", - "json_args": { - "max_length": 4096, - "temperature": 0.5 - } -} -``` - -##### Note - -Similar to the example of transformers, the messages from post requests should be in **STRING format**. 
- -### FastChat - -[FastChat](https://github.com/lm-sys/FastChat) is an open platform that provides a quick setup for model serving with OpenAI-compatible RESTful APIs. - -#### Install Libraries and Set up Serving - -To install FastChat, run - -```bash -pip install "fastchat[model_worker,webui]" -``` - -Taking model `meta-llama/Llama-2-7b-chat-hf` and port `8000` as an example, to set up model API serving, run the following command to set up model serving. - -```bash -bash fastchat_script/fastchat_setup.sh -m meta-llama/Llama-2-7b-chat-hf -p 8000 -``` - -#### Supported Models - -Refer to [supported model list](https://github.com/lm-sys/FastChat/blob/main/docs/model_support.md#supported-models) of FastChat. - -#### How to use in AgentScope - -Now you can load the model in AgentScope by the following model config: `fastchat_script/model_config.json`. - -```json -{ - "config_name": "meta-llama/Llama-2-7b-chat-hf", - "model_type": "openai", - "api_key": "EMPTY", - "client_args": { - "base_url": "http://127.0.0.1:8000/v1/" - }, - "generate_args": { - "temperature": 0.5 - } -} -``` - -### vllm - -[vllm](https://github.com/vllm-project/vllm) is a high-throughput inference and serving engine for LLMs. - -#### Install Libraries and Set up Serving - -To install vllm, run - -```bash -pip install vllm -``` - -Taking model `meta-llama/Llama-2-7b-chat-hf` and port `8000` as an example, to set up model API serving, run - -```bash -bash vllm_script/vllm_setup.sh -m meta-llama/Llama-2-7b-chat-hf -p 8000 -``` - -#### Supported models - -Please refer to the [supported models list](https://docs.vllm.ai/en/latest/models/supported_models.html) of vllm. - -#### How to use in AgentScope - -Now you can load the model in AgentScope by the following model config: `vllm_script/model_config.json`. 
- -```json -{ - "config_name": "meta-llama/Llama-2-7b-chat-hf", - "model_type": "openai", - "api_key": "EMPTY", - "client_args": { - "base_url": "http://127.0.0.1:8000/v1/" - }, - "generate_args": { - "temperature": 0.5 - } -} -``` - -## Model Inference API - -Both [Huggingface](https://huggingface.co/docs/api-inference/index) and [ModelScope](https://www.modelscope.cn) provide model inference API, which can be used with AgentScope post API model wrapper. -Taking `gpt2` in HuggingFace inference API as an example, you can use the following model config in AgentScope. - -```json -{ - "config_name": "gpt2", - "model_type": "post_api", - "headers": { - "Authorization": "Bearer {YOUR_API_TOKEN}" - } - "api_url": "https://api-inference.huggingface.co/models/gpt2" -} -``` - -## In-memory Models without API - -It is entirely possible to use models without setting up an API service. Here's an example of how to initialize an agent with a local model instance: - -```python -from transformers import AutoModelForCausalLM, AutoTokenizer - -model = AutoModelForCausalLM.from_pretrained(MODEL_NAME) -tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME) -model.eval() -# Do remember to re-implement the `reply` method to tokenize *message*! -agent = YourAgent(name='agent', model_config_name=config_name, tokenizer=tokenizer) -``` - -[[Return to the top]](#using-different-model-sources-with-model-api) diff --git a/docs/sphinx_doc/source/tutorial/204-service.md b/docs/sphinx_doc/source/tutorial/204-service.md deleted file mode 100644 index 9703171a7..000000000 --- a/docs/sphinx_doc/source/tutorial/204-service.md +++ /dev/null @@ -1,135 +0,0 @@ -(204-service)= - -# Enhancing Agent Capabilities with Service Functions - -**Service functions**, often referred to simply as **Service**, constitute a versatile suite of utility tools that can be used to enhance the functionality of agents. A service is designed to perform a specific task like web search, code interpretation, or file processing. 
Services can be invoked by agents and other components for reuse across different scenarios. - -## ServiceResponse - -The design behind `Service` distinguishes them from typical Python functions. In scenarios where execution is failed, service functions do not raise exceptions within the program. Instead, they return a `ServiceResponse` (a sub-class of dict). - -```python -def demo_service() -> ServiceResponse: - #do some specifc actions - # ...... - res = ServiceResponse({status=status, content=content}) - return res - - -class ServiceResponse(dict): - """Used to wrap the execution results of the services""" - # ... [code omitted for brevity] - - def __init__( - self, - status: ServiceExecStatus, - content: Any, - ): - self.status = status - self.content = content -``` - -This object encapsulates `status` of the execution (`SUCCESS` or `ERROR`), which can indicate success or failure, and the `content`, which can either be the output of a successful execution or the error stack from a failure. - -Here's why this design is beneficial: - -- **Error Handling**: `Service` and `ServiceResponse` allows agents to flexibly handle errors. An agent can check the status of the response and decide on the next steps, whether to retry the operation, use fallback logic, or analyze the error stack and choose an appropriate strategy to make improvements. -- **Consistency**: Service functions provide a consistent interface for both successful outcomes and errors. This consistency simplifies the interaction model for agents that use these services. - -## Overview of Service Functions - -Below is a table outlining various service functions categorized by their primary domain. These services offer a range of capabilities to agents. 
- -| Service Scenario | Service Function Name | Description | -| --------------- | --------------------- | ------------------------------------------------------------ | -| Code | `execute_python_code` | Execute a string of Python code, optionally inside a Docker container. | -| Retrieval | `retrieve_from_list` | Retrieve specific items from a list based on given criteria. | -| SQL Query | `query_mysql` | Execute a SQL query against a MySQL database and return results. | -| | `query_sqlite` | Execute a SQL query against a SQLite database and return results. | -| | `query_mongodb` | Perform a query or operation against a MongoDB collection. | -| Text Processing | `summarization` | Summarize a block of text to highlight the main points with LLM. | -| Web Search | `web_search` | Perform a web search using a specified search engine (currently supports Google and Bing). | -| File | `create_file` | Create a new file at a specified path with optional initial content. | -| | `delete_file` | Delete a file specified by the file path. | -| | `move_file` | Move or rename a file from one path to another. | -| | `create_directory` | Create a new directory at a specified path. | -| | `delete_directory` | Delete a directory and all of its contents. | -| | `move_directory` | Move or rename a directory from one path to another. | -| | `read_text_file` | Read and return the contents of a text file. | -| | `write_text_file` | Write text content to a file at a specified path. | -| | `read_json_file` | Read and parse the contents of a JSON file. | -| | `write_json_file` | Serialize a Python object to JSON and write it to a file. | -| *More to Come* | | Additional service functions are being developed and will be added to enhance the capabilities of AgentScope further. 
| - -For details about each Service Function, please consult the API references, where the docstrings provide comprehensive information about the parameters, expected input formats, return types, and any additional options that can modify the behavior of the Service Function. - -## Usage - -In AgentScope, each Service Function comes with a meticulously crafted docstring and demonstrative test functions that provide detailed instructions on how to utilize it. To enhance the capabilities of your agents with these services, you can craft prompts for LLM to generate parameters for Service: - -By composing appropriate prompts that align with the information detailed in the Service Functions' docstrings, you can guide an LLM to generate responses that match the required parameters of a `Service`. - -```python -import json -import inspect -from agentscope.service import ServiceResponse -from agentscope.agents import AgentBase - - -def create_file(file_path: str, content: str = "") -> ServiceResponse: - """ - Create a file and write content to it. - - Args: - file_path (str): The path where the file will be created. - content (str): Content to write into the file. - - Returns: - ServiceResponse: where the boolean indicates success, and the - str contains an error message if any, including the error type. - """ - # ... [code omitted for brevity] - - -class YourAgent(AgentBase): - # ... [code omitted for brevity] - - def reply(self, x: dict = None) -> dict: - # ... 
[code omitted for brevity] - - # Construct the prompt for the agent to provide parameters in JSON - # format - prompt = ( - f"To complete the user request\n```{x['content']}```\n" - "Please provide the necessary parameters in JSON format for the " - "function:\n" - f"Function: {create_file.__name__}\n" - "Description: Create a file and write content to it.\n" - ) - - # Add details about the function parameters - sig = inspect.signature(create_file) - parameters = sig.parameters.items() - params_prompt = "\n".join( - f"- {name} ({param.annotation.__name__}): " - f"{'(default: ' + json.dumps(param.default) + ')'if param.default is not inspect.Parameter.empty else ''}" - for name, param in parameters - ) - prompt += params_prompt - - # Get the model response - model_response = self.model(prompt).text - - # Parse the model response and call the create_file function - # Additional extraction functions might be necessary - try: - kwargs = json.loads(model_response) - create_file(**kwargs) - except: - # Error handling - pass - - # ... [code omitted for brevity] -``` - -[[Return to the top]](#enhancing-agent-capabilities-with-service-functions) diff --git a/docs/sphinx_doc/source/tutorial/208-distribute.md b/docs/sphinx_doc/source/tutorial/208-distribute.md deleted file mode 100644 index 22e0f8d63..000000000 --- a/docs/sphinx_doc/source/tutorial/208-distribute.md +++ /dev/null @@ -1,153 +0,0 @@ -(208-distribute)= - -# Make Your Applications Distributed - -AgentScope is designed to be fully distributed, agent instances in one application can be deployed on different machines and run in parallel. This tutorial will introduce the features of AgentScope distributed and the distributed deployment method. - -## Features - -### Every agent is an "Actor" - -[The actor model](https://en.wikipedia.org/wiki/Actor_model) is a popular concept in concurrent programming and adopted by AgentScope. Every agent is an actor and interacts with other agents through messages. 
The flow of messages implies the execution order of the agent. Each agent has a `reply` method that consumes a message and generates another message, and the generated message can be sent to other agents. For example, the figure below shows the workflow of multiple agents. `A` to `F` are all agents, and the arrows represent messages. - -```{mermaid} -graph LR; -A-->B -A-->C -B-->D -C-->D -E-->F -D-->F -``` - -Among them, `B` and `C` can start execution simultaneously after receiving the message from `A`, and `E` can run immediately without waiting for `A`, `B`, `C,` and `D`. -By implementing each agent as an actor, an agent will automatically wait for its input `Msg` before starting to execute the reply method, and multiple agents can also automatically execute `reply` at the same time if their input messages are ready, which avoids complex parallel control and makes things simple. - -### Write centrally, run distributedly - -In AgentScope, agents can be started as separate processes on the same or different machines. However, application developers do not need to pay attention to where these agents are running; you only need to write application code in the main process using the procedural programming paradigm. AgentScope will help you convert the task into a distributed version. The following is a piece of application code: `A`, `B`, and `C` are running on different machines. - -``` -x = A() -y = B(x) -z = C(x) -``` - -Although this code appears to be executed completely sequentially, AgentScope will **automatically detect potential parallelism** in your code as shown in the flow graph below, which means `C` will not wait for `B` to complete before starting execution. - -```{mermaid} -graph LR; -A-->B -A-->C -``` - -## Easy Distributed Deployment - -Please follow the steps below to deploy your application distributedly. - -### Convert your agents - -`AgentBase` provided the `to_dist` method to convert the agent into a distributed version. 
-`to_dist` requires several parameters. - -- `host`: the hostname or IP address of the machine where the agent runs, defaults to `localhost`. -- `port`: the port of this agent's RPC server, defaults to `80`. -- `launch_server`: whether to launch an RPC server locally, defaults to `True`. -- `local_mode`: set to `True` if all agents run on the same machine, defaults to `True`. -- `lazy_launch`: if set to `True`, only launch the server when the agent is called. - -> The `to_dist` method is implemented based on [gRPC](https://grpc.io/). When 'launch_server' is set to `True`, it will start a gRPC server process, and the original agent will be transferred to the new process to run. - -### Run in multi-process mode - -AgentScope supports deployment in multi-process mode, where each agent is a sub-process of the application's main process, and all agents run on the same machine. -The usage is exactly the same as single process mode, and you only need to call the `to_dist` method after initialization. - -Suppose you have classes `A` and `B`, both of which inherit from `AgentBase`. - -```python -# import packages - -a = A( - name="A", - ..., -).to_dist() -b = B( - name="B", - ..., -).to_dist() - -x = None -while x is None or x.content != 'exit': - x = a(x) - x = b(x) -``` - -### Run on multiple machines - -AgentScope also supports to run agents on multiple machines. In this case, you need to start agents separately. For example, you can use the following code to start agent `A` on the machine with IP address `ip_a`. - -```python -# import packages - -server_a = RpcAgentServerLauncher( - agent_class=A, - agent_kwargs={ - "name": "A" - ... - }, - host=ip_a, - port=12001, -) -server_a.launch() -server_a.wait_until_terminate() -``` - -Similarly, you can start agent `B` on the machine with IP address `ip_b`. -Please make sure that the two machines can access each other using the IP addresses. 
- -```python -# import packages - -server_b = RpcAgentServerLauncher( - agent_class=B, - agent_kwargs={ - "name": "B", - ... - }, - host=ip_b, - port=12001, -) -server_b.launch() -server_b.wait_until_terminate() -``` - -Then, you can run the application's main process on any machine that can access `ip_a` and `ip_b`. - -```python -# import packages - -a = A( - name="A", - ... -).to_dist( - host=ip_a, - port=12001, - launch_server=False, -) -b = B( - name="B", - ... -).to_dist( - host=ip_b, - port=12002, - launch_server=False, -) - -x = None -while x is None or x.content != 'exit': - x = a(x) - x = b(x) -``` - -[[Return to the top]](#make-your-applications-distributed) diff --git a/docs/sphinx_doc/zh_CN/source/_static/custom.css b/docs/sphinx_doc/zh_CN/source/_static/custom.css new file mode 100644 index 000000000..68f11ceed --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/_static/custom.css @@ -0,0 +1,4 @@ +.language-selector a { + color: white; + width: 20px; +} \ No newline at end of file diff --git a/docs/sphinx_doc/zh_CN/source/_templates/language_selector.html b/docs/sphinx_doc/zh_CN/source/_templates/language_selector.html new file mode 100644 index 000000000..cd289bf7e --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/_templates/language_selector.html @@ -0,0 +1,5 @@ + +
+ English | + 中文 +
diff --git a/docs/sphinx_doc/zh_CN/source/_templates/layout.html b/docs/sphinx_doc/zh_CN/source/_templates/layout.html new file mode 100644 index 000000000..1d182d309 --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/_templates/layout.html @@ -0,0 +1,3 @@ + +{% extends "!layout.html" %} {% block sidebartitle %} {{ super() }} {% include +"language_selector.html" %} {% endblock %} diff --git a/docs/sphinx_doc/zh_CN/source/agentscope.agents.rst b/docs/sphinx_doc/zh_CN/source/agentscope.agents.rst new file mode 100644 index 000000000..ea644bcc9 --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/agentscope.agents.rst @@ -0,0 +1,59 @@ +Agents package +========================== + +operator module +------------------------------- + +.. automodule:: agentscope.agents.operator + :members: + :undoc-members: + :show-inheritance: + +agent module +------------------------------- + +.. automodule:: agentscope.agents.agent + :members: + :undoc-members: + :show-inheritance: + +rpc_agent module +------------------------------- + +.. automodule:: agentscope.agents.rpc_agent + :members: + :undoc-members: + :show-inheritance: + +user_agent module +------------------------------- + +.. automodule:: agentscope.agents.user_agent + :members: + :undoc-members: + :show-inheritance: + +dialog_agent module +------------------------------- + +.. automodule:: agentscope.agents.dialog_agent + :members: + :undoc-members: + :show-inheritance: + +dict_dialog_agent module +------------------------------- + +.. automodule:: agentscope.agents.dict_dialog_agent + :members: + :undoc-members: + :show-inheritance: + + +text_to_image_agent module +------------------------------- + +.. 
automodule:: agentscope.agents.text_to_image_agent + :members: + :undoc-members: + :show-inheritance: \ No newline at end of file diff --git a/docs/sphinx_doc/zh_CN/source/agentscope.memory.rst b/docs/sphinx_doc/zh_CN/source/agentscope.memory.rst new file mode 100644 index 000000000..b8f3bee32 --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/agentscope.memory.rst @@ -0,0 +1,20 @@ +Memory package +========================== + + +memory module +-------------------------------- + +.. automodule:: agentscope.memory.memory + :members: + :undoc-members: + :show-inheritance: + +temporary\_memory module +------------------------------------------- + +.. automodule:: agentscope.memory.temporary_memory + :members: + :undoc-members: + :show-inheritance: + diff --git a/docs/sphinx_doc/zh_CN/source/agentscope.models.rst b/docs/sphinx_doc/zh_CN/source/agentscope.models.rst new file mode 100644 index 000000000..a61cbdfaa --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/agentscope.models.rst @@ -0,0 +1,42 @@ +Models package +========================== + +config module +------------------------------- + +.. automodule:: agentscope.models.config + :members: + :undoc-members: + :show-inheritance: + +model module +------------------------------- + +.. automodule:: agentscope.models.model + :members: + :undoc-members: + :show-inheritance: + +openai\_model module +--------------------------------------- + +.. automodule:: agentscope.models.openai_model + :members: + :undoc-members: + :show-inheritance: + +post\_model module +------------------------------------- + +.. automodule:: agentscope.models.post_model + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: agentscope.models + :members: load_model_by_config_name, clear_model_configs, read_model_configs + :undoc-members: + :show-inheritance: diff --git a/docs/sphinx_doc/zh_CN/source/agentscope.pipelines.rst b/docs/sphinx_doc/zh_CN/source/agentscope.pipelines.rst new file mode 100644 index 000000000..6a387f3eb --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/agentscope.pipelines.rst @@ -0,0 +1,18 @@ +Pipelines package +============================= + +pipeline module +------------------------------------- + +.. automodule:: agentscope.pipelines.pipeline + :members: + :undoc-members: + :show-inheritance: + +functional module +--------------------------------------- + +.. automodule:: agentscope.pipelines.functional + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/sphinx_doc/zh_CN/source/agentscope.rpc.rst b/docs/sphinx_doc/zh_CN/source/agentscope.rpc.rst new file mode 100644 index 000000000..42a61abe9 --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/agentscope.rpc.rst @@ -0,0 +1,20 @@ +RPC package +======================= + +rpc\_agent\_client module +----------------------------------------- + +.. automodule:: agentscope.rpc.rpc_agent_client + :members: + :undoc-members: + :show-inheritance: + + +rpc\_agent\_pb2\_grpc module +-------------------------------------------- + +.. automodule:: agentscope.rpc.rpc_agent_pb2_grpc + :members: + :undoc-members: + :show-inheritance: + diff --git a/docs/sphinx_doc/zh_CN/source/agentscope.rst b/docs/sphinx_doc/zh_CN/source/agentscope.rst new file mode 100644 index 000000000..13a8123cf --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/agentscope.rst @@ -0,0 +1,48 @@ +Module contents +=============== + +constants module +-------------------------------- + +.. automodule:: agentscope.constants + :noindex: + :members: + :undoc-members: + :show-inheritance: + +file\_manager module +-------------------------------- + +.. 
automodule:: agentscope.file_manager + :noindex: + :members: + :undoc-members: + :show-inheritance: + +message module +-------------------------- + +.. automodule:: agentscope.message + :noindex: + :members: + :undoc-members: + :show-inheritance: + +msghub module +------------------------- + +.. automodule:: agentscope.msghub + :noindex: + :members: + :undoc-members: + :show-inheritance: + +prompt module +------------------------- + +.. automodule:: agentscope.prompt + :noindex: + :members: + :undoc-members: + :show-inheritance: + diff --git a/docs/sphinx_doc/zh_CN/source/agentscope.service.execute_code.rst b/docs/sphinx_doc/zh_CN/source/agentscope.service.execute_code.rst new file mode 100644 index 000000000..9019a59aa --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/agentscope.service.execute_code.rst @@ -0,0 +1,11 @@ +Code package +================================ + +exec\_python module +-------------------------------------------- + +.. automodule:: agentscope.service.execute_code.exec_python + :members: + :undoc-members: + :show-inheritance: + diff --git a/docs/sphinx_doc/zh_CN/source/agentscope.service.file.rst b/docs/sphinx_doc/zh_CN/source/agentscope.service.file.rst new file mode 100644 index 000000000..ede1c10a2 --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/agentscope.service.file.rst @@ -0,0 +1,28 @@ +File package +================================ + + +common module +-------------------------------------- + +.. automodule:: agentscope.service.file.common + :members: + :undoc-members: + :show-inheritance: + +json module +------------------------------------ + +.. automodule:: agentscope.service.file.json + :members: + :undoc-members: + :show-inheritance: + +text module +------------------------------------ + +.. 
automodule:: agentscope.service.file.text + :members: + :undoc-members: + :show-inheritance: + diff --git a/docs/sphinx_doc/zh_CN/source/agentscope.service.retrieval.rst b/docs/sphinx_doc/zh_CN/source/agentscope.service.retrieval.rst new file mode 100644 index 000000000..8b3686a43 --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/agentscope.service.retrieval.rst @@ -0,0 +1,20 @@ +Retrieval package +===================================== + + +retrieval\_from\_list module +---------------------------------------------------------- + +.. automodule:: agentscope.service.retrieval.retrieval_from_list + :members: + :undoc-members: + :show-inheritance: + +similarity module +----------------------------------------------- + +.. automodule:: agentscope.service.retrieval.similarity + :members: + :undoc-members: + :show-inheritance: + diff --git a/docs/sphinx_doc/zh_CN/source/agentscope.service.rst b/docs/sphinx_doc/zh_CN/source/agentscope.service.rst new file mode 100644 index 000000000..6f8e7df1c --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/agentscope.service.rst @@ -0,0 +1,33 @@ +Service package +=========================== + + +.. toctree:: + :maxdepth: 4 + + agentscope.service.execute_code + agentscope.service.file + agentscope.service.retrieval + agentscope.service.sql_query + agentscope.service.text_processing + agentscope.service.web_search + + +service\_status module +-------------------------------- + +.. automodule:: agentscope.service.service_status + :noindex: + :members: + :undoc-members: + :show-inheritance: + + +service\_response module +-------------------------------- + +.. 
automodule:: agentscope.service.service_response + :noindex: + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/sphinx_doc/zh_CN/source/agentscope.service.sql_query.rst b/docs/sphinx_doc/zh_CN/source/agentscope.service.sql_query.rst new file mode 100644 index 000000000..6e748635b --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/agentscope.service.sql_query.rst @@ -0,0 +1,27 @@ +SQL query package +====================================== + +mongodb module +--------------------------------------------- + +.. automodule:: agentscope.service.sql_query.mongodb + :members: + :undoc-members: + :show-inheritance: + +mysql module +------------------------------------------- + +.. automodule:: agentscope.service.sql_query.mysql + :members: + :undoc-members: + :show-inheritance: + +sqlite module +-------------------------------------------- + +.. automodule:: agentscope.service.sql_query.sqlite + :members: + :undoc-members: + :show-inheritance: + diff --git a/docs/sphinx_doc/zh_CN/source/agentscope.service.text_processing.rst b/docs/sphinx_doc/zh_CN/source/agentscope.service.text_processing.rst new file mode 100644 index 000000000..a7558d16f --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/agentscope.service.text_processing.rst @@ -0,0 +1,12 @@ +Text processing package +============================================ + + +summarization module +--------------------------------------------------------- + +.. automodule:: agentscope.service.text_processing.summarization + :members: + :undoc-members: + :show-inheritance: + diff --git a/docs/sphinx_doc/zh_CN/source/agentscope.service.web_search.rst b/docs/sphinx_doc/zh_CN/source/agentscope.service.web_search.rst new file mode 100644 index 000000000..895ab915c --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/agentscope.service.web_search.rst @@ -0,0 +1,11 @@ +Web search package +======================================= + +search module +--------------------------------------------- + +.. 
automodule:: agentscope.service.web_search.search + :members: + :undoc-members: + :show-inheritance: + diff --git a/docs/sphinx_doc/zh_CN/source/agentscope.utils.rst b/docs/sphinx_doc/zh_CN/source/agentscope.utils.rst new file mode 100644 index 000000000..740188d8b --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/agentscope.utils.rst @@ -0,0 +1,43 @@ +Utils package +========================= + + +common module +------------------------------- + +.. automodule:: agentscope.utils.common + :members: + :undoc-members: + :show-inheritance: + +logging\_utils module +--------------------------------------- + +.. automodule:: agentscope.utils.logging_utils + :members: + :undoc-members: + :show-inheritance: + +monitor module +-------------------------------- + +.. automodule:: agentscope.utils.monitor + :members: + :undoc-members: + :show-inheritance: + +token\_utils module +------------------------------------- + +.. automodule:: agentscope.utils.token_utils + :members: + :undoc-members: + :show-inheritance: + +tools module +------------------------------ + +.. automodule:: agentscope.utils.tools + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/sphinx_doc/zh_CN/source/agentscope.web.rst b/docs/sphinx_doc/zh_CN/source/agentscope.web.rst new file mode 100644 index 000000000..250189741 --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/agentscope.web.rst @@ -0,0 +1,10 @@ +Web UI package +========================== + +app module +----------------------------- + +.. automodule:: agentscope.web._app + :members: + :show-inheritance: + diff --git a/docs/sphinx_doc/zh_CN/source/conf.py b/docs/sphinx_doc/zh_CN/source/conf.py new file mode 100644 index 000000000..9958a8963 --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/conf.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. 
For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +# import os +# import sys + +# sys.path.insert(0, os.path.abspath("../../../src/agentscope")) + + +# -- Project information ----------------------------------------------------- + +language = "zh_CN" + +project = "AgentScope" +copyright = "2024, Alibaba Tongyi Lab" +author = "SysML team of Alibaba Tongyi Lab" + + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.viewcode", + "sphinx.ext.napoleon", + "sphinxcontrib.mermaid", + "myst_parser", + "sphinx.ext.autosectionlabel", +] + +# Prefix document path to section labels, otherwise autogenerated labels would +# look like 'heading' rather than 'path/to/file:heading' +autosectionlabel_prefix_document = True + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. 
+# +html_theme = "sphinx_rtd_theme" + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +html_theme_options = { + "navigation_depth": 4, +} + +source_suffix = { + ".rst": "restructuredtext", + ".md": "markdown", +} + +html_css_files = [ + "custom.css", +] diff --git a/docs/sphinx_doc/zh_CN/source/index.rst b/docs/sphinx_doc/zh_CN/source/index.rst new file mode 100644 index 000000000..8373a6b1e --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/index.rst @@ -0,0 +1,46 @@ +.. AgentScope documentation master file, created by + sphinx-quickstart on Fri Jan 5 17:53:54 2024. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +:github_url: https://github.com/modelscope/agentscope + +AgentScope 文档 +====================================== + + +.. include:: tutorial_zh/main.md + :parser: myst_parser.sphinx_ + +.. toctree:: + :maxdepth: 1 + :glob: + :hidden: + :caption: AgentScope 教程 + + tutorial_zh/quick_start.rst + tutorial_zh/advance.rst + tutorial_zh/contribute.rst + + +.. 
toctree:: + :maxdepth: 1 + :glob: + :caption: AgentScope API 文档 + + agentscope.agents + agentscope.memory + agentscope.models + agentscope.pipelines + agentscope.service + agentscope.rpc + agentscope.utils + agentscope.web + agentscope + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/sphinx_doc/zh_CN/source/tutorial_zh/101-agentscope.md b/docs/sphinx_doc/zh_CN/source/tutorial_zh/101-agentscope.md new file mode 100644 index 000000000..b3b8ae6b8 --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/tutorial_zh/101-agentscope.md @@ -0,0 +1,93 @@ +(101-agentscope-zh)= + +# 关于AgentScope + +在此教程中,我们将通过回答问题的方式向您介绍AgentScope,包括什么是AgentScope,AgentScope +能做什么,以及我们为什么应该选择AgentScope。让我们开始吧! + +## 什么是AgentScope? + +AgentScope是以开发者为中心的多智能体平台,它使开发者能够更轻松地构建基于大语言模型的多智能体应用程序。 + +大模型的出现使得开发者能够构建多样化的应用程序,为了连接大语言模型和数据以及服务,并更好地解 +决复杂任务,AgentScope提供了一系列的开发工具和组件来提高开发效率。AgentScope以 + +- **易用性** +- **鲁棒性** +- **支持多模态数据** +- **分布式部署** + +为特点。 + +## 关键概念 + +### 消息(Message) + +是信息的载体(例如指令、多模态数据和对话内容)。在AgentScope中,消息是Python字典的子类, +具有`name`和`content`作为必要字段,`url`作为可选字段并指向额外的资源。 + +### 智能体(Agent) + +是能够与环境和其他智能体交互,并采取行动改变环境的自主实体。在AgentScope中, +智能体以消息作为输入,并生成相应的响应消息。 + +### 服务(Service) + +是使智能体能够执行特定任务的功能性API。在AgentScope中,服务分为模型API服务 +(用于使用大语言模型)和通用API服务(提供各种工具函数)。 + +### 工作流(Workflow) + +表示智能体执行和智能体之间的消息交换的有序序列,类似于TensorFlow中的计算图, +但其并不一定是DAG结构。 + +## 为什么选择AgentScope? + +**面向开发者的易用性。** +AgentScope为开发者提供了高易用性,包括灵活易用的语法糖、即拿即用的组件和预构建的multi-agent样例。 + +**可靠稳定的容错机制。** +AgentScope确保了对多种模型和APIs的容错性,并允许开发者构建定制的容错策略。 + +**全面兼容多模态数据。** +AgentScope支持多模态数据(例如文件、图像、音频和视频)的对话展示、消息传输和数据存储。 + +**高效分布式运行效率。** +AgentScope引入了基于actor的分布式机制,使得复杂的分布式工作流的集中式编程和自动并行优化成为可能。 + +## AgentScope是如何设计的?
+ +AgentScope由三个层次的层次结构组成。 +这些层次提供了对多智能体应用程序的支持,包括单个智能体的基本和高级功能(实用程序层)、资源和运行时管理(管理器和包装层)以及智能体级到工作流级的编程接口(智能体层)。 +AgentScope引入了直观的抽象,旨在满足每个层次固有的多样化功能,并简化构建多智能体系统时的复杂层间依赖关系。 +此外,我们提供了编程接口和默认机制,以增强多智能体系统在不同层次上对故障的韧性。 + +## AgentScope代码结构 + +```bash +AgentScope +├── src +│ ├── agentscope +│ | ├── agents # 与智能体相关的核心组件和实现。 +│ | ├── memory # 智能体记忆相关的结构。 +│ | ├── models # 用于集成不同模型API的接口。 +│ | ├── pipeline # 基础组件和实现,用于运行工作流。 +│ | ├── rpc # Rpc模块,用于智能体分布式部署。 +│ | ├── service # 为智能体提供各种功能的服务。 +| | ├── web # 基于网页的用户交互界面。 +│ | ├── utils # 辅助工具和帮助函数。 +│ | ├── prompt.py # 提示工程模块。 +│ | ├── message.py # 智能体之间消息传递的定义和实现。 +│ | ├── ... .. +│ | ├── ... .. +├── scripts # 用于启动本地模型API的脚本。 +├── examples # 不同应用程序的预构建示例。 +├── docs # 教程和API参考文档。 +├── tests # 单元测试模块,用于持续集成。 +├── LICENSE # AgentScope使用的官方许可协议。 +└── setup.py # 用于安装的设置脚本。 +├── ... .. +└── ... .. +``` + +[[返回顶端]](#101-agentscope-zh) diff --git a/docs/sphinx_doc/zh_CN/source/tutorial_zh/102-installation.md b/docs/sphinx_doc/zh_CN/source/tutorial_zh/102-installation.md new file mode 100644 index 000000000..3c5ee0a4c --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/tutorial_zh/102-installation.md @@ -0,0 +1,69 @@ +(102-installation-zh)= + +# 安装 + +为了安装AgentScope,您需要安装Python 3.9或更高版本。我们建议专门为AgentScope设置一个新的虚拟环境: + +## 创建虚拟环境 + +### 使用Conda + +如果您使用Conda作为环境管理工具,您可以使用以下命令创建一个新的Python 3.9虚拟环境: + +```bash +# 使用Python 3.9创建一个名为"agentscope"的新虚拟环境 +conda create -n agentscope python=3.9 + +# 激活虚拟环境 +conda activate agentscope +``` + +### 使用Virtualenv + +如果您使用`virtualenv`,您可以首先安装它(如果尚未安装),然后按照以下步骤创建一个新的虚拟环境: + +```bash +# 如果尚未安装virtualenv,请先安装它 +pip install virtualenv + +# 使用Python 3.9创建一个名为"agentscope"的新虚拟环境 +virtualenv agentscope --python=python3.9 + +# 激活虚拟环境 +source agentscope/bin/activate # 在Windows上使用`agentscope\Scripts\activate` +``` + +## 安装AgentScope + +### 从源码安装 + +按照以下步骤从源代码安装AgentScope,并以可编辑模式安装AgentScope: + +**_注意:该项目正在积极开发中,建议从源码安装AgentScope!_** + +```bash +# 从GitHub上拉取AgentScope的源代码 +git clone 
https://github.com/modelscope/agentscope.git +cd agentscope + +# 针对本地化的multi-agent应用 +pip install -e . +# 为分布式multi-agent应用 +pip install -e .[distribute] # 在Mac上使用`pip install -e .\[distribute\]` +``` + +**注意**:`[distribute]`选项安装了分布式应用程序所需的额外依赖项。在运行这些命令之前,请激活您的虚拟环境。 + +### 使用Pip安装 + +如果您选择从PyPI安装AgentScope,可以使用`pip`轻松地完成: + +```bash +# 针对本地化的multi-agent应用 +pip install agentscope + +# 为分布式multi-agent应用 +pip install agentscope[distribute] # 在Mac上使用`pip install agentscope\[distribute\]` +``` + +[[返回顶端]](#102-installation-zh) diff --git a/docs/sphinx_doc/zh_CN/source/tutorial_zh/103-example.md b/docs/sphinx_doc/zh_CN/source/tutorial_zh/103-example.md new file mode 100644 index 000000000..1095832aa --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/tutorial_zh/103-example.md @@ -0,0 +1,117 @@ +(103-example-zh)= + +# 快速开始 + +AgentScope内置了灵活的通信机制。在本教程中,我们将通过一个简单的独立对话示例介绍AgentScope的基本用法。 + +## 第一步:准备模型 + +为了更好地构建多智能体应用,AgentScope将模型的部署与调用解耦开,以API服务调用的方式支持各种不同的模型。 + +在模型部署方面,用户可以使用第三方模型服务,例如OpenAI API,HuggingFace Inference +API,同时也可以通过仓库中的[脚本](https://github.com/modelscope/agentscope/blob/main/scripts/README.md)快速部署本地开源模型服务, +目前已支持通过Flask配合Transformers(或ModelScope)快速建立基础的模型服务,同时也已经支持通过FastChat和vllm等推理引擎部署本地模型服务。 + +模型调用方面,AgentScope通过`ModelWrapper`类提供OpenAI API和RESTful Post Request调用的封装。 +目前支持的OpenAI API包括了对话(Chat),图片生成(Image generation)和嵌入式(Embedding)。 +用户可以通过设定不同的model config来指定模型服务。 + +| 模型使用 | APIs | +|--------------|------------------------------------------------------------------------| +| 文本生成 | *OpenAI* chat API,FastChat和vllm | +| 图片生成 | *DALL-E* API | +| 文本嵌入 | 文本Embedding | +| 基于Post请求的API | *Huggingface*/*ModelScope* Inference API,以及用户自定义的基于Post请求的API | + +每种API都有其特定的配置要求。例如,要配置OpenAI API,您需要在模型配置中填写以下字段: + +```python +model_config = { + "config_name": "{config_name}", # A unique name for the model config. + "model_type": "openai", # Choose from "openai", "openai_dall_e", or "openai_embedding".
+ "model_name": "{model_name}", # The model identifier used in the OpenAI API, such as "gpt-3.5-turbo", "gpt-4", or "text-embedding-ada-002". + "api_key": "xxx", # Your OpenAI API key. If unset, the environment variable OPENAI_API_KEY is used. + "organization": "xxx", # Your OpenAI organization ID. If unset, the environment variable OPENAI_ORGANIZATION is used. +} +``` + +对于开源模型,我们支持与HuggingFace、ModelScope、FastChat和vllm等各种模型接口的集成。您可以在`scripts +`目录中找到部署这些服务的脚本,详细说明请见[[模型服务]](203-model). + +您可以通过调用AgentScope的初始化方法来注册您的配置。此外,您还可以一次性加载多个模型配置。 + +```python +import agentscope + +# 一次性初始化多个模型配置 +openai_cfg_dict = { + # ... +} +modelscope_cfg_dict = { + # ... +} +agentscope.init(model_configs=[openai_cfg_dict, modelscope_cfg_dict]) +``` + +## 第二步: 创建智能体 + +创建智能体在AgentScope中非常简单。在初始化AgentScope时,您可以使用模型配置初始化AgentScope,然后定义每个智能体及其对应的角色和特定模型。 + +```python +import agentscope +from agentscope.agents import DialogAgent, UserAgent + +# 读取模型配置 +agentscope.init(model_configs="./model_configs.json") + +# 创建一个对话智能体和一个用户智能体 +dialogAgent = DialogAgent(name="assistant", model_config_name="gpt-4", sys_prompt="You are a helpful ai assistant") +userAgent = UserAgent() +``` + +**注意**:请参考[[使用Agent Pool自定义您的自定义智能体]](201-agent)以获取所有可用的智能体以及创建自定义的智能体。 + +## 第三步:智能体对话 + +消息(Message)是AgentScope中智能体之间的主要通信手段。 +它是一个Python字典,包括了一些基本字段,如消息的`content`和消息发送者的`name`。可选地,消息可以包括一个`url`,指向本地文件(图像、视频或音频)或网站。 + +```python +from agentscope.message import Msg + +# 来自Alice的简单文本消息示例 +message_from_alice = Msg("Alice", "Hi!") + +# 来自Bob的带有附加图像的消息示例 +message_from_bob = Msg("Bob", "What about this picture I took?", url="/path/to/picture.jpg") +``` + +为了在两个智能体之间开始对话,例如`dialog_agent`和`user_agent`,您可以使用以下循环。对话将持续进行,直到用户输入`"exit"`,这将终止交互。 + +```python +x = None +while True: + x = dialogAgent(x) + x = userAgent(x) + + # 如果用户输入"exit",则终止对话 + if x.content == "exit": + print("Exiting the conversation.") + break +``` + +进阶的使用中,AgentScope提供了Pipeline来管理智能体之间消息流的选项。 +其中`sequentialpipeline`代表顺序对话,每个智能体从上一个智能体接收消息并生成其响应。 + 
+```python +from agentscope.pipelines.functional import sequentialpipeline + +# 在Pipeline结构中执行对话循环 +x = None +while x is None or x.content != "exit": + x = sequentialpipeline([dialog_agent, user_agent]) +``` + +有关如何使用Pipeline进行复杂的智能体交互的更多细节,请参考[[Agent Interactions: Dive deeper into Pipelines and Message Hub]](202-pipeline)。 + +[[返回顶部]](#103-example-zh) diff --git a/docs/sphinx_doc/zh_CN/source/tutorial_zh/104-usecase.md b/docs/sphinx_doc/zh_CN/source/tutorial_zh/104-usecase.md new file mode 100644 index 000000000..2ae04abc9 --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/tutorial_zh/104-usecase.md @@ -0,0 +1,319 @@ +(104-usecase-zh)= + +# 创造您的第一个应用 + +img + +在介绍更多AgentScope的高阶内容前,我们先会给您展示如何利用AgentScope内置的功能,快速搭建一个狼人杀游戏模拟应用。 + +**狼人杀**是一个广为人知的桌面游戏。游戏设定在一个虚拟世界的村庄里。村庄里有真正的村民、也有伪装成村民的狼人;每个参与者都在游戏中都有自己的角色。对于村民方而言,他们的胜利条件是在全灭之前找出并杀死所有的狼人;对于狼人方而言,胜利条件就是杀死所有的村民。狼人杀这样的游戏是一个很好的可以自动展示多个有不同目标的智能体之间如何互动。 + +话不多说,让我们开始通过狼人杀这个游戏,解锁AgentScope多智能体的应用吧! + +## 开始 + +首先,确保您已经正确安装和配置好AgentScope。除此之外,本节内容会涉及到`Model API`, `Agent`, `Msg`和 `Pipeline`这几个概念(详情可以参考[关于AgentScope](101-agentscope))。以下是本节教程内容概览。 + +**提示**:本教程中的所有配置和代码文件均可以在`examples/werewolf`中找到。 + +### 第一步: 准备模型API和设定模型配置 + +就像我们在上一节教程中展示的,您需要为您选择的OpenAI chat API, FastChat, 或vllm 准备一个JSON样式的模型配置文件。更多细节和高阶用法,比如用POST API配置本地模型,可以参考[关于模型](203-model)。 + +```json +[ + { + "config_name": "gpt-4-temperature-0.0", + "model_type": "openai", + "model_name": "gpt-4", + "api_key": "xxx", + "organization": "xxx", + "generate_args": { + "temperature": 0.0 + } + }, +] +``` + +### 第二步:定义每个智能体(Agent)的角色 + +在狼人杀游戏中,不同智能体会扮演不同角色;不同角色的智能体也有不同的能力和目标。下面便是我们大概归纳: + +- 普通村民:普通的村民,没有特殊能力,只是寻求生存到最后。 +- 狼人:伪装成村民的掠夺者,目标是比村民活得更久并杀死村民们。 +- 预言家:一位拥有每晚看到一名玩家真实身份能力的村民。 +- 女巫:一位村民,每晚可以救活或毒杀一名玩家。 + +要实现你自己的agent,你需要继承AgentBase并实现reply函数,当通过agent1(x)调用agent实例时,将执行此函数。 + +```python +from agentscope.agents import AgentBase + +class MyAgent(AgentBase): + def reply(self, x): + # Do something here + ...
+ return x +``` + +AgentScope提供了几种开箱即用的agent实现,作为一个agent样例池。在这个应用程序中,我们使用一个内置agent,DictDialogAgent。这里我们给出一个将玩家分配为狼人角色的DictDialogAgent的示例配置: + +```json +{ + "class": "DictDialogAgent", + "args": { + "name": "Player1", + "sys_prompt": "Act as a player in a werewolf game. You are Player1 and\nthere are totally 6 players, named Player1, Player2, Player3, Player4, Player5 and Player6.\n\nPLAYER ROLES:\nIn werewolf game, players are divided into two werewolves, two villagers, one seer, and one witch. Note only werewolves know who are their teammates.\nWerewolves: They know their teammates' identities and attempt to eliminate a villager each night while trying to remain undetected.\nVillagers: They do not know who the werewolves are and must work together during the day to deduce who the werewolves might be and vote to eliminate them.\nSeer: A villager with the ability to learn the true identity of one player each night. This role is crucial for the villagers to gain information.\nWitch: A character who has a one-time ability to save a player from being eliminated at night (sometimes this is a potion of life) and a one-time ability to eliminate a player at night (a potion of death).\n\nGAME RULE:\nThe game consists of two phases: night phase and day phase. The two phases are repeated until werewolf or villager wins the game.\n1. Night Phase: During the night, the werewolves discuss and vote for a player to eliminate. Special roles also perform their actions at this time (e.g., the Seer chooses a player to learn their role, the witch chooses a decide if save the player).\n2. Day Phase: During the day, all surviving players discuss who they suspect might be a werewolf. No one reveals their role unless it serves a strategic purpose. 
After the discussion, a vote is taken, and the player with the most votes is \"lynched\" or eliminated from the game.\n\nVICTORY CONDITION:\nFor werewolves, they win the game if the number of werewolves is equal to or greater than the number of remaining villagers.\nFor villagers, they win if they identify and eliminate all of the werewolves in the group.\n\nCONSTRAINTS:\n1. Your response should be in the first person.\n2. This is a conversational game. You should respond only based on the conversation history and your strategy.\n\nYou are playing werewolf in this game.\n", + "model_config_name": "gpt-3.5-turbo", + "use_memory": true + } +} +``` + +在这个配置中,Player1被指定为一个DictDialogAgent。参数包括一个系统提示(sys_prompt),它可以指导agent的行为;一个模型配置名(model_config_name),它决定了模型配置的名称;以及一个标志(use_memory),指示agent是否应该记住过去的互动。 + +对于其他玩家,大家可以根据他们的角色进行定制。每个角色可能有不同的提示、模型或记忆设置。你可以参考位于AgentScope示例目录下的`examples/werewolf/configs/agent_configs.json`文件。 + +### 第三步:初始化AgentScope和Agents + +现在我们已经定义了角色,我们可以初始化AgentScope环境和所有agents。这个过程通过AgentScope的几行代码和我们准备的配置文件(假设有2个狼人、2个村民、1个女巫和1个预言家)就能简单完成: + +```python +import agentscope + +# read model and agent configs, and initialize agents automatically +survivors = agentscope.init( + model_configs="./configs/model_configs.json", + agent_configs="./configs/agent_configs.json", + logger_level="DEBUG", +) + +# Define the roles within the game. This list should match the order and number +# of agents specified in the 'agent_configs.json' file. +roles = ["werewolf", "werewolf", "villager", "villager", "seer", "witch"] + +# Based on their roles, assign the initialized agents to variables. +# This helps us reference them easily in the game logic. 
+wolves, villagers, witch, seer = survivors[:2], survivors[2:-2], survivors[-1], survivors[-2] +``` + +上面这段代码中,我们为我们的agent分配了角色,并将它们与决定它们行为的配置相关联。 + +### 第四步:构建游戏逻辑 + +在这一步中,你将使用AgentScope的辅助工具设置游戏逻辑,并组织狼人游戏的流程。 + +#### 使用 Pipeline 和 MsgHub + +为了简化agent通信的构建,AgentScope提供了两个有用的概念:Pipeline和MsgHub。 + +- **Pipeline**:它能让用户轻松地编程实现agent之间的不同通信编排。 + + ```python + from agentscope.pipelines import SequentialPipeline + + pipe = SequentialPipeline(agent1, agent2, agent3) + x = pipe(x) # the message x will be passed and replied by agent 1,2,3 in order + ``` + +- **MsgHub**:你可能已经注意到,上述所有例子都是一对一通信。为了实现群聊,我们提供了另一个通信辅助工具msghub。有了它,参与者的消息将自动广播给所有其他参与者。在这种情况下,参与agent甚至不需要输入和输出消息。我们需要做的只是决定发言的顺序。此外,msghub还支持参与者的动态控制。 + + ```python + with msghub(participants=[agent1, agent2, agent3]) as hub: + agent1() + agent2() + + # Broadcast a message to all participants + hub.broadcast(Msg("Host", "Welcome to join the group chat!")) + + # Add or delete participants dynamically + hub.delete(agent1) + hub.add(agent4) + ``` + +#### 实现狼人杀的游戏流程 + +游戏逻辑分为两个主要阶段:(1)夜晚,狼人行动;以及(2)白天,所有玩家讨论和投票。每个阶段都将通过使用pipelines来管理多agent通信的代码部分来处理。 + +- **1.1 夜晚阶段:狼人讨论和投票** + +在夜晚阶段,狼人必须相互讨论以决定一个要杀死的目标。msghub函数为狼人之间的通信创建了一个消息中心,其中每个agent发送的消息都能被msghub内的所有其他agent观察到。 + +```python +# start the game +for i in range(1, MAX_GAME_ROUND + 1): + # Night phase: werewolves discuss + hint = HostMsg(content=Prompts.to_wolves.format(n2s(wolves))) + with msghub(wolves, announcement=hint) as hub: + for _ in range(MAX_WEREWOLF_DISCUSSION_ROUND): + x = sequentialpipeline(wolves) + if x.agreement: + break +``` + +讨论结束后,根据少数服从多数,狼人进行投票选出他们的目标。然后,投票的结果将广播给所有狼人。 + +注意:具体的提示和实用函数可以在`examples/werewolf`中找到。 + +```python + # werewolves vote + hint = HostMsg(content=Prompts.to_wolves_vote) + votes = [extract_name_and_id(wolf(hint).content)[0] for wolf in wolves] + # broadcast the result to werewolves + dead_player = [majority_vote(votes)] + hub.broadcast( + HostMsg(content=Prompts.to_wolves_res.format(dead_player[0])), + ) +``` + +- **1.2 女巫的回合** 
+ +如果女巫还活着,她就有机会使用她的力量:救被狼人选中的(被杀的)玩家,或使用她的毒药去杀一位玩家。 + +```python + # Witch's turn + healing_used_tonight = False + if witch in survivors: + if healing: + # Witch decides whether to use the healing potion + hint = HostMsg( + content=Prompts.to_witch_resurrect.format_map( + {"witch_name": witch.name, "dead_name": dead_player[0]}, + ), + ) + # Witch decides whether to use the poison + if witch(hint).resurrect: + healing_used_tonight = True + dead_player.pop() + healing = False +``` + +- **1.3 预言家的回合** + +预言家有机会揭示一名玩家的真实身份。这信息对于村民方来说可能至关重要。`observe()`函数允许每个agent注意到一个消息,而不需要立即产生回复。 + +```python + # Seer's turn + if seer in survivors: + # Seer chooses a player to reveal their identity + hint = HostMsg( + content=Prompts.to_seer.format(seer.name, n2s(survivors)), + ) + x = seer(hint) + + player, idx = extract_name_and_id(x.content) + role = "werewolf" if roles[idx] == "werewolf" else "villager" + hint = HostMsg(content=Prompts.to_seer_result.format(player, role)) + seer.observe(hint) +``` + +- **1.4 更新存活玩家** + +根据夜间采取的行动,程序需要更新幸存玩家的列表。 + +```python + # Update the list of survivors and werewolves after the night's events + survivors, wolves = update_alive_players(survivors, wolves, dead_player) +``` + +- **2.1 白天阶段:讨论和投票** + +在白天,所有存活玩家将讨论然后投票以淘汰一名疑似狼人的玩家。 + +```python + # Daytime discussion + with msghub(survivors, announcement=hints) as hub: + # Discuss + x = sequentialpipeline(survivors) + # Vote + hint = HostMsg(content=Prompts.to_all_vote.format(n2s(survivors))) + votes = [extract_name_and_id(_(hint).content)[0] for _ in survivors] + vote_res = majority_vote(votes) + # Broadcast the voting result to all players + result = HostMsg(content=Prompts.to_all_res.format(vote_res)) + hub.broadcast(result) + # Update the list of survivors and werewolves after the vote + survivors, wolves = update_alive_players(survivors, wolves, vote_res) +``` + +- **2.2 检查胜利条件** + +每个阶段结束后,游戏会检查是狼人还是村民获胜。 + +```python + # Check if either side has won + if check_winning(survivors, wolves, 
"Moderator"): + break +``` + +- **2.3 继续到下一轮** + +如果狼人和村民都没有获胜,游戏将继续到下一轮。 + +```python + # If the game hasn't ended, prepare for the next round + hub.broadcast(HostMsg(content=Prompts.to_all_continue)) +``` + +这些代码块展现了使用AgentScope的`msghub`和`pipeline`的狼人游戏的核心游戏循环,这些工具有助于轻松管理应用程序的操作逻辑。 + +### 第五步:运行应用 + +完成了以上游戏逻辑和agent的设置,你已经可以运行狼人游戏了。通过执行`pipeline`,游戏将按预定义的阶段进行,agents +基于它们的角色和上述编码的策略进行互动: + +```bash +cd examples/werewolf +python main.py # Assuming the pipeline is implemented in main.py +``` + +游戏开始后,你将在终端看到类似于下面的日志输出。这些日志展示了游戏是如何展开的: + +```bash +Moderator: Player1 and Player2, you are werewolves. If you are alone, eliminate a player, else discuss with your teammates and reach an agreement. Response in the following format which can be loaded by json.loads(){ + "thought": "thought", + "speak": "thoughts summary to say to others", + "agreement": "whether the discussion reached an agreement or not(true/false)" +} +Player1: Player2, let's discuss who we should eliminate tonight. +Player2: I think we should eliminate Player4 tonight. They seem suspicious to me. +Player1: I think we should eliminate Player4 tonight. They seem suspicious to me. +Player2: I agree with Player2, let's eliminate Player4 tonight. They seem suspicious. +Moderator: Which player do you vote to kill? Response in the following format which can be loaded by python json.loads() +{{ + "thought": "thought" , + "speak": "player_name" +}} +Player1: Player4 +Player2: Player4 +Moderator: The player with the most votes is Player4. +Moderator: Player6, you're witch. Tonight Player4 is eliminated. Would you like to resurrect Player4? Response in the following format which can be loaded by python json.loads() +{ + "thought": "thought", + "speak": "thoughts summary to say", + "resurrect": "true/false" +} +Player6: I have considered the options, and I choose to resurrect Player4. +Moderator: Player5, you're seer. 
Which player in Player1, Player2, Player3, Player4, Player5 and Player6 would you like to check tonight? Response in the following json format which can be loaded by python json.loads() +{ + "thought": "thought" , + "speak": "player_name" +} +Player5: Player3 +Moderator: Okay, the role of Player3 is villager. +Moderator: The day is coming, all the players open your eyes. Last night is peaceful, no player is eliminated. +... +``` + +## 下一步 + +现在你已经掌握了如何使用AgentScope方便地设置多agent应用程序。您可以随意修改游戏,包括引入额外的角色或者引入更复杂的策略。如果你想更深入地探索AgentScope的更多功能,比如agent使用的内存管理和服务函数,请参考高级探索部分的教程并查阅API参考。 + +## 其他样例 + +- 简单群聊样例: [examples/Simple Conversation](https://github.com/modelscope/agentscope/tree/main/examples/simple_chat/README.md) +- 狼人杀样例: [examples/Werewolves](https://github.com/modelscope/agentscope/tree/main/examples/werewolves/README.md) +- 分布式agents样例: [examples/Distributed Agents](https://github.com/modelscope/agentscope/tree/main/examples/distributed_agents/README.md) +- ... + +[[返回顶部]](#104-usecase-zh) diff --git a/docs/sphinx_doc/zh_CN/source/tutorial_zh/105-logging.md b/docs/sphinx_doc/zh_CN/source/tutorial_zh/105-logging.md new file mode 100644 index 000000000..107851103 --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/tutorial_zh/105-logging.md @@ -0,0 +1,95 @@ +(105-logging-zh)= + +# 日志和WebUI + +本节教程主要是关于AgentScope的日志记录(logging)功能。我们会介绍如何能美观地将这些日志可视化。这个模块会帮助您更方便、清晰、有组织地跟踪智能体之间的互动和各种系统消息。 + +## Logging + +日志功能首先包含的是一个基于Python内置 `logging`、针对多智能体场景可客制化的`loguru.logger`模块。其包含下面的一些特性: + +- **调整输出字体颜色**:为了增加日志的可读性,该模块为对话中不同的发言智能体提供不同颜色的字体高亮。 +- **重定向错误输出(stderr)**: 该模块自动抓取报错信息,在日志中用`ERROR`层级记录。 +- **客制化日志记录等级**: 该模块增加了一个日志记录等级`CHAT`,用来记录智能体之间的对话和互动。 +- **定制格式**:格式化日志包含了时间戳、记录等级、function名字和行号。智能体之间的对话会用不同的格式显示。 + +### 设置日志记录(Logger) + +我们推荐通过`agentscope.init`来设置logger,包括设定记录等级: + +```python +import agentscope + +LOG_LEVEL = Literal[ + "CHAT", + "TRACE", + "DEBUG", + "INFO", + "SUCCESS", + "WARNING", + "ERROR", + "CRITICAL", +] + +agentscope.init(..., logger_level="INFO") +``` + +### 
Logging a Chat Message + +### 记录对话消息 + +开发者可以通过记录`message`来追踪智能体之间的对话。下面是一些简单的如何记录`message`的例子: + +```python +# Log a simple string message. +logger.chat("Hello World!") + +# Log a `msg` representing dialogue with a speaker and content. +logger.chat({"name": "User", "content": "Hello, how are you?"}) +logger.chat({"name": "Agent", "content": "I'm fine, thank you!"}) +``` + +### 记录系统信息 + +系统日志对于跟踪应用程序的状态和识别问题至关重要。以下是记录不同级别系统信息的方法: + +```python +# Log general information useful for understanding the flow of the application. +logger.info("The dialogue agent has started successfully.") + +# Log a warning message indicating a potential issue that isn't immediately problematic. +logger.warning("The agent is running slower than expected.") + +# Log an error message when something has gone wrong. +logger.error("The agent encountered an unexpected error while processing a request.") +``` + +## 将日志与WebUI集成 + +为了可视化这些日志和运行细节,AgentScope提供了一个简单的网络界面。 + +### 快速运行 + +你可以用以下Python代码运行WebUI: + +```python +import agentscope + +agentscope.web.init( + path_save="YOUR_SAVE_PATH" +) +``` + +通过这种方式,你可以在 `http://127.0.0.1:5000` 中看到所有运行中的实例和项目,如下所示 + +![webui](https://img.alicdn.com/imgextra/i3/O1CN01kpHFkn1HpeYEkn60I_!!6000000000807-0-tps-3104-1849.jpg) + +通过点击一个运行中的实例,我们可以观察到更多细节。 + +![The running details](https://img.alicdn.com/imgextra/i2/O1CN01AZtsf31MIHm4FmjjO_!!6000000001411-0-tps-3104-1849.jpg) + +### 注意 + +WebUI仍在开发中。我们将在未来提供更多功能和更好的用户体验。 + +[[返回顶部]](#105-logging-zh) diff --git a/docs/sphinx_doc/zh_CN/source/tutorial_zh/201-agent.md b/docs/sphinx_doc/zh_CN/source/tutorial_zh/201-agent.md new file mode 100644 index 000000000..9a1bcb52b --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/tutorial_zh/201-agent.md @@ -0,0 +1,174 @@ +(201-agent-zh)= + +# 定制你自己的Agent + +本教程帮助你更深入地理解Agent,并引导你使用AgentScope定制你自己的agent。 +我们首先介绍一个称为AgentBase的基本抽象概念,它作为基类维护所有agent的通用行为。然后,我们将探讨AgentPool,这是一个由预构建的、专门化的agent组成的集合,每个agent都设计有特定的目的。最后,我们将演示如何定制你自己的agent,确保它符合你项目的需求。 + +## 理解 `AgentBase` + 
+`AgentBase`类是AgentScope内所有agent结构的架构基石。作为所有自定义agent的超类,它提供了一个包含基本属性和方法的综合模板,这些属性和方法支撑了任何会话agent的核心功能。 + +每个AgentBase的派生类由几个关键特性组成: + +* `memory`(记忆):这个属性使agent能够保留和回忆过去的互动,允许它们在持续的对话中保持上下文。关于memory的更多细节,我们会在[记忆和消息管理部分](205-memory)讨论。 + +* `model`(模型):模型是agent的计算引擎,负责根据现有的记忆和输入做出响应。关于model的更多细节,我们在[使用模型API与不同模型源部分](203-model)讨论 + +* `sys_prompt`(系统提示)和`engine`(引擎):系统提示作为预定义的指令,指导agent在其互动中的行为;而engine用于动态生成合适的提示。关于它们的更多细节,我们会在[提示引擎部分](206-prompt)讨论。 + +除了这些属性,`AgentBase` 还为agent提供了一些关键方法,如 `observe` 和 `reply`: + +* `observe()`:通过这个方法,一个agent可以注意到消息而不立即回复,允许它根据观察到的消息更新它的记忆。 +* `reply()`:这是开发者必须实现的主要方法。它定义了agent对于传入消息的响应行为,封装了agent输出的逻辑。 + +此外,为了统一接口和类型提示,我们引入了另一个基类`Operator`,它通过 `__call__` 函数表示对输入数据执行某些操作。并且我们让 `AgentBase` 成为 `Operator` 的一个子类。 + +```python +class AgentBase(Operator): + # ... [code omitted for brevity] + + def __init__( + self, + name: str, + sys_prompt: Optional[str] = None, + model_config_name: str = None, + use_memory: bool = True, + memory_config: Optional[dict] = None, + ) -> None: + + # ... [code omitted for brevity] + def observe(self, x: Union[dict, Sequence[dict]]) -> None: + # An optional method for updating the agent's internal state based on + # messages it has observed. This method can be used to enrich the + # agent's understanding and memory without producing an immediate + # response. + self.memory.add(x) + + def reply(self, x: dict = None) -> dict: + # The core method to be implemented by custom agents. It defines the + # logic for processing an input message and generating a suitable + # response. + raise NotImplementedError( + f"Agent [{type(self).__name__}] is missing the required " + f'"reply" function.', + ) + + # ... 
[code omitted for brevity] +``` + +## 探索AgentPool + +在 AgentScope 中的 `AgentPool` 是一个经过精选的,随时可用的,专门化agent集合。这些agent中的每一个都是为了特定的角色量身定做,并配备了处理特定任务的默认行为。`AgentPool` 旨在通过提供各种 Agent 模板来加快开发过程。 + +以下是一个总结了 AgentPool 中一些关键agent的功能的表格: + +| Agent 种类 | 描述 | Typical Use Cases | +|--------------|--------------------------------------------------|-------------------| +| `AgentBase` | 作为所有agent的超类,提供了必要的属性和方法。 | 构建任何自定义agent的基础。 | +| `DialogAgent` | 通过理解上下文和生成连贯的响应来管理对话。 | 客户服务机器人,虚拟助手。 | +| `UserAgent` | 与用户互动以收集输入,生成可能包括URL或基于所需键的额外具体信息的消息。 | 为agent收集用户输入 | +| *更多agent* | AgentScope 正在不断扩大agent池,加入更多专门化的agent,以适应多样化的应用。 | | + +## 从Agent池中定制Agent + +从 AgentPool 中定制一个agent,使您能够根据您的多agent应用的独特需求来调整其功能。您可以通过调整配置和提示来轻松修改现有agent,或者,对于更广泛的定制,您可以进行二次开发 + +下面,我们提供了如何配置来自 AgentPool 的各种agent的用法: + +### `DialogAgent` + +* **回复方法**:`reply` 方法是处理输入消息和生成响应的主要逻辑所在 + +```python +def reply(self, x: dict = None) -> dict: + # Additional processing steps can occur here + + if x is not None: + self.memory.add(x) # Update the memory with the input + + # Generate a prompt for the language model using the system prompt and memory + prompt = self.engine.join(self.sys_prompt, self.memory.get_memory()) + + # Invoke the language model with the prepared prompt + response = self.model(prompt).text + + # Format the response and create a message object + msg = Msg(self.name, response) + + # Record the message to memory and return it + self.memory.add(msg) + return msg +``` + +* **用法**:为了定制一个用于客户服务机器人的 `DialogAgent`: + +```python +from agentscope.agents import DialogAgent + +# Configuration for the DialogAgent +dialog_agent_config = { + "name": "ServiceBot", + "model_config_name": "gpt-3.5", # Specify the model used for dialogue generation + "sys_prompt": "Act as AI assistant to interact with the others. 
Try to " + "reponse on one line.\n", # Custom prompt for the agent + # Other configurations specific to the DialogAgent +} + +# Create and configure the DialogAgent +service_bot = DialogAgent(**dialog_agent_config) +``` + +### `UserAgent` + +* **回复方法**:这个方法通过提示内容以及在需要时附加的键和URL来处理用户输入。收集到的数据存储在agent记忆中的一个message对象里,用于记录或稍后使用,并返回该message作为响应。 + +```python +def reply( + self, + x: dict = None, + required_keys: Optional[Union[list[str], str]] = None, +) -> dict: + # Check if there is initial data to be added to memory + if x is not None: + self.memory.add(x) + + content = input(f"{self.name}: ") # Prompt the user for input + kwargs = {} + + # Prompt for additional information based on the required keys + if required_keys is not None: + if isinstance(required_keys, str): + required_keys = [required_keys] + for key in required_keys: + kwargs[key] = input(f"{key}: ") + + # Optionally prompt for a URL if required + url = None + if self.require_url: + url = input("URL: ") + + # Create a message object with the collected input and additional details + msg = Msg(self.name, content=content, url=url, **kwargs) + + # Add the message object to memory + self.memory.add(msg) + return msg +``` + +* **用法**:配置一个 UserAgent 用于收集用户输入和URL(文件、图像、视频、音频或网站的URL): + +```python +from agentscope.agents import UserAgent + +# Configuration for UserAgent +user_agent_config = { + "name": "User", + "require_url": True, # If true, the agent will require a URL +} + +# Create and configure the UserAgent +user_proxy_agent = UserAgent(**user_agent_config) +``` + +[[返回顶部]](#201-agent-zh) diff --git a/docs/sphinx_doc/zh_CN/source/tutorial_zh/202-pipeline.md b/docs/sphinx_doc/zh_CN/source/tutorial_zh/202-pipeline.md new file mode 100644 index 000000000..ee3ca8575 --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/tutorial_zh/202-pipeline.md @@ -0,0 +1,302 @@ +(202-pipeline-zh)= + +# 智能体间交互 + +**Pipeline**和**Message Hub**主要用于描绘应用中信息的交换和传播过程,它们极大简化了Multi-Agent应用流程的编排工作。 +在本教程中,我们将详细的介绍Pipeline和Message 
Hub的原理和使用方式。 + +## Pipeline + +在AgentScope中,消息的交换、传播构成了Multi-Agent应用。但是对复杂应用来说,细致的描绘每一次信息交流对开发者来说是非常困难的。 +`Pipeline`主要用于简化“描述消息传播”的编程工作。 + +`Pipeline`中接收的对象是`Operator`,即信息的加工和传播单元(例如智能体`Agent`是`Operator +`的一个子类),而`Pipeline`自身也是`Operator`的子类。以下是所有`Pipeline`的基类: + +```python +class PipelineBase(Operator): + """所有pipelines的基础接口.""" + # ... [为简洁起见省略代码] + @abstractmethod + def __call__(self, x: Optional[dict] = None) -> dict: + """在这定义pipeline采取的操作。 + + Args: + x (Optional[`dict`], optional): + 对话历史以及一些环境信息。 + + Returns: + `dict`: 经过Pipeline处理后的返回消息。 + """ +``` + +### 类别 + +为了方便开发者的使用,对于同一功能的Pipeline,AgentScope提供了两种不同的实现策略: + +* **对象类型Pipeline** + + * 这些Pipeline是面向对象的,继承自 + `PipelineBase`。它们本身是`Operator`,可以与其他运算符组合以创建复杂的交互模式,并且可以复用。 + + ```python + # 实例化并调用 + pipeline = ClsPipeline([agent1, agent2, agent3]) + x = pipeline(x) + ``` + +* **函数式Pipeline** + + * 函数式Pipeline是独立的函数实现,在不需要复用的一次性使用场景中很有用。 + + ```python + # 只需要调用 + x = funcpipeline([agent1, agent2, agent3], x) + ``` + +Pipeline根据其功能被分类成以下的类型。下表概述了 AgentScope 中可用的不同 Pipeline: + +| 运算符类型Pipeline | 函数式Pipeline | 描述 | +| -------------------- | ------------------- | ------------------------------------------------------------ | +| `SequentialPipeline` | `sequentialpipeline` | 按顺序执行一系列运算符,将一个运算符的输出作为下一个运算符的输入。 | +| `IfElsePipeline` | `ifelsepipeline` | 实现条件逻辑,如果条件为真,则执行一个运算符;如果条件为假,则执行另一个运算符。 | +| `SwitchPipeline` | `switchpipeline` | 实现分支选择,根据条件的结果从映射集中执行一个运算符。 | +| `ForLoopPipeline` | `forlooppipeline` | 重复执行一个运算符,要么达到设定的迭代次数,要么直到满足指定的中止条件。 | +| `WhileLoopPipeline` | `whilelooppipeline` | 只要给定条件保持为真,就持续执行一个运算符。 | +| - | `placeholder` | 在流控制中不需要任何操作的分支,如 if-else/switch 中充当占位符。 | + +### 使用说明 + +本节通过比较有无 Pipeline 的情况下多智能体应用程序中逻辑实现的方式,来阐释 Pipeline 如何简化逻辑实现。 +**注意:** 请注意,在下面提供的示例中,我们使用术语 `agent` 来代表任何可以作为 `Operator` 的实例。这是为了便于理解,并说明 Pipeline 是如何协调不同操作之间的交互的。您可以将 `agent` 替换为任何 `Operator`,从而在实践中允许 `agent` 和 `pipeline` 的混合使用。 + +#### `SequentialPipeline` + +* 不使用 pipeline: + + ```python + x = agent1(x) + x = agent2(x) + x = 
agent3(x) + ``` + +* 使用 pipeline: + + ```python + from agentscope.pipelines import SequentialPipeline + + pipe = SequentialPipeline([agent1, agent2, agent3]) + x = pipe(x) + ``` + +* 使用函数式 pipeline: + + ```python + from agentscope.pipelines import sequentialpipeline + + x = sequentialpipeline([agent1, agent2, agent3], x) + ``` + +#### `IfElsePipeline` + +* 不使用 pipeline: + + ```python + if condition(x): + x = agent1(x) + else: + x = agent2(x) + ``` + +* 使用 pipeline: + + ```python + from agentscope.pipelines import IfElsePipeline + + pipe = IfElsePipeline(condition, agent1, agent2) + x = pipe(x) + ``` + +* 使用函数式 pipeline: + + ```python + from agentscope.functional import ifelsepipeline + + x = ifelsepipeline(condition, agent1, agent2, x) + ``` + +#### `SwitchPipeline` + +* 不使用 pipeline: + + ```python + switch_result = condition(x) + if switch_result == case1: + x = agent1(x) + elif switch_result == case2: + x = agent2(x) + else: + x = default_agent(x) + ``` + +* 使用 pipeline: + + ```python + from agentscope.pipelines import SwitchPipeline + + case_operators = {case1: agent1, case2: agent2} + pipe = SwitchPipeline(condition, case_operators, default_agent) + x = pipe(x) + ``` + +* 使用函数式 pipeline: + + ```python + from agentscope.functional import switchpipeline + + case_operators = {case1: agent1, case2: agent2} + x = switchpipeline(condition, case_operators, default_agent, x) + ``` + +#### `ForLoopPipeline` + +* 不使用 pipeline: + + ```python + for i in range(max_iterations): + x = agent(x) + if break_condition(x): + break + ``` + +* 使用 pipeline: + + ```python + from agentscope.pipelines import ForLoopPipeline + + pipe = ForLoopPipeline(agent, max_iterations, break_condition) + x = pipe(x) + ``` + +* 使用函数式 pipeline: + + ```python + from agentscope.functional import forlooppipeline + + x = forlooppipeline(agent, max_iterations, break_condition, x) + ``` + +#### `WhileLoopPipeline` + +* 不使用 pipeline: + + ```python + while condition(x): + x = agent(x) + ``` + +* 使用 pipeline: 
+ + ```python + from agentscope.pipelines import WhileLoopPipeline + + pipe = WhileLoopPipeline(agent, condition) + x = pipe(x) + ``` + +* 使用函数式 pipeline: + + ```python + from agentscope.functional import whilelooppipeline + + x = whilelooppipeline(agent, condition, x) + ``` + +### Pipeline 组合 + +值得注意的是,AgentScope 支持组合 Pipeline 来创建复杂的交互。例如,我们可以创建一个 Pipeline,按顺序执行一系列智能体,然后执行另一个 Pipeline,根据条件执行一系列智能体。 + +```python +from agentscope.pipelines import SequentialPipeline, IfElsePipeline +# 创建一个按顺序执行智能体的 Pipeline +pipe1 = SequentialPipeline([agent1, agent2, agent3]) +# 创建一个条件执行智能体的 Pipeline +pipe2 = IfElsePipeline(condition, agent4, agent5) +# 创建一个按顺序执行 pipe1 和 pipe2 的 Pipeline +pipe3 = SequentialPipeline([pipe1, pipe2]) +# 调用 Pipeline +x = pipe3(x) +``` + +## MsgHub + +`MsgHub` 旨在管理一组智能体之间的对话/群聊,其中允许共享消息。通过 `MsgHub`,智能体可以使用 `broadcast` 向群组中的所有其他智能体广播消息。 + +以下是 `MsgHub` 的核心类: + +```python +class MsgHubManager: + """MsgHub 管理类,用于在一组智能体之间共享对话。""" + # ... [为简洁起见省略代码] + + def broadcast(self, msg: Union[dict, list[dict]]) -> None: + """将消息广播给所有参与者。""" + for agent in self.participants: + agent.observe(msg) + + def add(self, new_participant: Union[Sequence[AgentBase], AgentBase]) -> None: + """将新参与者加入此 hub""" + # ... [为简洁起见省略代码] + + def delete(self, participant: Union[Sequence[AgentBase], AgentBase]) -> None: + """从参与者中删除智能体。""" + # ... 
[为简洁起见省略代码] +``` + +### 使用说明 + +#### 创建一个 MsgHub + +要创建一个 `MsgHub`,请通过调用 `msghub` 辅助函数并传入参与智能体列表来实例化一个 `MsgHubManager`。此外,您可以提供一个可选的初始声明`announcement`,如果提供,将在初始化时广播给所有参与者。 + +```python +from agentscope.msg_hub import msghub + +# Initialize MsgHub with participating agents +hub_manager = msghub( + participants=[agent1, agent2, agent3], announcement=initial_announcement +) +``` + +#### 在 MsgHub 中广播消息 + +`MsgHubManager` 可以与上下文管理器一起使用,以处理`MsgHub`环境的搭建和关闭: + +```python +with msghub( + participants=[agent1, agent2, agent3], announcement=initial_announcement +) as hub: + # 智能体现在可以在这个块中广播和接收消息 + agent1() + agent2() + + # 或者手动广播一条消息 + hub.broadcast(some_message) + +``` + +退出上下文块时,`MsgHubManager` 会确保每个智能体的听众被清空,防止在中心环境之外的任何意外消息共享。 + +#### 添加和删除参与者 + +你可以动态地从 `MsgHub` 中添加或移除智能体: + +```python +# 添加一个新参与者 +hub.add(new_agent) + +# 移除一个现有的参与者 +hub.delete(existing_agent) +``` + +[[返回顶部]](#202-pipeline-zh) diff --git a/docs/sphinx_doc/zh_CN/source/tutorial_zh/203-model.md b/docs/sphinx_doc/zh_CN/source/tutorial_zh/203-model.md new file mode 100644 index 000000000..1f56943b4 --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/tutorial_zh/203-model.md @@ -0,0 +1,209 @@ +(203-model-zh)= + +# 关于模型 + +AgentScope中,模型的部署和调用是通过`ModelWrapper`来解耦开的,开发者可以通过提供模型配置(Model config)的方式指定模型,同时AgentScope也提供脚本支持开发者自定义模型服务。 + +## 支持模型 + +目前,AgentScope内置以下模型服务API的支持: + +- OpenAI API,包括对话(Chat),图片生成(DALL-E)和文本嵌入(Embedding)。 +- Post请求API,基于Post请求实现的模型推理服务,包括Huggingface/ModelScope + Inference API和各种符合Post请求格式的API。 + +## 配置方式 + +AgentScope中,用户通过`agentscope.init`接口中的`model_configs`参数来指定模型配置。 +`model_configs`可以是一个字典,或是一个字典的列表,抑或是一个指向模型配置文件的路径。 + +```python +import agentscope + +agentscope.init(model_configs=MODEL_CONFIG_OR_PATH) +``` + +其中`model_configs`的一个例子如下: + +```python +model_configs = [ + { + "config_name": "gpt-4-temperature-0.0", + "model_type": "openai", + "model_name": "gpt-4", + "api_key": "xxx", + "organization": "xxx", + "generate_args": { + "temperature": 0.0 + } + }, + { + "config_name": 
"dall-e-3-size-1024x1024", + "model_type": "openai_dall_e", + "model_name": "dall-e-3", + "api_key": "xxx", + "organization": "xxx", + "generate_args": { + "size": "1024x1024" + } + }, + # 在这里可以配置额外的模型 +] +``` + +### 配置格式 + +AgentScope中,模型配置是一个字典,用于指定模型的类型以及设定调用参数。 +我们将模型配置中的字段分为_基础参数_和_调用参数_两类。 +其中,基础参数包括`config_name`和`model_type`两个基本字段,分别用于区分不同的模型配置和具 +体的`ModelWrapper`类型。 + +```python +{ + # 基础参数 + "config_name": "gpt-4-temperature-0.0", # 模型配置名称 + "model_type": "openai", # 对应`ModelWrapper`类型 + + # 详细参数 + # ... +} +``` + +#### 基础参数 + +基础参数中,`config_name`是模型配置的标识,我们将在初始化智能体时用该字段指定使用的模型服务。 + +`model_type`对应了`ModelWrapper`的类型,用于指定模型服务的类型。对应源代码中`ModelWrapper +`类的`model_type`字段。 + +```python +class OpenAIChatWrapper(OpenAIWrapper): + """The model wrapper for OpenAI's chat API.""" + + model_type: str = "openai" + # ... +``` + +在目前的AgentScope中,所支持的`model_type`类型,对应的`ModelWrapper`类,以及支持的 +API如下: + +| 任务 | model_type | ModelWrapper | 支持的 API | +|--------|--------------------|--------------------------|------------------------------------------------------------| +| 文本生成 | `openai` | `OpenAIChatWrapper` | 标准 OpenAI 聊天 API,FastChat 和 vllm | +| 图像生成 | `openai_dall_e` | `OpenAIDALLEWrapper` | 用于生成图像的 DALL-E API | +| 文本嵌入 | `openai_embedding` | `OpenAIEmbeddingWrapper` | 用于文本嵌入的 API | +| POST请求 | `post_api` | `PostAPIModelWrapperBase` | Huggingface/ModelScope inference API 和自定义的post request API | + +#### 详细参数 + +根据`ModelWrapper`的不同,详细参数中所包含的参数不同。 +但是所有的详细参数都会用于初始化`ModelWrapper`类的实例,因此,更详细的参数说明可以根据`ModelWrapper`类的构造函数来查看。 + +- OpenAI的API,包括文本生成,图像生成,文本嵌入,其模型配置参数如下 + +```python +{ + # 基础参数 + "config_name": "gpt-4_temperature-0.0", + "model_type": "openai", + + # 详细参数 + # 必要参数 + "model_name": "gpt-4", # OpenAI模型名称 + + # 可选参数 + "api_key": "xxx", # OpenAI API Key,如果没有提供则会从环境变量中读取 + "organization": "xxx", # 组织名称,如果没有提供则会从环境变量中读取 + "client_args": { # 初始化OpenAI API Client的参数 + "max_retries": 3, + }, + "generate_args": { # 调用模型时传入的参数 + "temperature": 0.0 + }, + "budget": 100.0 # 
API费用预算 +} +``` + +- Post request API,其模型配置参数如下 + +```python +{ + # 基础参数 + "config_name": "gpt-4_temperature-0.0", + "model_type": "post_api", + + # 详细参数 + "api_url": "http://xxx.png", + "headers": { + # e.g. "Authorization": "Bearer xxx", + }, + + # 可选参数,需要根据Post请求API的要求进行配置 + "json_args": { + # e.g. "temperature": 0.0 + } + # ... +} +``` + +## 从零搭建模型服务 + +针对需要自己搭建模型服务的开发者,AgentScope提供了一些脚本来帮助开发者快速搭建模型服务。您可以在[scripts](https://github.com/modelscope/agentscope/blob/main/scripts/)目录中找到这些脚本。 + +具体而言,AgentScope提供了以下模型服务的脚本: + +- 基于Flask + HuggingFace的模型服务 +- 基于Flask + ModelScope的模型服务 +- FastChat推理引擎 +- vllm推理引擎 + +下面我们以Flask + HuggingFace的模型服务为例,介绍如何使用AgentScope的模型服务脚本。 +更多的模型服务脚本可以在[scripts](https://github.com/modelscope/agentscope/blob/main/scripts/)中查看。 + +### 基于Flask 的模型 API 服务 + +[Flask](https://github.com/pallets/flask)是一个轻量级的Web应用框架。利用Flask可以很容易地搭建本地模型API服务。 + +#### 使用transformers库 + +##### 安装transformers并配置服务 + +按照以下命令安装 Flask 和 Transformers: + +```bash +pip install Flask transformers +``` + +以模型 `meta-llama/Llama-2-7b-chat-hf` 和端口 `8000` 为例,通过运行以下命令来设置模型 API 服务。 + +```bash +python flask_transformers/setup_hf_service.py + --model_name_or_path meta-llama/Llama-2-7b-chat-hf + --device "cuda:0" # or "cpu" + --port 8000 +``` + +您可以将 `meta-llama/Llama-2-7b-chat-hf` 替换为 huggingface 模型中心的任何模型卡片。 + +##### 在AgentScope中调用 + +在 AgentScope 中,您可以使用以下模型配置加载模型:[./flask_transformers/model_config.json](https://github.com/modelscope/agentscope/blob/main/scripts/flask_transformers/model_config.json)。 + +```json +{ + "model_type": "post_api", + "config_name": "flask_llama2-7b-chat", + "api_url": "http://127.0.0.1:8000/llm/", + "json_args": { + "max_length": 4096, + "temperature": 0.5 + } +} +``` + +##### 注意 + +在这种模型服务中,来自 post 请求的消息应该是 **STRING** 格式。您可以使用来自 *transformers* 的[聊天模型模板](https://huggingface.co/docs/transformers/main/chat_templating),只需在[`./flask_transformers/setup_hf_service.py`](https://github.com/modelscope/agentscope/blob/main/scripts/flask_transformers/setup_hf_service.py)做一点修改即可。 + +[[返回顶部]](#203-model-zh) diff --git 
a/docs/sphinx_doc/zh_CN/source/tutorial_zh/204-service.md b/docs/sphinx_doc/zh_CN/source/tutorial_zh/204-service.md new file mode 100644 index 000000000..47ddca013 --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/tutorial_zh/204-service.md @@ -0,0 +1,238 @@ +(204-service-zh)= + +# 关于服务 + +服务函数(Service function)是可以增强智能体能力工具,例如执行Python代码、网络搜索、 +文件操作等。本教程概述了AgentScope中可用的服务功能,同时介绍如何使用它们来增强智能体的能力。 + +## Service函数概览 + +下面的表格按照类型概述了各种Service函数。以下函数可以通过`agentscope.service.{函数名}`进行调用。 + +| Service场景 | Service函数名称 | 描述 | +|------------|-----------------------| ------------------------------------------------------------ | +| 代码 | `execute_python_code` | 执行一段 Python 代码,可选择在 Docker
容器内部执行。 | +| 检索 | `retrieve_from_list` | 根据给定的标准从列表中检索特定项目。 | +| SQL查询 | `query_mysql` | 在 MySQL 数据库上执行 SQL 查询并返回结果。 | +| | `query_sqlite` | 在 SQLite 数据库上执行 SQL 查询并返回结果。 | +| | `query_mongodb` | 对 MongoDB 集合执行查询或操作。 | +| 文本处理 | `summarization` | 使用大型语言模型总结一段文字以突出其主要要点。 | +| 网络搜索 | `web_search` | 使用指定的搜索引擎(当前支持 Google 和 Bing)执行网络搜索。 | +| 文件处理 | `create_file` | 在指定路径创建一个新文件,并可选择添加初始内容。 | +| | `delete_file` | 删除由文件路径指定的文件。 | +| | `move_file` | 将文件从一个路径移动或重命名到另一个路径。 | +| | `create_directory` | 在指定路径创建一个新的目录。 | +| | `delete_directory` | 删除一个目录及其所有内容。 | +| | `move_directory` | 将目录从一个路径移动或重命名到另一个路径。 | +| | `read_text_file` | 读取并返回文本文件的内容。 | +| | `write_text_file` | 向指定路径的文件写入文本内容。 | +| | `read_json_file` | 读取并解析 JSON 文件的内容。 | +| | `write_json_file` | 将 Python 对象序列化为 JSON 并写入到文件。 | +| *更多服务即将推出* | | 正在开发更多服务功能,并将添加到 AgentScope 以进一步增强其能力。 | + +关于详细的参数、预期输入格式、返回类型,请参阅[API文档](https://modelscope.github.io/agentscope/)。 + +## 使用Service函数 + +AgentScope为Service函数提供了两个服务类,分别是`ServiceFactory`和`ServiceResponse`。 + +- `ServiceFactory`的主要作用是将一般的Python函数编程大模型可以直接使用的形式,同时自动生成函数说明。 +- `ServiceResponse`是一个字典的子类,为所有Service函数提供了统一的调用结果接口。 + +### 关于ServiceFactory + +智能体使用的工具一般是函数类型,开发者需要准备大模型能够直接调用的函数,并且需要提供函数的说明。 +但是一般的函数往往需要开发者提供部分参数(例如秘钥,用户名,特定的网址等),然后大模型才能够 +使用。同时为多个函数生成特定格式的说明也是一件繁琐的事情。 + +为了解决上述问题,AgentScope提出了`ServiceFactory`,对于给定的Service +函数,它允许开发者指定部分参数,生成一个大模型可以直接调用的函数,并且自动根据Docstring生成函数说明。 +以Bing网络搜索函数为例。 + +```python +def bing_search( + question: str, + api_key: str, + num_results: int = 10, + **kwargs: Any, +) -> ServiceResponse: + """ + Search question in Bing Search API and return the searching results + + Args: + question (`str`): + The search query string. + api_key (`str`): + The API key provided for authenticating with the Bing Search API. + num_results (`int`, defaults to `10`): + The number of search results to return. + **kwargs (`Any`): + Additional keyword arguments to be included in the search query. 
+ For more details, please refer to + https://learn.microsoft.com/en-us/bing/search-apis/bing-web-search/reference/query-parameters + + [omitted for brevity] + """ +``` + +上述函数中,`question`是大模型填写的字段,而`api_key`,`num_results`是开发者需要提供的参数。 +我们利用`ServiceFactory`的`get`函数进行处理: + +```python +from agentscope.service import ServiceFactory + +func, func_intro = ServiceFactory.get( + bing_search, + api_key="xxx", + num_results=3) +``` + +上述代码中,ServiceFactory生成的func和下面的函数是等价的: + +```python +def bing_search(question: str) -> ServiceResponse: + """ + Search question in Bing Search API and return the searching results + + Args: + question (`str`): + The search query string. + """ + return bing_search(question, api_key="xxx", num_results=3) +``` + +生成的JSON Schema格式说明如下,该格式的函数说明可以直接用于OpenAI API中的tools字段。 +用户也可以根据自己的需求进行二次修改。 + +```python +# print(func_intro) +{ + "type": "function", + "function": { + "name": "bing_search", + "description": "Search question in Bing Search API and return the searching results", + "parameters": { + "type": "object", + "properties": { + "question": { + "type": "string", + "description": "The search query string." + } + }, + "required": [ + "question" + ] + } + } +} +``` + +**注意**:`ServiceFactory`生成的函数和参数说明(包括描述,类型,默认值)是从函数的docstring +中自动提取的,因此建议原函数的docstring应该按照Google风格进行书写,以便更好的提取函数说明。 + +**建议**: + +- Service函数的名称应该是自解释的,这样智能体可以理解函数并正确使用它。 +- 在定义函数时应提供参数的类型(例如`def func(a: int, b: str, c: bool)`),以便智能体正确指定参数。 + +### 关于ServiceResponse + +`ServiceResponse`是对调用的结果的封装,包含了`status`和`content`两个字段。 +当Service函数正常运行结束时,`status`为`ServiceExecStatus. +SUCCESS`,`content`为函数的返回值。而当运行出现错误时,`status`为`ServiceExecStatus. 
+Error`,`content`内为错误信息。 + +```python +class ServiceResponse(dict): + """Used to wrap the execution results of the services""" + + __setattr__ = dict.__setitem__ + __getattr__ = dict.__getitem__ + + def __init__( + self, + status: ServiceExecStatus, + content: Any, + ): + """Constructor of ServiceResponse + + Args: + status (`ServiceExeStatus`): + The execution status of the service. + content (`Any`) + If the argument`status` is `SUCCESS`, `content` is the + response. We use `object` here to support various objects, + e.g. str, dict, image, video, etc. + Otherwise, `content` is the error message. + """ + self.status = status + self.content = content + + # ... [为简洁起见省略代码] + +``` + +## 示例 + +```python +import json +import inspect +from agentscope.service import ServiceResponse +from agentscope.agents import AgentBase + + +def create_file(file_path: str, content: str = "") -> ServiceResponse: + """ + 创建文件并向其中写入内容。 + + Args: + file_path (str): 将要创建文件的路径。 + content (str): 要写入文件的内容。 + + Returns: + ServiceResponse: 其中布尔值指示成功与否,字符串包含任何错误消息(如果有),包括错误类型。 + """ + # ... [为简洁起见省略代码] + + +class YourAgent(AgentBase): + # ... [为简洁起见省略代码] + + def reply(self, x: dict = None) -> dict: + # ... 
[为简洁起见省略代码] + + # 构造提示,让代理提供 JSON 格式的参数 + prompt = ( + f"To complete the user request\n```{x['content']}```\n" + "Please provide the necessary parameters in JSON format for the " + "function:\n" + f"Function: {create_file.__name__}\n" + "Description: Create a file and write content to it.\n" + ) + + # 添加关于函数参数的详细信息 + sig = inspect.signature(create_file) + parameters = sig.parameters.items() + params_prompt = "\n".join( + f"- {name} ({param.annotation.__name__}): " + f"{'(default: ' + json.dumps(param.default) + ')'if param.default is not inspect.Parameter.empty else ''}" + for name, param in parameters + ) + prompt += params_prompt + + # 获取模型响应 + model_response = self.model(prompt).text + + # 解析模型响应并调用 create_file 函数 + # 可能需要额外的提取函数 + try: + kwargs = json.loads(model_response) + create_file(**kwargs) + except: + # 错误处理 + pass + + # ... [为简洁起见省略代码] +``` + +[[返回顶部]](#204-service-zh) diff --git a/docs/sphinx_doc/zh_CN/source/tutorial_zh/205-memory.md b/docs/sphinx_doc/zh_CN/source/tutorial_zh/205-memory.md new file mode 100644 index 000000000..0949b4387 --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/tutorial_zh/205-memory.md @@ -0,0 +1,214 @@ +(205-memory-zh)= + +# 关于记忆 + +AgentScope中,记忆(memory)用于存储历史消息,从而使智能体能够根据上下文提供更加连贯,更加 +自然的响应。 +本教程将首先介绍memory中信息的载体,消息(message),然后介绍AgentScope中记忆模块的功能 +和使用方法。 + +## 关于消息(Message) + +### 消息基类(`MessageBase`) + +AgentScope中,消息基类是Python字典的子类,由`name`,`content`两个必选字段和一个可选的字段 +`url`组成。由于是字典类型,开发者也可以根据需要增加其他字段。 +其中,`name`字段代表消息的发起者,`content`字段代表消息的内容,`url +`则代表消息中附加的数据链接,可以是指向多模态数据的本地链接,也可以是网络链接。 +当一个消息被创建时,将会自动创建一个唯一的ID,用于标识这条消息。同时,消息的创建时间也会以 +时间戳的形式自动记录下来。 + +具体实现中,AgentScope首先提供了一个`MessageBase`基类,用于定义消息的基本属性和使用方法。 +与一般的字典类型不同,`MessageBase`的实例化对象可以通过`对象名.{属性名}`的方式访问属性值, +也可以通过`对象名['属性名']`的方式访问属性值。 +其中,`MessageBase` 类的关键属性如下: + +- **`name`**:该属性表示信息的发起者。这是一项关键的元数据,在需要区分不同发言者的场景中非常有用。 +- **`content`**:信息本身的内容。它可以包括文本、结构化数据或其他与交互相关且需要智能体处理的内容形式。 +- **`url`**:可选属性,允许信息链接到外部资源。这些可以是指向文件的直接链接、多模态数据或网页。 +- **`timestamp`**:时间戳,显示信息创建的时间。 +- 
**`id`**:每条信息在创建时都会被分配一个唯一标识符(ID)。 + +```python +class MessageBase(dict): + """Base Message class, which is used to maintain information for dialog, + memory and used to construct prompt. + """ + + def __init__( + self, + name: str, + content: Any, + url: Optional[Union[Sequence[str], str]] = None, + timestamp: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Initialize the message object + + Args: + name (`str`): + The name of who send the message. It's often used in + role-playing scenario to tell the name of the sender. + However, you can also only use `role` when calling openai api. + The usage of `name` refers to + https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models. + content (`Any`): + The content of the message. + url (`Optional[Union[list[str], str]]`, defaults to None): + A url to file, image, video, audio or website. + timestamp (`Optional[str]`, defaults to None): + The timestamp of the message, if None, it will be set to + current time. + **kwargs (`Any`): + Other attributes of the message. For OpenAI API, you should + add "role" from `["system", "user", "assistant", "function"]`. + When calling OpenAI API, `"role": "assistant"` will be added + to the messages that don't have "role" attribute. 
+ + """ + # id and timestamp will be added to the object as its attributes + # rather than items in dict + self.id = uuid4().hex + if timestamp is None: + self.timestamp = _get_timestamp() + else: + self.timestamp = timestamp + + self.name = name + self.content = content + + if url: + self.url = url + + self.update(kwargs) + + def __getattr__(self, key: Any) -> Any: + try: + return self[key] + except KeyError as e: + raise AttributeError(f"no attribute '{key}'") from e + + def __setattr__(self, key: Any, value: Any) -> None: + self[key] = value + + def __delattr__(self, key: Any) -> None: + try: + del self[key] + except KeyError as e: + raise AttributeError(f"no attribute '{key}'") from e + + def to_str(self) -> str: + """Return the string representation of the message""" + raise NotImplementedError + + def serialize(self) -> str: + """Return the serialized message.""" + raise NotImplementedError + + # ... [省略代码以简化] +``` + +### 消息类(`Msg`) + +`Msg`类是AgentScope中最常用的消息类。它继承了 `MessageBase`类,并实现了`to_str` 和 +`serialize` 抽象方法。 +在一个Agent类中,其`reply`函数通常会返回一个`Msg`类的实例,以便在AgentScope中进行消息的 +传递。 + +```python +class Msg(MessageBase): + """The Message class.""" + + def __init__( + self, + name: str, + content: Any, + url: Optional[Union[Sequence[str], str]] = None, + timestamp: Optional[str] = None, + echo: bool = False, + **kwargs: Any, + ) -> None: + super().__init__( + name=name, + content=content, + url=url, + timestamp=timestamp, + **kwargs, + ) + if echo: + logger.chat(self) + + def to_str(self) -> str: + """Return the string representation of the message""" + return f"{self.name}: {self.content}" + + def serialize(self) -> str: + return json.dumps({"__type": "Msg", **self}) +``` + +## 关于记忆(Memory) + +### 关于记忆基类(`MemoryBase`) + +`MemoryBase` 是一个抽象类,以结构化的方式处理智能体的记忆。它定义了存储、检索、删除和操作 +*信息*内容的操作。 + +```python +class MemoryBase(ABC): + # ... 
[省略代码以简化] + + def get_memory( + self, + return_type: PromptType = PromptType.LIST, + recent_n: Optional[int] = None, + filter_func: Optional[Callable[[int, dict], bool]] = None, + ) -> Union[list, str]: + raise NotImplementedError + + def add(self, memories: Union[list[dict], dict]) -> None: + raise NotImplementedError + + def delete(self, index: Union[Iterable, int]) -> None: + raise NotImplementedError + + def load( + self, + memories: Union[str, dict, list], + overwrite: bool = False, + ) -> None: + raise NotImplementedError + + def export( + self, + to_mem: bool = False, + file_path: Optional[str] = None, + ) -> Optional[list]: + raise NotImplementedError + + def clear(self) -> None: + raise NotImplementedError + + def size(self) -> int: + raise NotImplementedError +``` + +以下是 `MemoryBase` 的关键方法: + +- **`get_memory`**:这个方法负责从智能体的记忆中检索存储的信息。它可以按照 `return_type` 指定的格式返回这些信息。该方法还可以在提供 `recent_n` 时检索特定数量的最近信息,并且可以应用过滤函数( `filter_func` )来根据自定义标准选择信息。 +- **`add`**:这个方法用于将新的信息添加到智能体的记忆中。它可以接受单个信息或信息列表。每条信息通常是 `MessageBase` 或其子类的实例。 +- **`delete`**:此方法允许通过信息的索引(如果提供可迭代对象,则为索引集合)从记忆中删除信息。 +- **`load`**:这个方法允许从外部来源批量加载信息到智能体的内存中。`overwrite` 参数决定是否在加载新的信息集之前清除现有记忆。 +- **`export`**:这个方法便于将存储的*信息*从智能体的记忆中导出,要么导出到一个外部文件(由 `file_path` 指定),要么直接导入到程序的运行内存中(如果 `to_mem` 设置为 `True` )。 +- **`clear`**:这个方法清除智能体记忆中的所有*信息*,本质上是重置。 +- **`size`**:这个方法返回当前存储在智能体记忆中的信息数量。 + +### 关于`TemporaryMemory` + +`TemporaryMemory` 类是 `MemoryBase` 类的一个具体实现,提供了一个智能体运行期间存在的记忆存储,被用作智能体的默认记忆类型。除了 `MemoryBase` 的所有行为外,`TemporaryMemory` 还提供了检索的方法: + +- **`retrieve_by_embedding`**:基于它们的嵌入向量 (embeddings) 检索与查询最相似的 `messages`。它使用提供的度量标准来确定相关性,并可以返回前 `k` 个最相关的信息。 +- **`get_embeddings`**:返回记忆中所有信息的嵌入向量。如果信息没有嵌入向量,并且提供了嵌入模型,它将生成并存储信息的嵌入向量。 + +有关 `Memory` 和 `Msg` 使用的更多细节,请参考 API 文档。 + +[[返回顶端]](#205-memory-zh) diff --git a/docs/sphinx_doc/zh_CN/source/tutorial_zh/206-prompt.md b/docs/sphinx_doc/zh_CN/source/tutorial_zh/206-prompt.md new file mode 100644 index 000000000..9068c7d3b --- /dev/null +++ 
b/docs/sphinx_doc/zh_CN/source/tutorial_zh/206-prompt.md @@ -0,0 +1,69 @@ +(206-prompt-zh)= + +# 提示工程 + +**提示(prompt)** 是与语言模型互动时的关键组件,尤其是当寻求生成特定类型的输出或指导模型朝 +向期望行为时。 +AgentScope中允许开发者按照自己的需求定制提示,同时提供了`PromptEngine`类用以简化为大语言 +模型(LLMs)制作提示的过程。 +本教程将主要介绍如何使用`PromptEngine`类构建大模型的提示。 + +## 关于`PromptEngine`类 + +`PromptEngine`类提供了一种结构化的方式来合并不同的提示组件,比如指令、提示、对话历史和用户输入,以适合底层语言模型的格式。 + +### 提示工程的关键特性 + +- **模型兼容性**:可以与任何 `ModelWrapperBase` 的子类一起工作。 +- **提示类型**:支持字符串和列表风格的提示,与模型首选的输入格式保持一致。 + +### 初始化 + +当创建 `PromptEngine` 的实例时,您可以指定目标模型,以及(可选的)缩减原则、提示的最大长度、提示类型和总结模型(可以与目标模型相同)。 + +```python +model = OpenAIWrapper(...) +engine = PromptEngine(model) +``` + +### 合并提示组件 + +`PromptEngine` 的 `join` 方法提供了一个统一的接口来处理任意数量的组件,以构建最终的提示。 + +#### 输出字符串类型提示 + +如果模型期望的是字符串类型的提示,组件会通过换行符连接: + +```python +system_prompt = "You're a helpful assistant." +memory = ... # 可以是字典、列表或字符串 +hint_prompt = "Please respond in JSON format." + +prompt = engine.join(system_prompt, memory, hint_prompt) +# 结果将会是一个由换行符连接各组件得到的字符串,例如: "You're a helpful assistant.\n{memory}\nPlease respond in JSON format." +``` + +#### 输出列表类型提示 + +对于使用列表类型提示的模型,比如 OpenAI 和 Huggingface 聊天模型,组件可以转换为 `Message` 对象,其类型是字典列表: + +```python +system_prompt = "You're a helpful assistant." +user_messages = [{"name": "user", "content": "What's the weather like today?"}] + +prompt = engine.join(system_prompt, user_messages) +# 结果将会是: [{"role": "assistant", "content": "You're a helpful assistant."}, {"name": "user", "content": "What's the weather like today?"}] +``` + +#### 动态格式化提示 + +`PromptEngine` 支持使用 `format_map` 参数动态格式化提示,允许您灵活地将各种变量注入到不同场景的提示组件中: + +```python +variables = {"location": "London"} +hint_prompt = "Find the weather in {location}."
+ +prompt = engine.join(system_prompt, user_input, hint_prompt, format_map=variables) +``` + +[[返回顶端]](#206-prompt-zh) diff --git a/docs/sphinx_doc/zh_CN/source/tutorial_zh/207-monitor.md b/docs/sphinx_doc/zh_CN/source/tutorial_zh/207-monitor.md new file mode 100644 index 000000000..73e0daf4c --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/tutorial_zh/207-monitor.md @@ -0,0 +1,172 @@ +(207-monitor-zh)= + +# 监控器 + +在多智能体应用程序中,特别是那些依赖外部模型 API 的应用程序,监控使用情况和成本以防止过度使用并确保遵守速率限制是至关重要的。`MonitorBase` 类及其实现 `SqliteMonitor` 提供了一种追踪和调节这些 API 在您的应用中使用情况的方法。在本教程中,您将学习如何使用它们来监控 API 调用。 + +## 理解 AgentScope 中的监控器 + +`MonitorBase` 类作为一个接口,用于设置一个监控系统,跟踪各种度量指标,特别是关注 API 使用情况。它定义了一些方法,使得可以注册、检查、更新和管理与 API 调用相关的度量指标。 + +以下是 `MonitorBase` 的关键方法: + +- **`register`**:初始化用于跟踪的度量指标,例如进行的 API 调用次数,以及可选的配额用于执行限制。 +- **`exists`**:检查是否已经跟踪了某个度量指标。 +- **`add`**:将度量指标增加指定的值,用于每次 API 调用后计数。 +- **`update`**:一次更新多个度量指标,适用于批量更新。 +- **`clear`**:将度量指标重置为零,适用于配额周期重置。 +- **`remove`**:从监控中移除一个度量指标。 +- **`get_value`**:检索特定度量指标的当前值。 +- **`get_unit`**:获取与度量指标相关联的单元(例如,“调用”)。 +- **`get_quota`**:获取允许的 API 调用的最大值。 +- **`set_quota`**:调整度量指标的配额,如果 API 使用条款变更。 +- **`get_metric`**:返回有关特定度量指标的详细信息。 +- **`get_metrics`**:检索所有跟踪度量指标的信息,可以基于度量指标名称可选地进行过滤。 +- **`register_budget`**:为某个 API 调用设置预算,将初始化一系列用于计算成本的度量指标。 + +## 使用监控器 + +### 获取监控器实例 + +从 `MonitorFactory` 获取监控器实例开始监控,注意多次调用 `get_monitor` 方法将返回同一个监控器实例。 + +```python +# 确保在这之前你已经调用了agentscope.init(...) 
+monitor = MonitorFactory.get_monitor() +``` + +> 目前上述代码返回的是 `SqliteMonitor` 实例,它在 `agentscope.init` 中初始化。 +> `SqliteMonitor` 类是基于Sqlite3的 `MonitorBase` 类的默认实现。 + +### 基本使用 + +#### 注册 API 使用度量指标 + +注册一个新的度量指标以开始监控 token 数量: + +```python +monitor.register("token_num", metric_unit="token", quota=1000) +``` + +#### 更新度量指标 + +增加 `token_num` 度量指标: + +```python +monitor.add("token_num", 20) +``` + +#### 处理配额 + +如果 API 调用次数超出了配额,将抛出 `QuotaExceededError`: + +```python +try: + monitor.add("api_calls", amount) +except QuotaExceededError as e: + # 处理超出的配额,例如,通过暂停API调用 + print(e.message) +``` + +#### 检索度量指标 + +获取当前使用的 token 数量: + +```python +token_num_used = monitor.get_value("token_num") +``` + +#### 重置和移除度量指标 + +在新的周期开始时重置 token 计数: + +```python +monitor.clear("token_num") +``` + +如果不再需要,则移除度量指标: + +```python +monitor.remove("token_num") +``` + +### 进阶使用 + +> 这里的功能仍在开发中,接口可能会继续变化。 + +#### 使用 `prefix` 来区分度量指标 + +假设您有多个智能体/模型使用相同的 API 调用,但您想分别计算它们的 token 使用量,您可以在原始度量指标名称前添加一个唯一的前缀 `prefix`,`get_full_name` 函数提供了这样的功能。 + +例如,如果 model_A 和 model_B 都使用 OpenAI API,您可以通过以下代码注册这些度量指标。 + +```python +from agentscope.utils.monitor import get_full_name + +... 
+ +# 在model_A中 +monitor.register(get_full_name('prompt_tokens', 'model_A')) +monitor.register(get_full_name('completion_tokens', 'model_A')) + +# 在model_B中 +monitor.register(get_full_name('prompt_tokens', 'model_B')) +monitor.register(get_full_name('completion_tokens', 'model_B')) +``` + +更新这些度量指标,只需使用 `update` 方法。 + +```python +# 在model_A中 +monitor.update(openai_response.usage.model_dump(), prefix='model_A') + +# 在model_B中 +monitor.update(openai_response.usage.model_dump(), prefix='model_B') +``` + +获取特定模型的度量指标,请使用 `get_metrics` 方法。 + +```python +# 获取model_A的度量指标 +model_A_metrics = monitor.get_metrics('model_A') + +# 获取model_B的度量指标 +model_B_metrics = monitor.get_metrics('model_B') +``` + +#### 为 API 注册预算 + +当前,监控器已经支持根据各种度量指标自动计算 API 调用的成本,您可以直接为模型设置预算以避免超出配额。 + +假设您正在使用 `gpt-4-turbo`,您的预算是10美元,您可以使用以下代码。 + +```python +model_name = 'gpt-4-turbo' +monitor.register_budget(model_name=model_name, value=10, prefix=model_name) +``` + +使用 `prefix` 为使用相同 API 的不同模型设置预算。 + +```python +model_name = 'gpt-4-turbo' +# 在model_A中 +monitor.register_budget(model_name=model_name, value=10, prefix=f'model_A.{model_name}') + +# 在model_B中 +monitor.register_budget(model_name=model_name, value=10, prefix=f'model_B.{model_name}') +``` + +`register_budget` 将自动注册计算总成本所需的度量指标,当这些度量指标更新时计算总成本,并在超出预算时抛出 `QuotaExceededError`。 + +```python +model_name = 'gpt-4-turbo' +try: + monitor.update(openai_response.usage.model_dump(), prefix=model_name) +except QuotaExceededError as e: + # 处理超出的配额 + print(e.message) +``` + +> **注意:** 此功能仍在实验阶段,只支持一些特定的 API,这些 API 已在 `agentscope.utils.monitor._get_pricing` 中列出。 + +[[Return to the top]](#207-monitor-zh) diff --git a/docs/sphinx_doc/zh_CN/source/tutorial_zh/208-distribute.md b/docs/sphinx_doc/zh_CN/source/tutorial_zh/208-distribute.md new file mode 100644 index 000000000..ed88d234d --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/tutorial_zh/208-distribute.md @@ -0,0 +1,157 @@ +(208-distribute-zh)= + +# 关于分布式 + +AgentScope实现了基于Actor模式的智能体分布式部署和并行优化,并提供以下的特点: 
+ +- **自动并行优化**:运行时自动实现应用并行优化,无需额外优化成本; +- **应用编写中心化**:无需分布式背景知识,轻松编排分布式应用程序流程; +- **零成本自动迁移**:中心化的Multi-Agent应用可以轻松转化成分布式模式。 + +本教程将详细介绍AgentScope分布式的实现原理和使用方法。 + +## 使用方法 + +AgentScope中,我们将运行应用流程的进程称为“主进程”,而所有的智能体都会运行在独立的进程当中。 +根据主进程和智能体进程之间关系的不同,AgentScope支持两种分布式模式:主从模式(Master-Slave)和对等模式(Peer-to-Peer,P2P)。 +主从模式中,开发者可以从主进程中启动所有的智能体进程,而对等模式中,智能体进程相对主进程来说是独立的,需要在对应的机器上启动智能体的服务。 + +上述概念有些复杂,但是不用担心,对于应用开发者而言,它们仅仅在创建智能体阶段有微小的差别。下面我们介绍如何创建分布式智能体。 + +### 步骤1: 创建分布式智能体 + +首先,开发者的智能体必须继承`agentscope.agents.AgentBase`类,`AgentBase`提供了`to_dist`方法将该Agent转化为其分布式版本。`to_dist`主要依靠以下的参数实现智能体分布式部署: + +- `host`: 用于部署智能体的机器IP地址,默认为`localhost`。 +- `port`: 智能体的RPC服务器端口,默认为`80`。 +- `launch_server`: 是否在本地启动RPC服务器,默认为`True`。 + +假设有两个智能体类`AgentA`和`AgentB`,它们都继承自 `AgentBase`。 + + +#### 主从模式 + +主从模式中,由于所有智能体进程依赖于主进程,因此所有进程实际运行在一台机器上。 +我们可以在主进程中启动所有智能体进程,即默认参数`launch_server=True`和`host="localhost"`,同时我们可以省略`port`参数,AgentScope将会为智能体进程自动寻找空闲的本地端口。 + +```python +a = AgentA( + name="A" + # ... +).to_dist() +``` + +#### 对等模式 + +对等模式中,我们需要首先在目标机器上启动对应智能体的服务,例如将`AgentA`的实例部署在IP为`a.b.c.d`的机器上,其对应的端口为12001。在这台目标机器上运行以下代码: + +```python +from agentscope.agents import RpcAgentServerLauncher + +# 创建智能体服务进程 +server_a = RpcAgentServerLauncher( + agent_class=AgentA, + agent_kwargs={ + "name": "A" + ... + }, + host="a.b.c.d", + port=12001, +) +# 启动服务 +server_a.launch() +server_a.wait_until_terminate() +``` + +然后,我们可以在主进程当中用以下的代码连接智能体服务,此时主进程中创建的对象`a`可以当做智能体的本地代理,允许开发者可以在主进程中采取中心化的方式编写应用流程。 + +```python +a = AgentA( + name="A", + ... +).to_dist( + host="a.b.c.d", + port=12001, + launch_server=False, +) +``` + +### 步骤2: 编排分布式应用流程 + +在AgentScope中,分布式应用流程的编排和非分布式的程序完全一致,开发者可以用中心化的方式编写全部应用流程。 +同时,AgentScope允许本地和分布式部署的智能体混合使用,开发者不用特意区分哪些智能体是本地的,哪些是分布式部署的。 + +以下是不同模式下实现两个智能体之间进行对话的全部代码,对比可见,AgentScope支持零代价将分布式应用流程从中心化向分布式迁移。 + +- 智能体全部中心化: + +```python +# 创建智能体对象 +a = AgentA( + name="A", + # ...
+) + +b = AgentB( + name="B", + # ... +) + +# 应用流程编排 +x = None +while x is None or x.content != "exit": + x = a(x) + x = b(x) +``` + +- 智能体分布式部署(主从模式下): + +```python +# 创建智能体对象 +a = AgentA( + name="A" + # ... +).to_dist() + +b = AgentB( + name="B", + # ... +).to_dist() + +# 应用流程编排 +x = None +while x is None or x.content != "exit": + x = a(x) + x = b(x) +``` + +### 实现原理 + +#### Actor模式 + +[Actor模式](https://en.wikipedia.org/wiki/Actor_model)是大规模分布式系统中广泛使用的编程范式,同时也被应用于AgentScope平台的分布式设计中。 +在Actor模型中,一个actor是一个实体,它封装了自己的状态,并且仅通过消息传递与其他actor通信。 + +在AgentScope的分布式模式中,每个Agent都是一个Actor,并通过消息与其他Agent交互。消息的流转暗示了Agent的执行顺序。每个Agent都有一个`reply`方法,它消费一条消息并生成另一条消息,生成的消息可以发送给其他 Agent。例如,下面的图表显示了多个Agent的工作流程。`A`~`F`都是Agent,箭头代表消息。 + +```{mermaid} +graph LR; +A-->B +A-->C +B-->D +C-->D +E-->F +D-->F +``` + +其中,`B`和`C`可以在接收到来自`A`的消息后同时启动执行,而`E`可以立即运行,无需等待`A`、`B`、`C`和`D`。 +通过将每个Agent实现为一个Actor, Agent将自动等待其输入Msg准备好后开始执行`reply`方法,并且如果多个 Agent 的输入消息准备就绪,它们也可以同时自动执行`reply`,这避免了复杂的并行控制。 + +#### PlaceHolder + +同时,为了支持中心化的应用编排,AgentScope引入了Placeholder这一概念。Placeholder是一个特殊的消息,它包含了产生该Placeholder的智能体的地址和端口号,用于表示Agent的输入消息还未准备好。 +当Agent的输入消息准备好后,Placeholder会被替换为真实的消息,然后运行实际的`reply`方法。 + +关于更加详细的技术实现方案,请参考我们的[论文](https://arxiv.org/abs/2402.14034)。 + +[[回到顶部]](#208-distribute-zh) diff --git a/docs/sphinx_doc/zh_CN/source/tutorial_zh/301-community.md b/docs/sphinx_doc/zh_CN/source/tutorial_zh/301-community.md new file mode 100644 index 000000000..39d85a42d --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/tutorial_zh/301-community.md @@ -0,0 +1,34 @@ +(301-community-zh)= + +# 加入AgentScope社区 + +加入AgentScope社区可以让您与其他用户和开发者建立联系。您可以分享见解、提出问题、并及时了解最新的进展和有趣的Multi-Agent应用程序。以下是加入我们的方法: + +## GitHub + +- **关注AgentScope仓库:** 通过关注[AgentScope 仓库](https://github.com/modelscope/agentscope) 以支持并随时了解我们的进展。
+- **提交问题和拉取请求:** 如果您遇到任何问题或有建议,请向相关仓库提交问题。我们也欢迎拉取请求以修复错误、改进或添加新功能。 + +## Discord + +- **加入我们的Discord:** 实时与 AgentScope 社区合作。在[Discord](https://discord.gg/eYMpfnkG8h)上参与讨论,寻求帮助,并分享您的经验和见解。 + +## 钉钉 (DingTalk) + +- **在钉钉上联系:** 加入我们的钉钉群,随时了解有关 AgentScope 的新闻和更新。 + + 扫描下方的二维码加入钉钉群: + + AgentScope-dingtalk + + 我们的钉钉群邀请链接:[AgentScope 钉钉群](https://qr.dingtalk.com/action/joingroup?code=v1,k1,20IUyRX5XZQ2vWjKDsjvI9dhcXjGZi3bq1pFfDZINCM=&_dt_no_comment=1&origin=11) + +## 微信 + +扫描下方的二维码加入微信:AgentScope-wechat + +--- + +我们欢迎所有对AgentScope感兴趣的人加入我们的社区,并为平台的发展做出贡献! + +[[Return to the top]](#301-community-zh) diff --git a/docs/sphinx_doc/zh_CN/source/tutorial_zh/302-contribute.md b/docs/sphinx_doc/zh_CN/source/tutorial_zh/302-contribute.md new file mode 100644 index 000000000..f719ad4e6 --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/tutorial_zh/302-contribute.md @@ -0,0 +1,70 @@ +(302-contribute-zh)= + +# 贡献到AgentScope + +我们的社区因其成员的多样化思想和贡献而兴旺发展。无论是修复一个错误,添加一个新功能,改进文档,还是添加示例,我们都欢迎您的帮助。以下是您做出贡献的方法: + +## 报告错误和提出新功能 + +当您发现一个错误或者有一个功能请求,请首先检查问题跟踪器,查看它是否已经被报告。如果没有,随时可以开设一个新的问题。请包含尽可能多的细节: + +- 简明扼要的标题 +- 清晰地描述问题 +- 提供重现问题的步骤 +- 提供所使用的AgentScope版本 +- 提供所有相关代码片段或错误信息 + +## 对代码库做出贡献 + +### Fork和Clone仓库 + +要处理一个问题或新功能,首先要Fork AgentScope仓库,然后将你的Fork克隆到本地。 + +```bash +git clone https://github.com/your-username/AgentScope.git +cd AgentScope +``` + +### 创建一个新分支 + +为您的工作创建一个新分支。这有助于保持拟议更改的组织性,并与`main`分支分离。 + +```bash +git checkout -b your-feature-branch-name +``` + +### 做出修改 + +创建您的新分支后就可以对代码进行修改了。请注意如果您正在解决多个问题或实现多个功能,最好为每个问题或功能创建单独的分支和拉取请求。 + +我们提供了一个开发者版本,与官方版本相比,它附带了额外的pre-commit钩子以执行格式检查: + +```bash +# 安装开发者版本 +pip install -e .[dev] +# 安装 pre-commit 钩子 +pre-commit install +``` + +### 提交您的修改 + +修改完成之后就是提交它们的时候了。请提供清晰而简洁的提交信息,以解释您的修改内容。 + +```bash +git add -U +git commit -m "修改内容的简要描述" +``` + +运行时您可能会收到 `pre-commit` 给出的错误信息。请根据错误信息修改您的代码然后再次提交。 + +### 提交 Pull Request + +当您准备好您的修改分支后,向AgentScope的 `main` 分支提交一个Pull Request。在您的Pull Request描述中,解释您所做的修改以及其他相关的信息。 + +我们将审查您的Pull 
Request。这个过程可能涉及一些讨论以及额外的代码修改。 + +### 代码审查 + +等待我们审核您的Pull Request。我们可能会提供一些更改或改进建议。请留意您的GitHub通知,并对反馈做出响应。 + +[[Return to the top]](#302-contribute-zh) diff --git a/docs/sphinx_doc/zh_CN/source/tutorial_zh/advance.rst b/docs/sphinx_doc/zh_CN/source/tutorial_zh/advance.rst new file mode 100644 index 000000000..9de74f5cd --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/tutorial_zh/advance.rst @@ -0,0 +1,14 @@ +进阶使用 +==================== + +.. toctree:: + :maxdepth: 2 + + 201-agent.md + 202-pipeline.md + 203-model.md + 204-service.md + 205-memory.md + 206-prompt.md + 207-monitor.md + 208-distribute.md diff --git a/docs/sphinx_doc/zh_CN/source/tutorial_zh/contribute.rst b/docs/sphinx_doc/zh_CN/source/tutorial_zh/contribute.rst new file mode 100644 index 000000000..d17f827f6 --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/tutorial_zh/contribute.rst @@ -0,0 +1,8 @@ +参与贡献 +=============== + +.. toctree:: + :maxdepth: 2 + + 301-community.md + 302-contribute.md \ No newline at end of file diff --git a/docs/sphinx_doc/zh_CN/source/tutorial_zh/main.md b/docs/sphinx_doc/zh_CN/source/tutorial_zh/main.md new file mode 100644 index 000000000..70430a995 --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/tutorial_zh/main.md @@ -0,0 +1,35 @@ +# 欢迎来到 AgentScope 教程 + +AgentScope是一款全新的Multi-Agent框架,专为应用开发者打造,旨在提供高易用、高可靠的编程体验! 
+ +- **高易用**:AgentScope支持纯Python编程,提供多种语法工具实现灵活的应用流程编排,内置丰富的API服务(Service)以及应用样例,供开发者直接使用。 + +- **高鲁棒**:确保开发便捷性和编程效率的同时,针对不同能力的大模型,AgentScope提供了全面的重试机制、定制化的容错控制和面向Agent的异常处理,以确保应用的稳定、高效运行; + +- **基于Actor的分布式机制**:AgentScope设计了一种新的基于Actor的分布式机制,实现了复杂分布式工作流的集中式编程和自动并行优化,即用户可以使用中心化编程的方式完成分布式应用的流程编排,同时能够零代价将本地应用迁移到分布式的运行环境中。 + +## 教程大纲 + +### 快速上手 + +- [安装](102-installation-zh) +- [基础概念](101-agentscope-zh) +- [快速上手案例](103-example-zh) +- [创建您的第一个应用](104-usecase-zh) +- [日志和WebUI](105-logging-zh) + +### 进阶使用 + +- [定制自己的Agent](201-agent-zh) +- [智能体间交互](202-pipeline-zh) +- [关于模型](203-model-zh) +- [关于服务](204-service-zh) +- [关于记忆](205-memory-zh) +- [提示工程](206-prompt-zh) +- [监控器](207-monitor-zh) +- [关于分布式](208-distribute-zh) + +### 参与贡献 + +- [加入 AgentScope 社区](301-community-zh) +- [为 AgentScope 做贡献](302-contribute-zh) diff --git a/docs/sphinx_doc/zh_CN/source/tutorial_zh/quick_start.rst b/docs/sphinx_doc/zh_CN/source/tutorial_zh/quick_start.rst new file mode 100644 index 000000000..8153daefe --- /dev/null +++ b/docs/sphinx_doc/zh_CN/source/tutorial_zh/quick_start.rst @@ -0,0 +1,11 @@ +快速上手 +=============== + +.. toctree:: + :maxdepth: 2 + + 102-installation.md + 101-agentscope.md + 103-example.md + 104-usecase.md + 105-logging.md \ No newline at end of file