Skip to content

Commit

Permalink
Merge pull request modelscope#5 from FredericW/dev_copilot_zitao
Browse files Browse the repository at this point in the history
copilot dialog agents update
  • Loading branch information
ZiTao-Li authored Apr 19, 2024
2 parents 0fc5e16 + 5d6c0fe commit dd0d328
Show file tree
Hide file tree
Showing 5 changed files with 71 additions and 84 deletions.
59 changes: 9 additions & 50 deletions examples/conversation_with_RAG_agents/configs/agent_config.json
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
"args": {
"name": "Tutorial-Assistant",
"description": "Tutorial-Assistant is an agent that can provide answer based on English tutorial material, mainly the markdown files. It can answer general questions about AgentScope.",
"sys_prompt": "You're an assistant helping new users to use AgentScope. You need to generate answers based on the provided context. You only provide the most relevant information. The answer is limited to be less than 100 words. If the key words of the question can be found in the provided context, the answer should contain the section which contains the answer, for example, saying 'You may refer to SECTION_NAME for more details.' The answers must to be itemized.",
"sys_prompt": "You're an assistant helping new users to use AgentScope. The language style is helpful and cheerful. You generate answers based on the provided context. The answer is expected to be no longer than 100 words. If the key words of the question can be found in the provided context, the answer should contain the section name which contains the answer. For example, 'You may refer to SECTION_NAME for more details.'",
"model_config_name": "qwen_config",
"emb_model_config_name": "qwen_emb_config",
"rag_config": {
Expand All @@ -21,7 +21,7 @@
},
"chunk_size": 2048,
"chunk_overlap": 40,
"similarity_top_k": 10,
"similarity_top_k": 5,
"log_retrieval": false,
"recent_n_mem": 1,
"persist_dir": "../../rag_storage/tutorial_assist"
Expand All @@ -32,8 +32,8 @@
"class": "LlamaIndexAgent",
"args": {
"name": "Code-Search-Assistant",
"description": "Code-Search-Assistant is an agent that can provide answer based on AgentScope code base. It can answer questions about specific functions and classes in AgentScope.",
"sys_prompt": "You're an assistant answering coding questions about AgentScope. The answer is sturucted as follows. (1) the key features, (2) an example showing how to use it, (3) where to find the example, and source codes.",
"description": "Code-Search-Assistant is an agent that can provide answers based on the AgentScope code base. It can answer questions about specific modules in AgentScope.",
"sys_prompt": "You're a coding assistant of AgentScope. The answer starts with appreciation for the question, then provides details regarding the functionality and features of the modules mentioned in the question. The language should be in a professional and simple style. The answer is limited to be less than 100 words.",
"model_config_name": "qwen_config",
"emb_model_config_name": "qwen_emb_config",
"rag_config": {
Expand Down Expand Up @@ -64,62 +64,21 @@
},
"chunk_size": 2048,
"chunk_overlap": 40,
"similarity_top_k": 10,
"similarity_top_k": 5,
"log_retrieval": false,
"recent_n_mem": 1,
"persist_dir": "../../rag_storage/code_assist"
}
}
},
{
"class": "LlamaIndexAgent",
"args": {
"name": "API-Assistant",
"description": "API-Assistant is an agent that can provide answer based on docstrings of Agentscope. It can answer questions about whether there is any function in AgentScope and how to call the functions.",
"sys_prompt": "You're an assistant that know every API, function and class in AgentScope. You need to generate answers based on the provided context. You provide the most relevant information and tell people how to use the function or how to create objects of classes. You need to provide the name of module and give detailed information of how to find the function or class. The answer is limited to be less than 200 words.",
"model_config_name": "qwen_config",
"emb_model_config_name": "qwen_emb_config",
"rag_config": {
"load_data": {
"loader": {
"create_object": true,
"module": "llama_index.readers.docstring_walker",
"class": "DocstringWalker",
"init_args": {}
},
"query": "../../src/agentscope"
},
"store_and_index": {
"transformations": [
{
"create_object": true,
"module": "llama_index.core.node_parser",
"class": "TokenTextSplitter",
"init_args": {
"chunk_size": 1024,
"chunk_overlap": 20,
"separator": " "
}
}
]
},
"chunk_size": 2048,
"chunk_overlap": 40,
"similarity_top_k": 4,
"log_retrieval": true,
"recent_n_mem": 1,
"persist_dir": "../../rag_storage/docstring_assist"
}
}
},
{
"class": "DialogAgent",
"args": {
"name": "Summarize-Assistant",
"description": "Summarize-Assistant is an agent that can summarize multiple RAG agents' answers.",
"sys_prompt": "You summarize the answers of the previous two messages and remove the redundant information. The answer need to be simple and itemized. The answer needs to be less than 100 words.ex .",
"name": "Agent-Guiding-Assistant",
"description": "Agent-Guiding-Assistant is an agent that decides which agent should provide the answer next. It can answer questions about specific functions and classes in AgentScope.",
"sys_prompt": "You're an assistant guiding the user to a specific agent for help. The answer is in a cheerful styled language. The output starts with appreciation for the question. Next, rephrase the question in a simple declarative sentence, for example, 'I think you are asking...'. Last, if the question is a coding question, output '@ Code-Search-Assistant, you might be suitable for answering the question.' Otherwise, output '@ Tutorial-Assistant, I think you are more suitable for the question, please tell us more about it.' The answer is expected to be one line only.",
"model_config_name": "qwen_config",
"use_memory": true
"use_memory": false
}
}
]
37 changes: 37 additions & 0 deletions examples/conversation_with_RAG_agents/groupchat_utils.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
# -*- coding: utf-8 -*-
""" Group chat utils."""
import re
from typing import Any, Sequence


def select_next_one(agents: Sequence, rnd: int) -> Any:
    """
    Pick the next speaker from ``agents`` in round-robin order.

    Args:
        agents: A non-empty sequence of agent objects to cycle through.
        rnd: The current round counter; the selected index is
            ``rnd % len(agents)``.

    Returns:
        The single agent at position ``rnd % len(agents)``.
        (The original annotation said ``Sequence``, but one agent —
        not a sequence — is returned, so the annotation is ``Any``.)

    Raises:
        ZeroDivisionError: If ``agents`` is empty (``len(agents) == 0``).
    """
    return agents[rnd % len(agents)]


def filter_agents(string: str, agents: Sequence) -> Sequence:
    """
    Scan ``string`` for '@'-mentions of the given agents' names and
    return the mentioned agent objects in the order they appear.

    Only names belonging to ``agents`` are recognized; an empty list is
    returned when ``agents`` is empty or nothing matches.
    """
    if not agents:
        return []

    # Map each known name to its agent object for O(1) lookup.
    by_name = {agent.name: agent for agent in agents}

    # '@' followed by any candidate name, bounded so e.g. "@Bob" does
    # not match inside "@Bobby".
    mention_pattern = (
        r"@(" + "|".join(re.escape(name) for name in by_name) + r")\b"
    )

    # Resolve every mention back to its agent, preserving textual order.
    return [
        by_name[name]
        for name in re.findall(mention_pattern, string)
        if name in by_name
    ]
3 changes: 2 additions & 1 deletion examples/conversation_with_RAG_agents/rag/llama_index_rag.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,8 @@
BaseEmbedding, Embedding = None, None
IngestionPipeline, BasePydanticVectorStore, VectorStore = None, None, None
NodeParser, SentenceSplitter = None, None
VectorStoreIndex = None
VectorStoreIndex, StorageContext = None, None
load_index_from_storage = None
PrivateAttr = None

from rag import RAGBase
Expand Down
1 change: 1 addition & 0 deletions examples/conversation_with_RAG_agents/rag_agents.py
Original file line number Diff line number Diff line change
Expand Up @@ -317,6 +317,7 @@ def init_rag(self) -> LlamaIndexRAG:
load_data_args = {
"loader": SimpleDirectoryReader(self.config["data_path"]),
}
# NOTE: "data_path" is never used/defined for the current version.
logger.info(f"rag.load_data args: {load_data_args}")
docs = rag.load_data(**load_data_args)

Expand Down
55 changes: 22 additions & 33 deletions examples/conversation_with_RAG_agents/rag_example.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""
A simple example for conversation between user and
an agent with RAG capability.
An example for conversation between user and agents with RAG capability.
One agent is a tutorial assistant, the other is a code explainer.
"""
import json
import os
Expand All @@ -11,8 +11,6 @@

import agentscope
from agentscope.agents import UserAgent

from agentscope.message import Msg
from agentscope.agents import DialogAgent


Expand All @@ -34,54 +32,45 @@ def main() -> None:

with open("configs/agent_config.json", "r", encoding="utf-8") as f:
agent_configs = json.load(f)
# define RAG-based agents for tutorial and code
tutorial_agent = LlamaIndexAgent(**agent_configs[0]["args"])
code_explain_agent = LlamaIndexAgent(**agent_configs[1]["args"])
api_agent = LlamaIndexAgent(**agent_configs[2]["args"])
agent_configs[3]["args"].pop("description")
summarize_agent = DialogAgent(**agent_configs[3]["args"])
# define a guide agent
agent_configs[2]["args"].pop("description")
guide_agent = DialogAgent(**agent_configs[2]["args"])
rag_agents = [
tutorial_agent,
code_explain_agent,
api_agent,
]
rag_agent_names = [agent.name for agent in rag_agents]
summarize_agents = [summarize_agent]
summarize_agent_names = [agent.name for agent in summarize_agents]
helper_agents = rag_agents + summarize_agents

user_agent = UserAgent()
# start the conversation between user and assistant
while True:
"""
The workflow is the following:
1. user input a message,
2. if it mentions one of the agents, then the agent will be called
3. otherwise, the guide agent will be decide which agent to call
4. the called agent will response to the user
5. repeat
"""
x = user_agent()
x.role = "user" # to enforce dashscope requirement on roles
if len(x["content"]) == 0 or str(x["content"]).startswith("exit"):
break
speak_list = filter_agents(x.get("content", ""), helper_agents)
speak_list = filter_agents(x.get("content", ""), rag_agents)
if len(speak_list) == 0:
# if no agent is @ (mentioned), default invoke all rag agents and
# summarize agents
speak_list = rag_agents + summarize_agents
for agent in speak_list:
if agent.name in summarize_agent_names:
# if summarize agent is mentioned, then also call rag agents
# TODO: let summarize agent choose which agent to call
speak_list = rag_agents + summarize_agents

guide_response = guide_agent(x)
# Only one agent can be called in the current version,
# we may support multi-agent conversation later
speak_list = filter_agents(guide_response.get("content", ""),
rag_agents)
agent_name_list = [agent.name for agent in speak_list]
rag_agent_responses = []
for agent_name, agent in zip(agent_name_list, speak_list):
if agent_name in rag_agent_names:
rag_agent_responses.append(agent(x))

msg = Msg(
name="user",
role="user",
content="/n".join([msg.content for msg in rag_agent_responses]),
)
for agent_name, agent in zip(agent_name_list, speak_list):
if agent_name in summarize_agent_names:
agent(msg)
agent(x)


if __name__ == "__main__":
main()

0 comments on commit dd0d328

Please sign in to comment.