and act iteratively to solve problems. More details can be found in the paper
https://arxiv.org/abs/2210.03629."""
-importjson
-fromtypingimportTuple,List
+fromtypingimportAny
+fromloguruimportlogger
+
+fromagentscope.exceptionimportResponseParsingError,FunctionCallErrorfromagentscope.agentsimportAgentBasefromagentscope.messageimportMsg
-fromagentscope.modelsimportResponseParser,ResponseParsingError
-fromagentscope.serviceimportServiceResponse,ServiceExecStatus
-
-
-DEFAULT_TOOL_PROMPT="""The following tool functions are available in the format of
-```
-{{index}}. {{function name}}: {{function description}}
- {{argument1 name}} ({{argument type}}): {{argument description}}
- {{argument2 name}} ({{argument type}}): {{argument description}}
- ...
-```
+fromagentscope.parsersimportMarkdownJsonDictParser
+fromagentscope.serviceimportServiceToolkit
+fromagentscope.service.service_toolkitimportServiceFunction
-## Tool Functions:
-{function_prompt}
-
-## What You Should Do:
+INSTRUCTION_PROMPT="""## What You Should Do:1. First, analyze the current situation, and determine your goal.2. Then, check if your goal is already achieved. If so, try to generate a response. Otherwise, think about how to achieve it with the help of provided tool functions.3. Respond in the required format.
@@ -131,37 +124,8 @@
Source code for agentscope.agents.react_agent
3. Make sure the types and values of the arguments you provided to the tool functions are correct.
4. Don't take things for granted. For example, where you are, what's the time now, etc. You can try to use the tool functions to get information.5. If the function execution fails, you should analyze the error and try to solve it.
-
"""# noqa
-TOOL_HINT_PROMPT="""
-## Response Format:
-You should respond with a JSON object in the following format, which can be loaded by `json.loads` in Python directly. If no tool function is used, the "function" field should be an empty list.
-{
- "thought": "what you thought",
- "speak": "what you said",
- "function": [{"name": "{function name}", "arguments": {"{argument1 name}": xxx, "{argument2 name}": xxx}}]
-}"""# noqa
-
-FUNCTION_RESULT_TITLE_PROMPT="""Execution Results:
-"""
-
-FUNCTION_RESULT_PROMPT="""{index}. {function_name}:
- [EXECUTE STATUS]: {status}
- [EXECUTE RESULT]: {result}
-"""
-
-ERROR_INFO_PROMPT="""Your response is not a JSON object, and cannot be parsed by `json.loads` in parse function:
-## Your Response:
-[YOUR RESPONSE BEGIN]
-{response}
-[YOUR RESPONSE END]
-
-## Error Information:
-{error_info}
-
-Analyze the reason, and re-correct your response in the correct format."""# pylint: disable=all # noqa
-
self,name:str,model_config_name:str,
- tools:List[Tuple],
+ service_toolkit:ServiceToolkit=None,sys_prompt:str="You're a helpful assistant. Your name is {name}.",max_iters:int=10,verbose:bool=True,
+ **kwargs:Any,)->None:"""Initialize the ReAct agent with the given name, model config name and tools.
@@ -197,13 +162,14 @@
Source code for agentscope.agents.react_agent
model_config_name (`str`):
The name of the model config, which is used to load model from configuration.
- tools (`List[Tuple]`):
- A list of tuples, each containing the name of a tool and the
- tool's description in JSON schema format.
+ service_toolkit (`ServiceToolkit`):
+ A `ServiceToolkit` object that contains the tool functions. max_iters (`int`, defaults to `10`): The maximum number of iterations of the reasoning-acting loops. verbose (`bool`, defaults to `True`):
- Whether to print the output of the tools.
+ Whether to print the detailed information during reasoning and
+ acting steps. If `False`, only the content in speak field will
+ be print out. """super().__init__(name=name,
@@ -211,24 +177,69 @@
Source code for agentscope.agents.react_agent
model_config_name=model_config_name,)
- self.tools=tools
- self.verbose=verbose
- self.max_iters=max_iters
+ # TODO: To compatible with the old version, which will be deprecated
+ # soon
+ if"tools"inkwargs:
+ logger.warning(
+ "The argument `tools` will be deprecated soon. "
+ "Please use `service_toolkit` instead. Example refers to "
+ "https://github.com/modelscope/agentscope/blob/main/"
+ "examples/conversation_with_react_agent/code/"
+ "conversation_with_react_agent.py",
+ )
- func_prompt,self.func_name_mapping=self.prepare_funcs_prompt(tools)
+ service_funcs={}
+ forfunc,json_schemainkwargs["tools"]:
+ name=json_schema["function"]["name"]
+ service_funcs[name]=ServiceFunction(
+ name=name,
+ original_func=func,
+ processed_func=func,
+ json_schema=json_schema,
+ )
- # Prepare system prompt
- tools_prompt=DEFAULT_TOOL_PROMPT.format(function_prompt=func_prompt)
+ ifservice_toolkitisNone:
+ service_toolkit=ServiceToolkit()
+ service_toolkit.service_funcs=service_funcs
+ else:
+ service_toolkit.service_funcs.update(service_funcs)
- ifsys_prompt.endswith("\n"):
- self.sys_prompt=sys_prompt.format(name=self.name)+tools_prompt
- else:
- self.sys_prompt=(
- sys_prompt.format(name=self.name)+"\n"+tools_prompt
+ elifservice_toolkitisNone:
+ raiseValueError(
+ "The argument `service_toolkit` is required to initialize "
+ "the ReActAgent.",)
+ self.service_toolkit=service_toolkit
+ self.verbose=verbose
+ self.max_iters=max_iters
+
+ ifnotsys_prompt.endswith("\n"):
+ sys_prompt=sys_prompt+"\n"
+
+ self.sys_prompt="\n".join(
+ [
+ # The brief intro of the role and target
+ sys_prompt.format(name=self.name),
+ # The instruction prompt for tools
+ self.service_toolkit.tools_instruction,
+ # The detailed instruction prompt for the agent
+ INSTRUCTION_PROMPT,
+ ],
+ )
+
# Put sys prompt into memory
- self.memory.add(Msg("system",self.sys_prompt,role="system"))
+ self.memory.add(Msg("system",self.sys_prompt,role="system"))
+
+ # Initialize a parser object to formulate the response from the model
+ self.parser=MarkdownJsonDictParser(
+ content_hint={
+ "thought":"what you thought",
+ "speak":"what you speak",
+ "function":service_toolkit.tools_calling_format,
+ },
+ required_keys=["thought","speak","function"],
+ )
@@ -237,41 +248,67 @@
Source code for agentscope.agents.react_agent
"""The reply function that achieves the ReAct algorithm. The more details please refer to https://arxiv.org/abs/2210.03629"""
- ifself.memory:
- self.memory.add(x)
+ self.memory.add(x)for_inrange(self.max_iters):# Step 1: Thought
+ ifself.verbose:
+ self.speak(f" ITER {_+1}, STEP 1: REASONING ".center(70,"#"))
+
+ # Prepare hint to remind model what the response format is
+ # Won't be recorded in memory to save tokens
+ hint_msg=Msg(
+ "system",
+ self.parser.format_instruction,
+ role="system",
+ )
+ ifself.verbose:
+ self.speak(hint_msg)
- self.speak(f" ITER {_+1}, STEP 1: REASONING ".center(70,"#"))
+ # Prepare prompt for the model
+ prompt=self.model.format(self.memory.get_memory(),hint_msg)
+ # Generate and parse the responsetry:
- hint_msg=Msg("system",TOOL_HINT_PROMPT,role="system")
- self.memory.add(hint_msg)
-
- # Generate LLM response
- prompt=self.model.format(self.memory.get_memory())res=self.model(prompt,
- parse_func=ResponseParser.to_dict,
+ parse_func=self.parser.parse,max_retries=1,
- ).json
+ )
+
+ # Record the response in memory
+ msg_response=Msg(self.name,res.text,"assistant")
+ self.memory.add(msg_response)
+ # Print out the response
+ ifself.verbose:
+ self.speak(msg_response)
+ else:
+ self.speak(
+ Msg(self.name,res.parsed["speak"],"assistant"),
+ )
+
+ # Skip the next steps if no need to call tools
+ # The parsed field is a dictionary
+ arg_function=res.parsed["function"]
+ if(
+ isinstance(arg_function,str)
+ andarg_functionin["[]",""]
+ orisinstance(arg_function,list)
+ andlen(arg_function)==0
+ ):
+ # Only the speak field is exposed to users or other agents
+ returnMsg(self.name,res.parsed["speak"],"assistant")
+
+ # Only catch the response parsing error and expose runtime
+ # errors to developers for debuggingexceptResponseParsingErrorase:
- # Record the wrong response from the model
- response_msg=Msg(self.name,e.response.text,"assistant")
+ # Print out raw response from models for developers to debug
+ response_msg=Msg(self.name,e.raw_response,"assistant")self.speak(response_msg)# Re-correct by model itself
- error_msg=Msg(
- "system",
- ERROR_INFO_PROMPT.format(
- parse_func=ResponseParser.to_dict,
- error_info=e.error_info,
- response=e.response.text,
- ),
- "system",
- )
+ error_msg=Msg("system",str(e),"system")self.speak(error_msg)self.memory.add([response_msg,error_msg])
@@ -279,160 +316,51 @@
Source code for agentscope.agents.react_agent
# Skip acting step to re-correct the response
continue
- # Record the response in memory
- msg_thought=Msg(self.name,res,role="assistant")
-
- # To better display the response, we reformat it by json.dumps here
- self.speak(
- Msg(self.name,json.dumps(res,indent=4),role="assistant"),
- )
-
- ifself.memory:
- self.memory.add(msg_thought)
-
- # Skip the next steps if no need to call tools
- iflen(res.get("function",[]))==0:
- returnmsg_thought
+ # Step 2: Acting
+ ifself.verbose:
+ self.speak(f" ITER {_+1}, STEP 2: ACTING ".center(70,"#"))
- # Step 2: Action
-
- self.speak(f" ITER {_+1}, STEP 2: ACTION ".center(70,"#"))
-
- # Execute functions
- # TODO: check the provided arguments and re-correct them if needed
- execute_results=[]
- fori,funcinenumerate(res["function"]):
- # Execute the function
- func_res=self.execute_func(i,func)
- execute_results.append(func_res)
-
- # Prepare prompt for execution results
- execute_results_prompt="\n".join(
- [
- FUNCTION_RESULT_PROMPT.format_map(res)
- forresinexecute_results
- ],
- )
- # Add title
- execute_results_prompt=(
- FUNCTION_RESULT_TITLE_PROMPT+execute_results_prompt
- )
+ # Parse, check and execute the tool functions in service toolkit
+ try:
+ execute_results=self.service_toolkit.parse_and_call_func(
+ res.parsed["function"],
+ )
- # Note: Observing the execution results and generate response are
- # finished in the next loop. We just put the execution results
- # into memory, and wait for the next loop to generate response.
+ # Note: Observing the execution results and generate response
+ # are finished in the next reasoning step. We just put the
+ # execution results into memory, and wait for the next loop
+ # to generate response.
- # Record execution results into memory as a message from the system
- msg_res=Msg(
- name="system",
- content=execute_results_prompt,
- role="system",
- )
- self.speak(msg_res)
- ifself.memory:
+ # Record execution results into memory as system message
+ msg_res=Msg("system",execute_results,"system")
+ self.speak(msg_res)self.memory.add(msg_res)
- returnMsg(
+ exceptFunctionCallErrorase:
+ # Catch the function calling error that can be handled by
+ # the model
+ error_msg=Msg("system",str(e),"system")
+ self.speak(error_msg)
+ self.memory.add(error_msg)
+
+ # Exceed the maximum iterations
+ hint_msg=Msg("system",
- "The agent has reached the maximum iterations.",
+ "You have failed to generate a response in the maximum "
+ "iterations. Now generate a reply by summarizing the current "
+ "situation.",role="system",
- )
-
-
-
-[docs]
- defexecute_func(self,index:int,func_call:dict)->dict:
-"""Execute the tool function and return the result.
-
- Args:
- index (`int`):
- The index of the tool function.
- func_call (`dict`):
- The function call dictionary with keys 'name' and 'arguments'.
-
- Returns:
- `ServiceResponse`: The execution results.
- """
- # Extract the function name and arguments
- func_name=func_call["name"]
- func_args=func_call["arguments"]
-
- self.speak(f">>> Executing function {func_name} ...")
-
- try:
- func_res=self.func_name_mapping[func_name](**func_args)
- exceptExceptionase:
- func_res=ServiceResponse(
- status=ServiceExecStatus.ERROR,
- content=str(e),
- )
-
- self.speak(">>> END ")
-
- status=(
- "SUCCESS"
- iffunc_res.status==ServiceExecStatus.SUCCESS
- else"FAILED")
+ ifself.verbose:
+ self.speak(hint_msg)
- # return the result of the function
- return{
- "index":index+1,
- "function_name":func_name,
- "status":status,
- "result":func_res.content,
- }
-
-
-
-[docs]
- defprepare_funcs_prompt(self,tools:List[Tuple])->Tuple[str,dict]:
-"""Convert function descriptions from json schema format to
- string prompt format.
-
- Args:
- tools (`List[Tuple]`):
- The list of tool functions and their descriptions in JSON
- schema format.
-
- Returns:
- `Tuple[str, dict]`:
- The string prompt for the tool functions and a function name
- mapping dict.
-
- .. code-block:: python
-
- {index}. {function name}: {function description}
- {argument name} ({argument type}): {argument description}
- ...
-
- """
- tools_prompt=[]
- func_name_mapping={}
- fori,(func,desc)inenumerate(tools):
- func_name=desc["function"]["name"]
- func_name_mapping[func_name]=func
-
- func_desc=desc["function"]["description"]
- args_desc=desc["function"]["parameters"]["properties"]
-
- args_list=[f"{i+1}. {func_name}: {func_desc}"]
- forargs_name,args_infoinargs_desc.items():
- if"type"inargs_info:
- args_line=(
- f'\t{args_name} ({args_info["type"]}): '
- f'{args_info.get("description","")}'
- )
- else:
- args_line=(
- f'\t{args_name}: {args_info.get("description","")}'
- )
- args_list.append(args_line)
-
- func_prompt="\n".join(args_list)
- tools_prompt.append(func_prompt)
+ # Generate a reply by summarizing the current situation
+ prompt=self.model.format(self.memory.get_memory(),hint_msg)
+ res=self.model(prompt)
+ res_msg=Msg(self.name,res.text,"assistant")
+ self.speak(res_msg)
- return"\n".join(tools_prompt),func_name_mapping
+[docs]
+classResponseParsingError(Exception):
+"""The exception class for response parsing error with uncertain
+ reasons."""
+
+ raw_response:str
+"""Record the raw response."""
+
+
+[docs]
+ def__init__(self,message:str,raw_response:str=None)->None:
+"""Initialize the exception with the message."""
+ self.message=message
+ self.raw_response=raw_response
+[docs]
+classJsonParsingError(ResponseParsingError):
+"""The exception class for JSON parsing error."""
+
+
+
+
+[docs]
+classJsonTypeError(ResponseParsingError):
+"""The exception class for JSON type error."""
+
+
+
+
+[docs]
+classRequiredFieldNotFoundError(ResponseParsingError):
+"""The exception class for missing required field in model response, when
+ the response is required to be a JSON dict object with required fields."""
+
+
+
+
+[docs]
+classTagNotFoundError(ResponseParsingError):
+"""The exception class for missing tagged content in model response."""
+
+ missing_begin_tag:bool
+"""If the response misses the begin tag."""
+
+ missing_end_tag:bool
+"""If the response misses the end tag."""
+
+
+[docs]
+ def__init__(
+ self,
+ message:str,
+ raw_response:str=None,
+ missing_begin_tag:bool=True,
+ missing_end_tag:bool=True,
+ ):
+"""Initialize the exception with the message.
+
+ Args:
+ raw_response (`str`):
+ Record the raw response from the model.
+ missing_begin_tag (`bool`, defaults to `True`):
+ If the response misses the beginning tag, default to `True`.
+ missing_end_tag (`bool`, defaults to `True`):
+ If the response misses the end tag, default to `True`.
+ """
+ super().__init__(message,raw_response)
+
+ self.missing_begin_tag=missing_begin_tag
+ self.missing_end_tag=missing_end_tag
+
+
+
+
+# - Function Calling Exceptions
+
+
+
+[docs]
+classFunctionCallError(Exception):
+"""The base class for exception raising during calling functions."""
+
+
)cfgs=configs
+ ifcfgsisNone:
+ raiseTypeError(
+ f"Invalid type of model_configs, it could be a dict, a list of "
+ f"dicts, or a path to a json file (containing a dict or a list "
+ f"of dicts), but got {type(configs)}",
+ )
+
format_configs=_ModelConfig.format_configs(configs=cfgs)# check if name is unique
diff --git a/en/_modules/agentscope/models/dashscope_model.html b/en/_modules/agentscope/models/dashscope_model.html
index 8449bc4a4..96b8f8ec1 100644
--- a/en/_modules/agentscope/models/dashscope_model.html
+++ b/en/_modules/agentscope/models/dashscope_model.html
@@ -62,6 +62,8 @@
embedding:Sequence=None,image_urls:Sequence[str]=None,raw:Any=None,
+ parsed:Any=None,)->None:"""Initialize the model response.
@@ -142,13 +144,36 @@
Source code for agentscope.models.response
The image URLs returned by the model. raw (`Any`, optional): The raw data returned by the model.
+ parsed (`Any`, optional):
+ The parsed data returned by the model. """self.text=textself.embedding=embeddingself.image_urls=image_urls
- self.raw=raw
+ self.raw=raw
+ self.parsed=parsed
+ def__getattribute__(self,item:str)->Any:
+"""Warning for the deprecated json attribute."""
+ ifitem=="json":
+ logger.warning(
+ "The json attribute in ModelResponse class is deprecated. Use"
+ " parsed attribute instead.",
+ )
+
+ returnsuper().__getattribute__(item)
+
+ def__setattr__(self,key:str,value:Any)->Optional[Any]:
+"""Warning for the deprecated json attribute."""
+ ifkey=="json":
+ logger.warning(
+ "The json attribute in ModelResponse class is deprecated. Use"
+ " parsed attribute instead.",
+ )
+
+ returnsuper().__setattr__(key,value)
+
def__str__(self)->str:if_is_json_serializable(self.raw):raw=self.raw
@@ -159,126 +184,11 @@
-[docs]
-classResponseParser:
-"""A class that contains several static methods to parse the response."""
-
-
-[docs]
- @classmethod
- defto_dict(cls,response:ModelResponse)->ModelResponse:
-"""Parse the response text to a dict, and feed it into the `json`
- field."""
- text=response.text
- iftextisnotNone:
- logger.debug("Text before parsing",text)
-
- # extract from the first '{' to the last '}'
- index_start=max(text.find("{"),0)
- index_end=min(text.rfind("}")+1,len(text))
-
- text=text[index_start:index_end]
- logger.debug("Text after parsing",text)
-
- response.text=text
- response.json=json.loads(text)
- returnresponse
- else:
- raiseValueError(
- f"The text field of the model response is None: {response}",
- )
-
-
-
-[docs]
- @classmethod
- defto_list(cls,response:ModelResponse)->ModelResponse:
-"""Parse the response text to a list, and feed it into the `json`
- field."""
- text=response.text
- iftextisnotNone:
- logger.debug("Text before parsing",text)
-
- # extract from the first '{' to the last '}'
- index_start=max(text.find("["),0)
- index_end=min(text.rfind("]")+1,len(text))
-
- text=text[index_start:index_end]
- logger.debug("Text after parsing",text)
-
- response.text=text
- response.json=json.loads(text)
- returnresponse
- else:
- raiseValueError(
- f"The text field of the model response is None: {response}",
- )
-
-
-
-
-
-[docs]
-classResponseParsingError(Exception):
-"""Exception raised when parsing the response fails."""
-
- parse_func:str
-"""The source code of the parsing function."""
-
- error_info:str
-"""The detail information of the error."""
-
- response:ModelResponse
-"""The response that fails to be parsed."""
-
-
-[docs]
- def__init__(
- self,
- *args:Any,
- parse_func:Callable,
- error_info:str,
- response:ModelResponse,
- **kwargs:Any,
- )->None:
-"""Initialize the exception.
-
- Args:
- parse_func (`str`):
- The source code of the parsing function.
- error_info (`str`):
- The detail information of the error.
- response (`ModelResponse`):
- The response that fails to be parsed.
- """
- super().__init__(*args,**kwargs)
-
- self.parse_func_code=inspect.getsource(parse_func)
- self.error_info=error_info
- self.response=response
Source code for agentscope.parsers.code_block_parser
+# -*- coding: utf-8 -*-
+"""Model response parser class for Markdown code block."""
+fromagentscope.modelsimportModelResponse
+fromagentscope.parsersimportParserBase
+
+
+
+[docs]
+classMarkdownCodeBlockParser(ParserBase):
+"""The base class for parsing the response text by fenced block."""
+
+ name:str="{language_name} block"
+"""The name of the parser."""
+
+ tag_begin:str="```{language_name}"
+"""The beginning tag."""
+
+ content_hint:str="${your_{language_name}_CODE}"
+"""The hint of the content."""
+
+ tag_end:str="```"
+"""The ending tag."""
+
+ format_instruction:str=(
+ "You should generate {language_name} code in a {language_name} fenced "
+ "code block as follows: \n```{language_name}\n"
+ "${your_{language_name}_CODE}\n```"
+ )
+"""The instruction for the format of the code block."""
+
+
+[docs]
+ defparse(self,response:ModelResponse)->ModelResponse:
+"""Extract the content between the tag_begin and tag_end in the
+ response and store it in the parsed field of the response object.
+ """
+
+ extract_text=self._extract_first_content_by_tag(
+ response,
+ self.tag_begin,
+ self.tag_end,
+ )
+ response.parsed=extract_text
+ returnresponse
Source code for agentscope.parsers.json_object_parser
+# -*- coding: utf-8 -*-
+"""The parser for JSON object in the model response."""
+importjson
+fromcopyimportdeepcopy
+fromtypingimportOptional,Any,List
+
+fromloguruimportlogger
+
+fromagentscope.exceptionimport(
+ TagNotFoundError,
+ JsonParsingError,
+ JsonTypeError,
+ RequiredFieldNotFoundError,
+)
+fromagentscope.modelsimportModelResponse
+fromagentscope.parsersimportParserBase
+fromagentscope.utils.toolsimport_join_str_with_comma_and
+
+
+
+[docs]
+classMarkdownJsonObjectParser(ParserBase):
+"""A parser to parse the response text to a json object."""
+
+ name:str="json block"
+"""The name of the parser."""
+
+ tag_begin:str="```json"
+"""Opening tag for a code block."""
+
+ content_hint:str="{your_json_object}"
+"""The hint of the content."""
+
+ tag_end:str="```"
+"""Closing end for a code block."""
+
+ _format_instruction=(
+ "You should respond a json object in a json fenced code block as "
+ "follows:\n```json\n{content_hint}\n```"
+ )
+"""The instruction for the format of the json object."""
+
+
+[docs]
+ def__init__(self,content_hint:Optional[Any]=None)->None:
+"""Initialize the parser with the content hint.
+
+ Args:
+ content_hint (`Optional[Any]`, defaults to `None`):
+ The hint used to remind LLM what should be fill between the
+ tags. If it is a string, it will be used as the content hint
+ directly. If it is a dict, it will be converted to a json
+ string and used as the content hint.
+ """
+ ifcontent_hintisnotNone:
+ ifisinstance(content_hint,str):
+ self.content_hint=content_hint
+ else:
+ self.content_hint=json.dumps(
+ content_hint,
+ ensure_ascii=False,
+ )
+
+
+
+[docs]
+ defparse(self,response:ModelResponse)->ModelResponse:
+"""Parse the response text to a json object, and fill it in the parsed
+ field in the response object."""
+
+ # extract the content and try to fix the missing tags by hand
+ try:
+ extract_text=self._extract_first_content_by_tag(
+ response,
+ self.tag_begin,
+ self.tag_end,
+ )
+ exceptTagNotFoundErrorase:
+ # Try to fix the missing tag error by adding the tag
+ try:
+ response_copy=deepcopy(response)
+
+ # Fix the missing tags
+ ife.missing_begin_tag:
+ response_copy.text=(
+ self.tag_begin+"\n"+response_copy.text
+ )
+ ife.missing_end_tag:
+ response_copy.text=response_copy.text+self.tag_end
+
+ # Try again to extract the content
+ extract_text=self._extract_first_content_by_tag(
+ response_copy,
+ self.tag_begin,
+ self.tag_end,
+ )
+
+ # replace the response with the fixed one
+ response.text=response_copy.text
+
+ logger.debug("Fix the missing tags by adding them manually.")
+
+ exceptTagNotFoundError:
+ # Raise the original error if the missing tags cannot be fixed
+ raiseefromNone
+
+ # Parse the content into JSON object
+ try:
+ parsed_json=json.loads(extract_text)
+ response.parsed=parsed_json
+ returnresponse
+ exceptjson.decoder.JSONDecodeErrorase:
+ raw_response=f"{self.tag_begin}{extract_text}{self.tag_end}"
+ raiseJsonParsingError(
+ f"The content between {self.tag_begin} and {self.tag_end} "
+ f"MUST be a JSON object."
+ f'When parsing "{raw_response}", an error occurred: {e}',
+ raw_response=raw_response,
+ )fromNone
+
+
+ @property
+ defformat_instruction(self)->str:
+"""Get the format instruction for the json object, if the
+ format_example is provided, it will be used as the example.
+ """
+ returnself._format_instruction.format(
+ content_hint=self.content_hint,
+ )
+
+
+
+
+[docs]
+classMarkdownJsonDictParser(MarkdownJsonObjectParser):
+"""A class used to parse a JSON dictionary object in a markdown fenced
+ code"""
+
+ name:str="json block"
+"""The name of the parser."""
+
+ tag_begin:str="```json"
+"""Opening tag for a code block."""
+
+ content_hint:str="{your_json_dictionary}"
+"""The hint of the content."""
+
+ tag_end:str="```"
+"""Closing end for a code block."""
+
+ _format_instruction=(
+ "You should respond a json object in a json fenced code block as "
+ "follows:\n```json\n{content_hint}\n```"
+ )
+"""The instruction for the format of the json object."""
+
+ required_keys:List[str]
+"""A list of required keys in the JSON dictionary object. If the response
+ misses any of the required keys, it will raise a
+ RequiredFieldNotFoundError."""
+
+
+[docs]
+ def__init__(
+ self,
+ content_hint:Optional[Any]=None,
+ required_keys:List[str]=None,
+ )->None:
+"""Initialize the parser with the content hint.
+
+ Args:
+ content_hint (`Optional[Any]`, defaults to `None`):
+ The hint used to remind LLM what should be fill between the
+ tags. If it is a string, it will be used as the content hint
+ directly. If it is a dict, it will be converted to a json
+ string and used as the content hint.
+ required_keys (`List[str]`, defaults to `[]`):
+ A list of required keys in the JSON dictionary object. If the
+ response misses any of the required keys, it will raise a
+ RequiredFieldNotFoundError.
+ """
+ super().__init__(content_hint)
+
+ self.required_keys=required_keysor[]
+
+
+
+[docs]
+ defparse(self,response:ModelResponse)->ModelResponse:
+"""Parse the text field of the response to a JSON dictionary object,
+ store it in the parsed field of the response object, and check if the
+ required keys exists.
+ """
+ # Parse the JSON object
+ response=super().parse(response)
+
+ ifnotisinstance(response.parsed,dict):
+ # If not a dictionary, raise an error
+ raiseJsonTypeError(
+ "A JSON dictionary object is wanted, "
+ f"but got {type(response.parsed)} instead.",
+ response.text,
+ )
+
+ # Check if the required keys exist
+ keys_missing=[]
+ forkeyinself.required_keys:
+ ifkeynotinresponse.parsed:
+ keys_missing.append(key)
+
+ iflen(keys_missing)!=0:
+ raiseRequiredFieldNotFoundError(
+ f"Missing required "
+ f"field{''iflen(keys_missing)==1else's'} "
+ f"{_join_str_with_comma_and(keys_missing)} in the JSON "
+ f"dictionary object.",
+ response.text,
+ )
+
+ returnresponse
+# -*- coding: utf-8 -*-
+"""The base class for model response parser."""
+fromabcimportABC,abstractmethod
+
+fromagentscope.exceptionimportTagNotFoundError
+fromagentscope.modelsimportModelResponse
+
+
+
+[docs]
+classParserBase(ABC):
+"""The base class for model response parser."""
+
+
+[docs]
+ @abstractmethod
+ defparse(self,response:ModelResponse)->ModelResponse:
+"""Parse the response text to a specific object, and stored in the
+ parsed field of the response object."""
+
+
+ def_extract_first_content_by_tag(
+ self,
+ response:ModelResponse,
+ tag_start:str,
+ tag_end:str,
+ )->str:
+"""Extract the first text content between the tag_start and tag_end
+ in the response text. Note this function does not support nested.
+
+ Args:
+ response (`ModelResponse`):
+ The response object.
+ tag_start (`str`):
+ The start tag.
+ tag_end (`str`):
+ The end tag.
+
+ Returns:
+ `str`: The extracted text content.
+ """
+ text=response.text
+
+ index_start=text.find(tag_start)
+
+ # Avoid the case that tag_begin contains tag_end, e.g. ```json and ```
+ ifindex_start==-1:
+ index_end=text.find(tag_end,0)
+ else:
+ index_end=text.find(tag_end,index_start+len(tag_start))
+
+ ifindex_start==-1orindex_end==-1:
+ missing_tags=[]
+ ifindex_start==-1:
+ missing_tags.append(tag_start)
+ ifindex_end==-1:
+ missing_tags.append(tag_end)
+
+ raiseTagNotFoundError(
+ f"Missing "
+ f"tag{''iflen(missing_tags)==1else's'} "
+ f"{' and '.join(missing_tags)} in response.",
+ raw_response=text,
+ missing_begin_tag=index_start==-1,
+ missing_end_tag=index_end==-1,
+ )
+
+ extract_text=text[
+ index_start+len(tag_start):index_end# noqa: E203
+ ]
+
+ returnextract_text
Source code for agentscope.parsers.tagged_content_parser
+# -*- coding: utf-8 -*-
+"""The parser for tagged content in the model response."""
+importjson
+
+fromagentscope.exceptionimportJsonParsingError
+fromagentscope.modelsimportModelResponse
+fromagentscope.parsersimportParserBase
+
+
+
+[docs]
+classTaggedContent:
+"""A tagged content object to store the tag name, tag begin, content hint
+ and tag end."""
+
+ name:str
+"""The name of the tagged content."""
+
+ tag_begin:str
+"""The beginning tag."""
+
+ content_hint:str
+"""The hint of the content."""
+
+ tag_end:str
+"""The ending tag."""
+
+ parse_json:bool
+"""Whether to parse the content as a json object."""
+
+
+[docs]
+ def__init__(
+ self,
+ name:str,
+ tag_begin:str,
+ content_hint:str,
+ tag_end:str,
+ parse_json:bool=False,
+ )->None:
+"""Initialize the tagged content object.
+
+ Args:
+ name (`str`):
+ The name of the tagged content.
+ tag_begin (`str`):
+ The beginning tag.
+ content_hint (`str`):
+ The hint of the content.
+ tag_end (`str`):
+ The ending tag.
+ parse_json (`bool`, defaults to `False`):
+ Whether to parse the content as a json object.
+ """
+
+ self.name=name
+ self.tag_begin=tag_begin
+ self.content_hint=content_hint
+ self.tag_end=tag_end
+ self.parse_json=parse_json
+
+
+ def__str__(self)->str:
+"""Return the tagged content as a string."""
+ returnf"{self.tag_begin}{self.content_hint}{self.tag_end}"
+
+
+
+
+[docs]
+classMultiTaggedContentParser(ParserBase):
+"""Parse response text by multiple tags, and return a dict of their
+ content. Asking llm to generate JSON dictionary object directly maybe not a
+ good idea due to involving escape characters and other issues. So we can
+ ask llm to generate text with tags, and then parse the text to get the
+ final JSON dictionary object.
+ """
+
+ format_instruction=(
+ "Respond with specific tags as outlined below{json_required_hint}\n"
+ "{tag_lines_format}"
+ )
+"""The instruction for the format of the tagged content."""
+
+ json_required_hint=", and the content between {} MUST be a JSON object:"
+"""If a tagged content is required to be a JSON object by `parse_json`
+ equals to `True`, this instruction will be used to remind the model to
+ generate JSON object."""
+
+
+[docs]
+ def__init__(self,*tagged_contents:TaggedContent)->None:
+"""Initialize the parser with tags.
+
+ Args:
+ tags (`dict[str, Tuple[str, str]]`):
+ A dictionary of tags, the key is the tag name, and the value is
+ a tuple of starting tag and end tag.
+ """
+ self.tagged_contents=list(tagged_contents)
+
+ # Prepare the format instruction according to the tagged contents
+ tag_lines="\n".join([str(_)for_intagged_contents])
+
+ # Prepare hint for the tagged contents that requires a JSON object.
+ json_required_tags=", ".join(
+ [
+ f"{_.tag_begin} and {_.tag_end}"
+ for_intagged_contents
+ if_.parse_json
+ ],
+ )
+ ifjson_required_tags!="":
+ json_required_hint=self.json_required_hint.format(
+ json_required_tags,
+ )
+ else:
+ json_required_hint=": "
+
+ self.format_instruction=self.format_instruction.format(
+ json_required_hint=json_required_hint,
+ tag_lines_format=tag_lines,
+ )
+
+
+
+[docs]
+ defparse(self,response:ModelResponse)->ModelResponse:
+"""Parse the response text by tags, and return a dict of their content
+ in the parsed field of the model response object. If the tagged content
+ requires to parse as a JSON object by `parse_json` equals to `True`, it
+ will be parsed as a JSON object by `json.loads`."""
+
+ tag_to_content={}
+ fortagged_contentinself.tagged_contents:
+ tag_begin=tagged_content.tag_begin
+ tag_end=tagged_content.tag_end
+
+ extract_content=self._extract_first_content_by_tag(
+ response,
+ tag_begin,
+ tag_end,
+ )
+
+ iftagged_content.parse_json:
+ try:
+ extract_content=json.loads(extract_content)
+ exceptjson.decoder.JSONDecodeErrorase:
+ raw_response=f"{tag_begin}{extract_content}{tag_end}"
+ raiseJsonParsingError(
+ f"The content between {tagged_content.tag_begin} and "
+ f"{tagged_content.tag_end} should be a JSON object."
+ f'When parsing "{raw_response}", an error occurred: '
+ f"{e}",
+ raw_response=raw_response,
+ )fromNone
+
+ tag_to_content[tagged_content.name]=extract_content
+
+ response.parsed=tag_to_content
+ returnresponse
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/en/_modules/agentscope/pipelines/functional.html b/en/_modules/agentscope/pipelines/functional.html
index d57b9204c..e6e82d069 100644
--- a/en/_modules/agentscope/pipelines/functional.html
+++ b/en/_modules/agentscope/pipelines/functional.html
@@ -62,6 +62,8 @@
-[docs]
-classServiceFactory:
-"""A service factory class that turns service function into string
- prompt format."""
-
-
-[docs]
- @classmethod
- defget(
- cls,
- service_func:Callable[...,Any],
- **kwargs:Any,
- )->Tuple[Callable[...,Any],dict]:
-"""Covnert a service function into a tool function that agent can
- use, and generate a dictionary in JSON Schema format that can be
- used in OpenAI API directly. While for open-source model, developers
- should handle the conversation from json dictionary to prompt.
-
- Args:
- service_func (`Callable[..., Any]`):
- The service function to be called.
- kwargs (`Any`):
- The arguments to be passed to the service function.
-
- Returns:
- `Tuple(Callable[..., Any], dict)`: A tuple of tool function and
- a dict in JSON Schema format to describe the function.
-
- Note:
- The description of the function and arguments are extracted from
- its docstring automatically, which should be well-formatted in
- **Google style**. Otherwise, their descriptions in the returned
- dictionary will be empty.
-
- Suggestions:
- 1. The name of the service function should be self-explanatory,
- so that the agent can understand the function and use it properly.
- 2. The typing of the arguments should be provided when defining
- the function (e.g. `def func(a: int, b: str, c: bool)`), so that
- the agent can specify the arguments properly.
-
- Example:
-
- """
- # Get the function for agent to use
- tool_func=partial(service_func,**kwargs)
-
- # Obtain all arguments of the service function
- argsspec=inspect.getfullargspec(service_func)
-
- # Construct the mapping from arguments to their typings
- ifparseisNone:
- raiseImportError(
- "Missing required package `docstring_parser`"
- "Please install it by "
- "`pip install docstring_parser`.",
- )
-
- docstring=parse(service_func.__doc__)
-
- # Function description
- func_description=(
- docstring.short_descriptionordocstring.long_description
- )
-
- # The arguments that requires the agent to specify
- args_agent=set(argsspec.args)-set(kwargs.keys())
-
- # Check if the arguments from agent have descriptions in docstring
- args_description={
- _.arg_name:_.descriptionfor_indocstring.params
- }
-
- # Prepare default values
- ifargsspec.defaultsisNone:
- args_defaults={}
- else:
- args_defaults=dict(
- zip(
- reversed(argsspec.args),
- reversed(argsspec.defaults),# type: ignore
- ),
- )
-
- args_required=sorted(
- list(set(args_agent)-set(args_defaults.keys())),
- )
-
- # Prepare types of the arguments, remove the return type
- args_types={
- k:vfork,vinargsspec.annotations.items()ifk!="return"
- }
-
- # Prepare argument dictionary
- properties_field={}
- forkeyinargs_agent:
- arg_property={}
- # type
- ifkeyinargs_types:
- try:
- required_type=_get_type_str(args_types[key])
- arg_property["type"]=required_type
- exceptException:
- logger.warning(
- f"Fail and skip to get the type of the "
- f"argument `{key}`.",
- )
-
- # For Literal type, add enum field
- ifget_origin(args_types[key])isLiteral:
- arg_property["enum"]=list(args_types[key].__args__)
-
- # description
- ifkeyinargs_description:
- arg_property["description"]=args_description[key]
-
- # default
- ifkeyinargs_defaultsandargs_defaults[key]isnotNone:
- arg_property["default"]=args_defaults[key]
-
- properties_field[key]=arg_property
-
- # Construct the JSON Schema for the service function
- func_dict={
- "type":"function",
- "function":{
- "name":service_func.__name__,
- "description":func_description,
- "parameters":{
- "type":"object",
- "properties":properties_field,
- "required":args_required,
- },
- },
- }
-
- returntool_func,func_dict
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/en/_modules/agentscope/service/service_response.html b/en/_modules/agentscope/service/service_response.html
index 0d7482897..e7142da4e 100644
--- a/en/_modules/agentscope/service/service_response.html
+++ b/en/_modules/agentscope/service/service_response.html
@@ -62,6 +62,8 @@
Source code for agentscope.service.service_toolkit
+# -*- coding: utf-8 -*-
+"""Service Toolkit for service function usage."""
+import collections.abc
+import json
+from functools import partial
+import inspect
+from typing import (
+ Callable,
+ Any,
+ Tuple,
+ Union,
+ Optional,
+ Literal,
+ get_args,
+ get_origin,
+ List,
+)
+from loguru import logger
+
+from ..exception import (
+ JsonParsingError,
+ FunctionNotFoundError,
+ FunctionCallFormatError,
+)
+from .service_response import ServiceResponse
+from .service_response import ServiceExecStatus
+
+try:
+ from docstring_parser import parse
+except ImportError:
+ parse = None
+
+
+def _get_type_str(cls: Any) -> Optional[Union[str, list]]:
+ """Get the type string."""
+ type_str = None
+ if hasattr(cls, "__origin__"):
+ # Typing class
+ if cls.__origin__ is Union:
+ type_str = [_get_type_str(_) for _ in get_args(cls)]
+ elif cls.__origin__ is collections.abc.Sequence:
+ type_str = "array"
+ else:
+ type_str = str(cls.__origin__)
+ else:
+ # Normal class
+ if cls is str:
+ type_str = "string"
+ elif cls in [float, int, complex]:
+ type_str = "number"
+ elif cls is bool:
+ type_str = "boolean"
+ elif cls is collections.abc.Sequence:
+ type_str = "array"
+ elif cls is None.__class__:
+ type_str = "null"
+ else:
+ type_str = cls.__name__
+
+ return type_str # type: ignore[return-value]
+
+
+
+[docs]
class ServiceFunction:
    """The service function class.

    A lightweight record holding a registered service function together
    with its pre-processed (partially applied) form and its JSON-schema
    description.
    """

    name: str
    """The name of the service function."""

    original_func: Callable
    """The original function before processing."""

    processed_func: Callable
    """The processed function that can be called by the model directly."""

    json_schema: dict
    """The JSON schema description of the service function."""

    require_args: bool
    """Whether calling the service function requests arguments. Some arguments
    may have default values, so it is not necessary to provide all arguments.
    """

    def __init__(
        self,
        name: str,
        original_func: Callable,
        processed_func: Callable,
        json_schema: dict,
    ) -> None:
        """Initialize the service function record.

        Args:
            name (`str`):
                The name of the service function.
            original_func (`Callable`):
                The original function before processing.
            processed_func (`Callable`):
                The processed function callable by the model directly.
            json_schema (`dict`):
                The JSON schema description of the service function, as
                produced by `ServiceToolkit.get`.
        """
        # Without this constructor the keyword instantiation performed in
        # `ServiceToolkit.add` (ServiceFunction(name=..., ...)) would raise
        # a TypeError, and `require_args` would never be assigned.
        self.name = name
        self.original_func = original_func
        self.processed_func = processed_func
        self.json_schema = json_schema

        # A function requires arguments iff its schema lists any required
        # parameters for the agent to fill in. NOTE(review): derived from
        # the schema shape built by `ServiceToolkit.get`; confirm against
        # upstream if the schema can come from elsewhere.
        self.require_args = (
            len(json_schema["function"]["parameters"].get("required", []))
            != 0
        )
+
+
+[docs]
class ServiceToolkit:
    """A service toolkit class that turns service function into string
    prompt format.

    The toolkit registers service functions (`add`), renders their
    descriptions for the LLM (`tools_instruction` / `json_schemas`),
    and parses and executes the model's function-calling text
    (`parse_and_call_func`).
    """

    service_funcs: dict[str, ServiceFunction]
    """The registered functions in the service toolkit."""

    _tools_instruction_format: str = (
        "## Tool Functions:\n"
        "The following tool functions are available in the format of\n"
        "```\n"
        "{{index}}. {{function name}}: {{function description}}\n"
        "{{argument1 name}} ({{argument type}}): {{argument description}}\n"
        "{{argument2 name}} ({{argument type}}): {{argument description}}\n"
        "...\n"
        "```\n\n"
        "{function_prompt}\n"
    )
    """The instruction template for the tool functions."""

    _tools_calling_format: str = (
        '[{"name": "{function name}", "arguments": {"{argument1 name}": xxx,'
        ' "{argument2 name}": xxx}}]'
    )
    """The format of the tool function call."""

    _tools_execution_format: str = (
        "{index}. Execute function {function_name}\n"
        "   [ARGUMENTS]:\n"
        "       {arguments}\n"
        "   [STATUS]: {status}\n"
        "   [RESULT]: {result}\n"
    )
    """The prompt template for the execution results."""

    def __init__(self) -> None:
        """Initialize the service toolkit with an empty registry of
        service functions."""
        self.service_funcs = {}

    def add(self, service_func: Callable[..., Any], **kwargs: Any) -> None:
        """Add a service function to the toolkit, which will be processed into
        a tool function that can be called by the model directly, and
        registered in processed_funcs.

        Args:
            service_func (`Callable[..., Any]`):
                The service function to be called.
            kwargs (`Any`):
                The arguments to be passed to the service function.

        Returns:
            `None`: the processed function and its JSON schema are stored
            in `service_funcs` rather than returned.

        Note:
            The description of the function and arguments are extracted from
            its docstring automatically, which should be well-formatted in
            **Google style**. Otherwise, their descriptions in the returned
            dictionary will be empty.

        Suggestions:
            1. The name of the service function should be self-explanatory,
            so that the agent can understand the function and use it properly.
            2. The typing of the arguments should be provided when defining
            the function (e.g. `def func(a: int, b: str, c: bool)`), so that
            the agent can specify the arguments properly.
            3. The execution results should be a `ServiceResponse` object.

        Example:

            .. code-block:: python

                def bing_search(query: str, api_key: str, num_results=10):
                    \"""Search the query in Bing search engine.

                    Args:
                        query: (`str`):
                            The string query to search.
                        api_key: (`str`):
                            The API key for Bing search.
                        num_results: (`int`, optional):
                            The number of results to return, default to 10.
                    \"""

                    # ... Your implementation here ...
                    return ServiceResponse(status, output)

        """
        processed_func, json_schema = ServiceToolkit.get(
            service_func,
            **kwargs,
        )

        # Register the service function; duplicates are skipped with a
        # warning instead of silently overwriting the first registration.
        name = service_func.__name__
        if name in self.service_funcs:
            logger.warning(
                f"Service function `{name}` already exists, "
                f"skip adding it.",
            )
        else:
            self.service_funcs[name] = ServiceFunction(
                name=name,
                original_func=service_func,
                processed_func=processed_func,
                json_schema=json_schema,
            )

    @property
    def json_schemas(self) -> dict:
        """The json schema descriptions of the processed service funcs."""
        return {k: v.json_schema for k, v in self.service_funcs.items()}

    @property
    def tools_calling_format(self) -> str:
        """The calling format of the tool functions."""
        return self._tools_calling_format

    @property
    def tools_instruction(self) -> str:
        """The instruction of the tool functions, rendered from the JSON
        schemas of all registered functions. Empty string if no tools are
        registered."""
        tools_prompt = []
        for i, (func_name, desc) in enumerate(self.json_schemas.items()):
            func_desc = desc["function"]["description"]
            args_desc = desc["function"]["parameters"]["properties"]

            args_list = [f"{i + 1}. {func_name}: {func_desc}"]
            for args_name, args_info in args_desc.items():
                # The type annotation may be missing when the service
                # function was defined without typing information.
                if "type" in args_info:
                    args_line = (
                        f'\t{args_name} ({args_info["type"]}): '
                        f'{args_info.get("description", "")}'
                    )
                else:
                    args_line = (
                        f'\t{args_name}: {args_info.get("description", "")}'
                    )
                args_list.append(args_line)

            func_prompt = "\n".join(args_list)
            tools_prompt.append(func_prompt)

        tools_description = "\n".join(tools_prompt)

        if tools_description == "":
            # No tools are provided
            return ""
        else:
            return self._tools_instruction_format.format_map(
                {"function_prompt": tools_description},
            )

    def _parse_and_check_text(self, cmd: Union[list[dict], str]) -> List[dict]:
        """Parse and check the format of the function calling text.

        Args:
            cmd (`Union[list[dict], str]`):
                Either an already-parsed list of call dictionaries, or the
                raw text produced by the model.

        Returns:
            `List[dict]`: the validated list of call dictionaries.

        Raises:
            `JsonParsingError`: if the text is not a JSON list of dicts.
            `FunctionCallFormatError`: if a required field is missing or
                has the wrong type.
            `FunctionNotFoundError`: if a named function is unregistered.
        """

        # Record the error
        error_info = []

        if isinstance(cmd, str):
            # --- Syntax check: if the input can be loaded by JSON
            try:
                processed_text = cmd.strip()

                # complete "[" and "]" if they are missing
                index_start = processed_text.find("[")
                index_end = processed_text.rfind("]")

                if index_start == -1:
                    index_start = 0
                    error_info.append('Missing "[" at the beginning.')

                if index_end == -1:
                    index_end = len(processed_text)
                    error_info.append('Missing "]" at the end.')

                # remove the unnecessary prefix before "[" and suffix
                # after "]"
                processed_text = processed_text[
                    index_start : index_end + 1  # noqa: E203
                ]

                cmds = json.loads(processed_text)
            except json.JSONDecodeError:
                # Since we have processed the text, here we can only report
                # the JSON parsing error
                raise JsonParsingError(
                    f"Expect a list of dictionaries in JSON format, "
                    f"like: {self.tools_calling_format}",
                ) from None
        else:
            cmds = cmd

        # --- Semantic Check: if the input is a list of dicts with
        # required fields

        # Handle the case when the input is a single dictionary
        if isinstance(cmds, dict):
            # The error info is already recorded in error_info
            cmds = [cmds]

        if not isinstance(cmds, list):
            # Not list, raise parsing error
            raise JsonParsingError(
                f"Expect a list of dictionaries in JSON format "
                f"like: {self.tools_calling_format}",
            )

        # --- Check the format of the command ---
        for sub_cmd in cmds:
            if not isinstance(sub_cmd, dict):
                raise JsonParsingError(
                    f"Expect a JSON list of dictionaries, but got"
                    f" {type(sub_cmd)} instead.",
                )

            if "name" not in sub_cmd:
                raise FunctionCallFormatError(
                    "The field 'name' is required in the dictionary.",
                )

            # Obtain the service function
            func_name = sub_cmd["name"]

            # Cannot find the service function
            if func_name not in self.service_funcs:
                raise FunctionNotFoundError(
                    f"Cannot find a tool function named `{func_name}`.",
                )

            # A missing "arguments" field is tolerated and treated as no
            # arguments, matching the `.get("arguments", {})` fallback in
            # `_execute_func`; previously a missing key raised an opaque
            # KeyError here.
            arguments = sub_cmd.get("arguments", {})
            if not isinstance(arguments, dict):
                raise FunctionCallFormatError(
                    "Expect a dictionary for the arguments, but got "
                    f"{type(arguments)} instead.",
                )

        # Leaving the type checking and required checking to the runtime
        # error reporting during execution
        return cmds

    def _execute_func(self, cmds: List[dict]) -> str:
        """Execute the function with the arguments.

        Args:
            cmds (`List[dict]`):
                A list of dictionaries, where each dictionary contains the
                name of the function and its arguments, e.g. {"name": "func1",
                "arguments": {"arg1": 1, "arg2": 2}}.

        Returns:
            `str`: The prompt of the execution results.
        """

        execute_results = []
        for i, cmd in enumerate(cmds):
            func_name = cmd["name"]
            service_func = self.service_funcs[cmd["name"]]
            kwargs = cmd.get("arguments", {})

            # Echo the call to stdout; long values are truncated for
            # readability only (the function still receives full values).
            print(f">>> Executing function {func_name} with arguments:")
            for key, value in kwargs.items():
                value = (
                    value if len(str(value)) < 50 else str(value)[:50] + "..."
                )
                print(f">>> \t{key}: {value}")

            # Execute the function; any exception is converted into an
            # error ServiceResponse so one failing call doesn't abort the
            # whole batch.
            try:
                func_res = service_func.processed_func(**kwargs)
            except Exception as e:
                func_res = ServiceResponse(
                    status=ServiceExecStatus.ERROR,
                    content=str(e),
                )

            print(">>> END ")

            status = (
                "SUCCESS"
                if func_res.status == ServiceExecStatus.SUCCESS
                else "FAILED"
            )

            arguments = [f"{k}: {v}" for k, v in kwargs.items()]

            execute_res = self._tools_execution_format.format_map(
                {
                    "index": i + 1,
                    "function_name": cmd["name"],
                    "arguments": "\n\t\t".join(arguments),
                    "status": status,
                    "result": func_res.content,
                },
            )

            execute_results.append(execute_res)

        execute_results_prompt = "\n".join(execute_results)

        return execute_results_prompt

    def parse_and_call_func(self, text_cmd: Union[list[dict], str]) -> str:
        """Parse, check the text and call the function.

        Args:
            text_cmd (`Union[list[dict], str]`):
                The model's function-calling text or an already-parsed
                list of call dictionaries.

        Returns:
            `str`: The prompt describing the execution results.
        """

        # --- Step 1: Parse the text according to the tools_call_format
        cmds = self._parse_and_check_text(text_cmd)

        # --- Step 2: Call the service function ---

        execute_results_prompt = self._execute_func(cmds)

        return execute_results_prompt

    @classmethod
    def get(
        cls,
        service_func: Callable[..., Any],
        **kwargs: Any,
    ) -> Tuple[Callable[..., Any], dict]:
        """Convert a service function into a tool function that agent can
        use, and generate a dictionary in JSON Schema format that can be
        used in OpenAI API directly. While for open-source model, developers
        should handle the conversation from json dictionary to prompt.

        Args:
            service_func (`Callable[..., Any]`):
                The service function to be called.
            kwargs (`Any`):
                The arguments to be passed to the service function.

        Returns:
            `Tuple(Callable[..., Any], dict)`: A tuple of tool function and
            a dict in JSON Schema format to describe the function.

        Note:
            The description of the function and arguments are extracted from
            its docstring automatically, which should be well-formatted in
            **Google style**. Otherwise, their descriptions in the returned
            dictionary will be empty.

        Suggestions:
            1. The name of the service function should be self-explanatory,
            so that the agent can understand the function and use it properly.
            2. The typing of the arguments should be provided when defining
            the function (e.g. `def func(a: int, b: str, c: bool)`), so that
            the agent can specify the arguments properly.

        Example:

            .. code-block:: python

                def bing_search(query: str, api_key: str, num_results: int=10):
                    '''Search the query in Bing search engine.

                    Args:
                        query (str):
                            The string query to search.
                        api_key (str):
                            The API key for Bing search.
                        num_results (int):
                            The number of results to return, default to 10.
                    '''
                    pass


        """
        # Get the function for agent to use: pre-fill the developer-supplied
        # arguments so the model only has to provide the remaining ones.
        tool_func = partial(service_func, **kwargs)

        # Obtain all arguments of the service function
        argsspec = inspect.getfullargspec(service_func)

        # Construct the mapping from arguments to their typings
        if parse is None:
            raise ImportError(
                "Missing required package `docstring_parser`"
                "Please install it by "
                "`pip install docstring_parser`.",
            )

        docstring = parse(service_func.__doc__)

        # Function description
        func_description = (
            docstring.short_description or docstring.long_description
        )

        # The arguments that requires the agent to specify
        args_agent = set(argsspec.args) - set(kwargs.keys())

        # Check if the arguments from agent have descriptions in docstring
        args_description = {
            _.arg_name: _.description for _ in docstring.params
        }

        # Prepare default values; getfullargspec aligns defaults with the
        # *last* positional args, hence the reversed zip.
        if argsspec.defaults is None:
            args_defaults = {}
        else:
            args_defaults = dict(
                zip(
                    reversed(argsspec.args),
                    reversed(argsspec.defaults),  # type: ignore
                ),
            )

        args_required = sorted(
            list(set(args_agent) - set(args_defaults.keys())),
        )

        # Prepare types of the arguments, remove the return type
        args_types = {
            k: v for k, v in argsspec.annotations.items() if k != "return"
        }

        # Prepare argument dictionary
        properties_field = {}
        for key in args_agent:
            arg_property = {}
            # type
            if key in args_types:
                try:
                    required_type = _get_type_str(args_types[key])
                    arg_property["type"] = required_type
                except Exception:
                    # Best effort: an exotic annotation simply loses its
                    # "type" entry rather than failing the whole schema.
                    logger.warning(
                        f"Fail and skip to get the type of the "
                        f"argument `{key}`.",
                    )

                # For Literal type, add enum field
                if get_origin(args_types[key]) is Literal:
                    arg_property["enum"] = list(args_types[key].__args__)

            # description
            if key in args_description:
                arg_property["description"] = args_description[key]

            # default
            if key in args_defaults and args_defaults[key] is not None:
                arg_property["default"] = args_defaults[key]

            properties_field[key] = arg_property

        # Construct the JSON Schema for the service function
        func_dict = {
            "type": "function",
            "function": {
                "name": service_func.__name__,
                "description": func_description,
                "parameters": {
                    "type": "object",
                    "properties": properties_field,
                    "required": args_required,
                },
            },
        }

        return tool_func, func_dict
+
+
+
+
+
+[docs]
class ServiceFactory:
    """A service factory class that turns service function into string
    prompt format.

    .. deprecated::
        Use :class:`ServiceToolkit` instead. This class is kept only for
        backward compatibility and now forwards to ``ServiceToolkit.get``
        (its previous body was a line-for-line copy of that method).
    """

    @classmethod
    def get(
        cls,
        service_func: Callable[..., Any],
        **kwargs: Any,
    ) -> Tuple[Callable[..., Any], dict]:
        """Convert a service function into a tool function that agent can
        use, and generate a dictionary in JSON Schema format that can be
        used in OpenAI API directly. While for open-source model, developers
        should handle the conversation from json dictionary to prompt.

        Args:
            service_func (`Callable[..., Any]`):
                The service function to be called.
            kwargs (`Any`):
                The arguments to be passed to the service function.

        Returns:
            `Tuple(Callable[..., Any], dict)`: A tuple of tool function and
            a dict in JSON Schema format to describe the function.

        Note:
            The description of the function and arguments are extracted from
            its docstring automatically, which should be well-formatted in
            **Google style**. Otherwise, their descriptions in the returned
            dictionary will be empty.

        Suggestions:
            1. The name of the service function should be self-explanatory,
            so that the agent can understand the function and use it properly.
            2. The typing of the arguments should be provided when defining
            the function (e.g. `def func(a: int, b: str, c: bool)`), so that
            the agent can specify the arguments properly.

        Example:

            .. code-block:: python

                def bing_search(query: str, api_key: str, num_results: int=10):
                    '''Search the query in Bing search engine.

                    Args:
                        query (str):
                            The string query to search.
                        api_key (str):
                            The API key for Bing search.
                        num_results (int):
                            The number of results to return, default to 10.
                    '''
                    pass


        """
        logger.warning(
            "The service factory will be deprecated in the future."
            " Try to use the `ServiceToolkit` class instead.",
        )

        # Delegate to the single maintained implementation instead of
        # keeping a duplicated copy of the conversion logic here.
        return ServiceToolkit.get(service_func, **kwargs)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/en/_modules/agentscope/service/sql_query/mongodb.html b/en/_modules/agentscope/service/sql_query/mongodb.html
index 9bc8e3a32..39b220c5a 100644
--- a/en/_modules/agentscope/service/sql_query/mongodb.html
+++ b/en/_modules/agentscope/service/sql_query/mongodb.html
@@ -62,6 +62,8 @@
Source code for agentscope.service.sql_query.mysql
from typingimportAnyfrom..service_responseimportServiceResponse
-from...utils.commonimportif_change_database
+from...utils.commonimport_if_change_databasefrom...service.service_statusimportServiceExecStatustry:
@@ -153,7 +155,7 @@
Source code for agentscope.service.sql_query.mysql
"""
# Check if the query is safe
- ifnotallow_change_dataandnotif_change_database(query):
+ ifnotallow_change_dataandnot_if_change_database(query):raiseValueError("Unsafe SQL query detected. Only SELECT statements are allowed. ""If you want to allow changing data in the database, "
@@ -180,7 +182,7 @@
Source code for agentscope.service.sql_query.mysql
Source code for agentscope.service.sql_query.sqlite
from typingimportAnyfrom...service.service_responseimportServiceResponse
-from...utils.commonimportif_change_database
+from...utils.commonimport_if_change_databasefrom...service.service_statusimportServiceExecStatustry:
@@ -139,7 +141,7 @@
Source code for agentscope.service.sql_query.sqlite
"""
# Check if the query is safe
- ifnotallow_change_dataandnotif_change_database(query):
+ ifnotallow_change_dataandnot_if_change_database(query):raiseValueError("Unsafe SQL query detected. Only SELECT statements are allowed. ""If you want to allow changing data in the database, "
@@ -158,7 +160,7 @@
Source code for agentscope.service.sql_query.sqlite
results =cursor.fetchall()# commit the change if needed
- ifif_change_database(query):
+ if_if_change_database(query):conn.commit()cursor.close()
diff --git a/en/_modules/agentscope/service/text_processing/summarization.html b/en/_modules/agentscope/service/text_processing/summarization.html
index b712f96a0..6a4b9b8eb 100644
--- a/en/_modules/agentscope/service/text_processing/summarization.html
+++ b/en/_modules/agentscope/service/text_processing/summarization.html
@@ -62,6 +62,8 @@
# This will raise an exception for HTTP error codesresponse.raise_for_status()exceptrequests.RequestExceptionase:
- logger.error(e)returnstr(e)# Parse the JSON responsesearch_results=response.json()
@@ -276,9 +276,7 @@
Source code for agentscope.utils.common
-
-[docs]
-defif_change_database(sql_query:str)->bool:
+def_if_change_database(sql_query:str)->bool:"""Check whether SQL query only contains SELECT query"""# Compile the regex pattern outside the function for better performancepattern_unsafe_sql=re.compile(
@@ -293,8 +291,7 @@
Source code for agentscope.utils.common
# Matching non-SELECT statements with regular expressionsifpattern_unsafe_sql.search(sql_query):returnFalse
- returnTrue
returnjson.dumps(content,ensure_ascii=False)else:returnstr(content)
+
+
+def_join_str_with_comma_and(elements:List[str])->str:
+"""Return the JSON string with comma, and use " and " between the last two
+ elements."""
+
+ iflen(elements)==0:
+ return""
+ eliflen(elements)==1:
+ returnelements[0]
+ eliflen(elements)==2:
+ return" and ".join(elements)
+ else:
+ return", ".join(elements[:-1])+f", and {elements[-1]}"
diff --git a/en/_sources/agentscope.exception.rst.txt b/en/_sources/agentscope.exception.rst.txt
new file mode 100644
index 000000000..9ef713c41
--- /dev/null
+++ b/en/_sources/agentscope.exception.rst.txt
@@ -0,0 +1,6 @@
+agentscope.exception
+====================
+.. automodule:: agentscope.exception
+ :members:
+ :undoc-members:
+ :show-inheritance:
\ No newline at end of file
diff --git a/en/_sources/agentscope.parsers.code_block_parser.rst.txt b/en/_sources/agentscope.parsers.code_block_parser.rst.txt
new file mode 100644
index 000000000..0249375e0
--- /dev/null
+++ b/en/_sources/agentscope.parsers.code_block_parser.rst.txt
@@ -0,0 +1,6 @@
+agentscope.parsers.code_block_parser
+====================================
+.. automodule:: agentscope.parsers.code_block_parser
+ :members:
+ :undoc-members:
+ :show-inheritance:
\ No newline at end of file
diff --git a/en/_sources/agentscope.parsers.json_object_parser.rst.txt b/en/_sources/agentscope.parsers.json_object_parser.rst.txt
new file mode 100644
index 000000000..e369969b4
--- /dev/null
+++ b/en/_sources/agentscope.parsers.json_object_parser.rst.txt
@@ -0,0 +1,6 @@
+agentscope.parsers.json_object_parser
+=====================================
+.. automodule:: agentscope.parsers.json_object_parser
+ :members:
+ :undoc-members:
+ :show-inheritance:
\ No newline at end of file
diff --git a/en/_sources/agentscope.parsers.parser_base.rst.txt b/en/_sources/agentscope.parsers.parser_base.rst.txt
new file mode 100644
index 000000000..f9e6b2d66
--- /dev/null
+++ b/en/_sources/agentscope.parsers.parser_base.rst.txt
@@ -0,0 +1,6 @@
+agentscope.parsers.parser_base
+==============================
+.. automodule:: agentscope.parsers.parser_base
+ :members:
+ :undoc-members:
+ :show-inheritance:
\ No newline at end of file
diff --git a/en/_sources/agentscope.parsers.rst.txt b/en/_sources/agentscope.parsers.rst.txt
new file mode 100644
index 000000000..e081ce0e8
--- /dev/null
+++ b/en/_sources/agentscope.parsers.rst.txt
@@ -0,0 +1,7 @@
+agentscope.parsers
+==================
+
+.. automodule:: agentscope.parsers
+ :members:
+ :undoc-members:
+ :show-inheritance:
\ No newline at end of file
diff --git a/en/_sources/agentscope.parsers.tagged_content_parser.rst.txt b/en/_sources/agentscope.parsers.tagged_content_parser.rst.txt
new file mode 100644
index 000000000..bfbdaa5e8
--- /dev/null
+++ b/en/_sources/agentscope.parsers.tagged_content_parser.rst.txt
@@ -0,0 +1,6 @@
+agentscope.parsers.tagged_content_parser
+========================================
+.. automodule:: agentscope.parsers.tagged_content_parser
+ :members:
+ :undoc-members:
+ :show-inheritance:
\ No newline at end of file
diff --git a/zh_CN/_sources/agentscope.service.service_factory.rst.txt b/en/_sources/agentscope.service.service_toolkit.rst.txt
similarity index 50%
rename from zh_CN/_sources/agentscope.service.service_factory.rst.txt
rename to en/_sources/agentscope.service.service_toolkit.rst.txt
index cf4ab1d5b..b9765a4dd 100644
--- a/zh_CN/_sources/agentscope.service.service_factory.rst.txt
+++ b/en/_sources/agentscope.service.service_toolkit.rst.txt
@@ -1,6 +1,6 @@
-agentscope.service.service_factory
+agentscope.service.service_toolkit
==================================
-.. automodule:: agentscope.service.service_factory
+.. automodule:: agentscope.service.service_toolkit
:members:
:undoc-members:
:show-inheritance:
\ No newline at end of file
diff --git a/en/_sources/index.rst.txt b/en/_sources/index.rst.txt
index 091134860..fb81e2e64 100644
--- a/en/_sources/index.rst.txt
+++ b/en/_sources/index.rst.txt
@@ -33,6 +33,8 @@ AgentScope Documentation
agentscope.models
agentscope.agents
agentscope.memory
+ agentscope.parsers
+ agentscope.exception
agentscope.pipelines
agentscope.service
agentscope.rpc
diff --git a/en/_sources/tutorial/204-service.md.txt b/en/_sources/tutorial/204-service.md.txt
index 826b1fec6..30d82242d 100644
--- a/en/_sources/tutorial/204-service.md.txt
+++ b/en/_sources/tutorial/204-service.md.txt
@@ -44,30 +44,38 @@ About each service function, you can find detailed information in the
## How to use Service Functions
-AgentScope provides two service classes for Service functions,
-`ServiceFactory` and `ServiceResponse`.
-
-- `ServiceFactory` is mainly used to convert general Python functions into
- a form that can be directly used by large-scale models, and automatically
- generate function descriptions in JSON schema format.
-- `ServiceResponse` is a subclass of a dictionary, providing a unified call
- result interface for all Service functions.
-
-### About Service Factory
-
-The tools used by agents are generally of the function type. Developers
-need to prepare functions that can be called directly by large models, and
-provide descriptions of the functions. However, general functions often
-require developers to provide some parameters (such as keys, usernames,
-specific URLs, etc.), and then the large model can use them. At the same
-time, it is also a tedious task to generate specific format descriptions
-for multiple functions.
-
-To tackle the above problems, AgentScope introduces `ServiceFactory`. For a
-given Service function, it allows developers to specify some parameters,
-generate a function that can be called directly by large models, and
-automatically generate function descriptions based on the Docstring. Take
-the Bing web search function as an example.
+AgentScope provides two classes for service functions,
+`ServiceToolkit` and `ServiceResponse`.
+
+### About Service Toolkit
+
+The use of tools for LLM usually involves five steps:
+
+1. **Prepare tool functions**. That is, developers should pre-process the
+functions by providing necessary parameters, e.g. api key, username,
+password, etc.
+2. **Prepare instruction for LLM**. A detailed description of these tool
+functions is required for the LLM to understand them properly.
+3. **Guide LLM how to use tool functions**. A format description for calling
+functions is required.
+4. **Parse LLM response**. Once the LLM generates a response,
+we need to parse it according to the format described in the third step.
+5. **Call functions and handle exceptions**. Calling the functions, return
+the results, and handle exceptions.
+
+To simplify the above steps and improve reusability, AgentScope introduces
+`ServiceToolkit`. It can
+- register python functions
+- generate tool function descriptions in both string and JSON schema format
+- generate usage instruction for LLM
+- parse the model response, call the tool functions, and handle exceptions
+
+#### How to use
+
+Follow the steps below to use `ServiceToolkit`:
+
+1. Init a `ServiceToolkit` object and register service functions with necessary
+parameters. Take the following Bing search function as an example.
```python
def bing_search(
@@ -95,73 +103,114 @@ def bing_search(
"""
```
-In the above function, `question` is the field filled in by the large model,
-while `api_key` and `num_results` are the parameters that the developer needs to provide.
-We use the `get` function of `ServiceFactory` to process it:
+We register the function in a `ServiceToolkit` object by providing `api_key` and `num_results` as necessary parameters.
```python
-from agentscope.service import ServiceFactory
+from agentscope.service import ServiceToolkit
+
+service_toolkit = ServiceToolkit()
-func, func_intro = ServiceFactory.get(
+service_toolkit.add(
bing_search,
api_key="xxx",
- num_results=3)
+ num_results=3
+)
```
-In the above code, the `func` generated by ServiceFactory is equivalent to the following function:
+2. Use the `tools_instruction` attribute to instruct the LLM in your prompt, or use the `json_schemas` attribute to get the JSON schema format descriptions to construct a customized instruction or use them directly in model APIs (e.g. OpenAI Chat API).
-```python
-def bing_search(question: str) -> ServiceResponse:
- """
- Search question in Bing Search API and return the searching results
+````text
+>> print(service_toolkit.tools_instruction)
+## Tool Functions:
+The following tool functions are available in the format of
+```
+{index}. {function name}: {function description}
+{argument1 name} ({argument type}): {argument description}
+{argument2 name} ({argument type}): {argument description}
+...
+```
- Args:
- question (`str`):
- The search query string.
- """
- return bing_search(question, api_key="xxx", num_results=3)
+1. bing_search: Search question in Bing Search API and return the searching results
+ question (str): The search query string.
+````
+````text
+>> print(service_toolkit.json_schemas)
+{
+ "bing_search": {
+ "type": "function",
+ "function": {
+ "name": "bing_search",
+ "description": "Search question in Bing Search API and return the searching results",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "question": {
+ "type": "string",
+ "description": "The search query string."
+ }
+ },
+ "required": [
+ "question"
+ ]
+ }
+ }
+ }
+}
+````
+
+3. Guide LLM how to use tool functions by the `tools_calling_format` attribute.
+The ServiceToolkit module requires LLM to return a list of dictionaries in
+JSON format, where each dictionary represents a function call. It must
+contain two fields, `name` and `arguments`, where `name` is the function name
+and `arguments` is a dictionary that maps from the argument name to the
+argument value.
+
+
+```text
+>> print(service_toolkit.tools_calling_format)
+[{"name": "{function name}", "arguments": {"{argument1 name}": xxx, "{argument2 name}": xxx}}]
```
-The generated JSON schema format is as follows, which can be directly used
-in the `tools` field of the OpenAI API.
+4. Parse the LLM response and call functions by its `parse_and_call_func`
+method. This function takes a string or a parsed dictionary as input.
+- When the input is a string, this function will parse it accordingly and execute the function with the parsed arguments.
+- If the input is a parsed dictionary, it will call the function directly.
```python
-# print(func_intro)
-{
- "type": "function",
- "function": {
- "name": "bing_search",
- "description": "Search question in Bing Search API and return the searching results",
- "parameters": {
- "type": "object",
- "properties": {
- "question": {
- "type": "string",
- "description": "The search query string."
- }
- },
- "required": [
- "question"
- ]
- }
- }
-}
+# a string input
+string_input = '[{"name": "bing_search", "arguments": {"question": "xxx"}}]'
+res_of_string_input = service_toolkit.parse_and_call_func(string_input)
+
+# or a parsed dictionary
+dict_input = [{"name": "bing_search", "arguments": {"question": "xxx"}}]
+# res_of_dict_input is the same as res_of_string_input
+res_of_dict_input = service_toolkit.parse_and_call_func(dict_input)
+
+print(res_of_string_input)
+```
```
+1. Execute function bing_search
+ [ARGUMENTS]:
+ question: xxx
+ [STATUS]: SUCCESS
+ [RESULT]: ...
+```
+
+For more specific examples, refer to the `ReActAgent` class in `agentscope.agents`.
-**Note**:
-The description of the function and arguments are extracted from
-its docstring automatically, which should be well-formatted in
-**Google style**. Otherwise, their descriptions in the returned
-dictionary will be empty.
+#### Create new Service Function
-**Suggestions**:
+A new service function that can be used by `ServiceToolkit` should meet the following requirements:
-1. The name of the service function should be self-explanatory,
-so that the agent can understand the function and use it properly.
-2. The typing of the arguments should be provided when defining
+1. A well-formatted docstring (Google style is recommended), so that the
+`ServiceToolkit` can extract the descriptions of both the function and its arguments.
+2. The name of the service function should be self-explanatory,
+so that the LLM can understand the function and use it properly.
+3. The typing of the arguments should be provided when defining
the function (e.g. `def func(a: int, b: str, c: bool)`), so that
the agent can specify the arguments properly.
+
### About ServiceResponse
`ServiceResponse` is a wrapper for the execution results of the services,
diff --git a/en/agentscope.agents.agent.html b/en/agentscope.agents.agent.html
index 7bc98f138..5ad14cc70 100644
--- a/en/agentscope.agents.agent.html
+++ b/en/agentscope.agents.agent.html
@@ -61,6 +61,8 @@
Initialize the ReAct agent with the given name, model config name
and tools.
@@ -537,10 +539,11 @@
sys_prompt (str) – The system prompt of the agent.
model_config_name (str) – The name of the model config, which is used to load model from
configuration.
-
tools (List[Tuple]) – A list of tuples, each containing the name of a tool and the
-tool’s description in JSON schema format.
+
service_toolkit (ServiceToolkit) – A ServiceToolkit object that contains the tool functions.
max_iters (int, defaults to 10) – The maximum number of iterations of the reasoning-acting loops.
-
verbose (bool, defaults to True) – Whether to print the output of the tools.
+
verbose (bool, defaults to True) – Whether to print the detailed information during reasoning and
+acting steps. If False, only the content in speak field will
+be printed out.
Initialize the ReAct agent with the given name, model config name
and tools.
@@ -120,10 +122,11 @@
sys_prompt (str) – The system prompt of the agent.
model_config_name (str) – The name of the model config, which is used to load model from
configuration.
-
tools (List[Tuple]) – A list of tuples, each containing the name of a tool and the
-tool’s description in JSON schema format.
+
service_toolkit (ServiceToolkit) – A ServiceToolkit object that contains the tool functions.
max_iters (int, defaults to 10) – The maximum number of iterations of the reasoning-acting loops.
-
verbose (bool, defaults to True) – Whether to print the output of the tools.
+
verbose (bool, defaults to True) – Whether to print the detailed information during reasoning and
+acting steps. If False, only the content in speak field will
+be printed out.
Convert a service function into a tool function that agent can
-use, and generate a dictionary in JSON Schema format that can be
-used in OpenAI API directly. While for open-source model, developers
-should handle the conversation from json dictionary to prompt.
-
-
Parameters:
-
-
service_func (Callable[…, Any]) – The service function to be called.
-
kwargs (Any) – The arguments to be passed to the service function.
-
-
-
Returns:
-
A tuple of tool function and
-a dict in JSON Schema format to describe the function.
-
-
Return type:
-
Tuple(Callable[…, Any], dict)
-
-
-
-
Note
-
The description of the function and arguments are extracted from
-its docstring automatically, which should be well-formatted in
-Google style. Otherwise, their descriptions in the returned
-dictionary will be empty.
-
-
-
Suggestions:
1. The name of the service function should be self-explanatory,
-so that the agent can understand the function and use it properly.
-2. The typing of the arguments should be provided when defining
-the function (e.g. def func(a: int, b: str, c: bool)), so that
-the agent can specify the arguments properly.
Parse response text by multiple tags, and return a dict of their
+content. Asking the LLM to generate a JSON dictionary object directly may not
+be a good idea because of escape characters and other formatting issues. So we
+can ask the LLM to generate text with tags instead, and then parse the text to
+obtain the final JSON dictionary object.
If a tagged content is required to be a JSON object (i.e. parse_json
+is set to True), this instruction will be used to remind the model to
+generate a JSON object.
Parse the response text by tags, and return a dict of their content
+in the parsed field of the model response object. If the tagged content
+needs to be parsed as a JSON object (i.e. parse_json is set to True), it
+will be parsed as a JSON object by json.loads.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/en/agentscope.pipelines.functional.html b/en/agentscope.pipelines.functional.html
index 44cb8a4c1..87aa28783 100644
--- a/en/agentscope.pipelines.functional.html
+++ b/en/agentscope.pipelines.functional.html
@@ -61,6 +61,8 @@