diff --git a/.gitignore b/.gitignore
index 66658cf65..db8d4d7f4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -137,4 +137,6 @@ dmypy.json
docs/sphinx_doc/build/
# Used to save loggings and files
-runs/
+*runs/
+agentscope.db
+tmp*.json
diff --git a/README.md b/README.md
index 9206dfd73..bdaa26837 100644
--- a/README.md
+++ b/README.md
@@ -65,21 +65,21 @@ applications in a centralized programming manner for streamlined development.
AgentScope provides a list of `ModelWrapper` to support both local model
services and third-party model APIs.
-| API | Task | Model Wrapper | Example Configuration | Some Supported Models |
+| API | Task | Model Wrapper | Configuration | Some Supported Models |
|------------------------|-----------------|---------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------|----------------------------------------------|
-| OpenAI API | Chat | [`OpenAIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [link](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api) | gpt-4, gpt-3.5-turbo, ... |
-| | Embedding | [`OpenAIEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [link](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api) | text-embedding-ada-002, ... |
-| | DALL·E | [`OpenAIDALLEWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [link](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api) | dall-e-2, dall-e-3 |
-| DashScope API | Chat | [`DashScopeChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [link](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api) | qwen-plus, qwen-max, ... |
-| | Image Synthesis | [`DashScopeImageSynthesisWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [link](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api) | wanx-v1 |
-| | Text Embedding | [`DashScopeTextEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [link](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api) | text-embedding-v1, text-embedding-v2, ... |
-| | Multimodal | [`DashScopeMultiModalWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [link](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api) | qwen-vl-v1, qwen-vl-chat-v1, qwen-audio-chat |
-| Gemini API | Chat | [`GeminiChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/gemini_model.py) | [link](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#gemini-api) | gemini-pro, ... |
-| | Embedding | [`GeminiEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/gemini_model.py) | [link](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#gemini-api) | models/embedding-001, ... |
-| ollama | Chat | [`OllamaChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [link](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api) | llama2, Mistral, ... |
-| | Embedding | [`OllamaEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [link](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api) | llama2, Mistral, ... |
-| | Generation | [`OllamaGenerationWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [link](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api) | llama2, Mistral, ... |
-| Post Request based API | - | [`PostAPIModelWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/post_model.py) | [link](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#post-request-api) | - |
+| OpenAI API | Chat | [`OpenAIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/openai_chat_template.json) | gpt-4, gpt-3.5-turbo, ... |
+| | Embedding | [`OpenAIEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/openai_embedding_template.json) | text-embedding-ada-002, ... |
+| | DALL·E | [`OpenAIDALLEWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/openai_dall_e_template.json) | dall-e-2, dall-e-3 |
+| DashScope API | Chat | [`DashScopeChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_chat_template.json) | qwen-plus, qwen-max, ... |
+| | Image Synthesis | [`DashScopeImageSynthesisWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_image_synthesis_template.json) | wanx-v1 |
+| | Text Embedding | [`DashScopeTextEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_text_embedding_template.json) | text-embedding-v1, text-embedding-v2, ... |
+| | Multimodal | [`DashScopeMultiModalWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_multimodal_template.json) | qwen-vl-max, qwen-vl-chat-v1, qwen-audio-chat |
+| Gemini API | Chat | [`GeminiChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/gemini_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#gemini-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/gemini_chat_template.json) | gemini-pro, ... |
+| | Embedding | [`GeminiEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/gemini_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#gemini-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/gemini_embedding_template.json) | models/embedding-001, ... |
+| ollama | Chat | [`OllamaChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/ollama_chat_template.json) | llama2, Mistral, ... |
+| | Embedding | [`OllamaEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/ollama_embedding_template.json) | llama2, Mistral, ... |
+| | Generation | [`OllamaGenerationWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/ollama_generate_template.json) | llama2, Mistral, ... |
+| Post Request based API | - | [`PostAPIModelWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/post_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#post-request-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/postapi_model_config_template.json) | - |
**Supported Local Model Deployment**
@@ -109,7 +109,6 @@ the following libraries.
- [Self-Organizing Conversation](./examples/conversation_self_organizing)
- [Basic Conversation with LangChain library](./examples/conversation_with_langchain)
- [Conversation with ReAct Agent](./examples/conversation_with_react_agent)
- - [Conversation in Natural Language to Query SQL](./examples/conversation_nl2sql/)
- [Conversation with RAG Agent](./examples/conversation_with_RAG_agents)
- Game
@@ -186,7 +185,7 @@ Taking OpenAI Chat API as an example, the model configuration is as follows:
```python
openai_model_config = {
"config_name": "my_openai_config", # The name to identify the config
- "model_type": "openai", # The type to identify the model wrapper
+ "model_type": "openai_chat", # The type to identify the model wrapper
    # Detailed parameters used to initialize the model wrapper
"model_name": "gpt-4", # The used model in openai API, e.g. gpt-4, gpt-3.5-turbo, etc.
diff --git a/README_ZH.md b/README_ZH.md
index 36c7e2624..098a8c0c3 100644
--- a/README_ZH.md
+++ b/README_ZH.md
@@ -54,21 +54,21 @@ AgentScope是一个创新的多智能体开发平台,旨在赋予开发人员
AgentScope提供了一系列`ModelWrapper`来支持本地模型服务和第三方模型API。
-| API | Task | Model Wrapper | Example Configuration | Some Supported Models |
+| API | Task | Model Wrapper | Configuration | Some Supported Models |
|------------------------|-----------------|---------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------|----------------------------------------------|
-| OpenAI API | Chat | [`OpenAIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [link](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api) | gpt-4, gpt-3.5-turbo, ... |
-| | Embedding | [`OpenAIEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [link](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api) | text-embedding-ada-002, ... |
-| | DALL·E | [`OpenAIDALLEWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [link](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api) | dall-e-2, dall-e-3 |
-| DashScope API | Chat | [`DashScopeChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [link](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api) | qwen-plus, qwen-max, ... |
-| | Image Synthesis | [`DashScopeImageSynthesisWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [link](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api) | wanx-v1 |
-| | Text Embedding | [`DashScopeTextEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [link](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api) | text-embedding-v1, text-embedding-v2, ... |
-| | Multimodal | [`DashScopeMultiModalWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [link](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api) | qwen-vl-v1, qwen-vl-chat-v1, qwen-audio-chat |
-| Gemini API | Chat | [`GeminiChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/gemini_model.py) | [link](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#gemini-api) | gemini-pro, ... |
-| | Embedding | [`GeminiEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/gemini_model.py) | [link](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#gemini-api) | models/embedding-001, ... |
-| ollama | Chat | [`OllamaChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [link](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api) | llama2, Mistral, ... |
-| | Embedding | [`OllamaEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [link](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api) | llama2, Mistral, ... |
-| | Generation | [`OllamaGenerationWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [link](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api) | llama2, Mistral, ... |
-| Post Request based API | - | [`PostAPIModelWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/post_model.py) | [link](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#post-request-api) | - |
+| OpenAI API | Chat | [`OpenAIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/openai_chat_template.json) | gpt-4, gpt-3.5-turbo, ... |
+| | Embedding | [`OpenAIEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/openai_embedding_template.json) | text-embedding-ada-002, ... |
+| | DALL·E | [`OpenAIDALLEWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/openai_dall_e_template.json) | dall-e-2, dall-e-3 |
+| DashScope API | Chat | [`DashScopeChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_chat_template.json) | qwen-plus, qwen-max, ... |
+| | Image Synthesis | [`DashScopeImageSynthesisWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_image_synthesis_template.json) | wanx-v1 |
+| | Text Embedding | [`DashScopeTextEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_text_embedding_template.json) | text-embedding-v1, text-embedding-v2, ... |
+| | Multimodal | [`DashScopeMultiModalWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_multimodal_template.json) | qwen-vl-max, qwen-vl-chat-v1, qwen-audio-chat |
+| Gemini API | Chat | [`GeminiChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/gemini_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#gemini-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/gemini_chat_template.json) | gemini-pro, ... |
+| | Embedding | [`GeminiEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/gemini_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#gemini-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/gemini_embedding_template.json) | models/embedding-001, ... |
+| ollama | Chat | [`OllamaChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/ollama_chat_template.json) | llama2, Mistral, ... |
+| | Embedding | [`OllamaEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/ollama_embedding_template.json) | llama2, Mistral, ... |
+| | Generation | [`OllamaGenerationWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/ollama_generate_template.json) | llama2, Mistral, ... |
+| Post Request based API | - | [`PostAPIModelWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/post_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#post-request-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/postapi_model_config_template.json) | - |
**支持的本地模型部署**
@@ -170,7 +170,7 @@ model_config = {
```python
openai_model_config = {
"config_name": "my_openai_config", # 模型配置的名称
- "model_type": "openai", # 模型wrapper的类型
+ "model_type": "openai_chat", # 模型wrapper的类型
# 用以初始化模型wrapper的详细参数
"model_name": "gpt-4", # OpenAI API中的模型名
diff --git a/docs/sphinx_doc/en/source/tutorial/103-example.md b/docs/sphinx_doc/en/source/tutorial/103-example.md
index 4b0e39ea7..563d072d9 100644
--- a/docs/sphinx_doc/en/source/tutorial/103-example.md
+++ b/docs/sphinx_doc/en/source/tutorial/103-example.md
@@ -21,7 +21,7 @@ While for model invocation, users should prepare a model configuration to specif
```python
model_config = {
"config_name": "{config_name}", # A unique name for the model config.
- "model_type": "openai", # Choose from "openai", "openai_dall_e", or "openai_embedding".
+ "model_type": "openai_chat", # Choose from "openai_chat", "openai_dall_e", or "openai_embedding".
"model_name": "{model_name}", # The model identifier used in the OpenAI API, such as "gpt-3.5-turbo", "gpt-4", or "text-embedding-ada-002".
"api_key": "xxx", # Your OpenAI API key. If unset, the environment variable OPENAI_API_KEY is used.
diff --git a/docs/sphinx_doc/en/source/tutorial/104-usecase.md b/docs/sphinx_doc/en/source/tutorial/104-usecase.md
index 0b9009e3b..3aa3b7d0b 100644
--- a/docs/sphinx_doc/en/source/tutorial/104-usecase.md
+++ b/docs/sphinx_doc/en/source/tutorial/104-usecase.md
@@ -24,7 +24,7 @@ As we discussed in the last tutorial, you need to prepare your model configurati
[
{
"config_name": "gpt-4-temperature-0.0",
- "model_type": "openai",
+ "model_type": "openai_chat",
"model_name": "gpt-4",
"api_key": "xxx",
"organization": "xxx",
diff --git a/docs/sphinx_doc/en/source/tutorial/201-agent.md b/docs/sphinx_doc/en/source/tutorial/201-agent.md
index a30c2153c..7d583331d 100644
--- a/docs/sphinx_doc/en/source/tutorial/201-agent.md
+++ b/docs/sphinx_doc/en/source/tutorial/201-agent.md
@@ -42,7 +42,8 @@ class AgentBase(Operator):
# messages it has observed. This method can be used to enrich the
# agent's understanding and memory without producing an immediate
# response.
- self.memory.add(x)
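+        # self.memory may be None when the agent runs without memory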
+ if self.memory:
+ self.memory.add(x)
def reply(self, x: dict = None) -> dict:
# The core method to be implemented by custom agents. It defines the
@@ -86,23 +87,31 @@ Below, we provide usages of how to configure various agents from the AgentPool:
def reply(self, x: dict = None) -> dict:
# Additional processing steps can occur here
+ # Record the input if needed
if self.memory:
- self.memory.add(x) # Update the memory with the input
+ self.memory.add(x)
# Generate a prompt for the language model using the system prompt and memory
- prompt = self.engine.join(
- self.sys_prompt,
- self.memory and self.memory.get_memory(),
+ prompt = self.model.format(
+ Msg("system", self.sys_prompt, role="system"),
+ self.memory
+ and self.memory.get_memory()
+ or x, # type: ignore[arg-type]
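+            # fall back to the input message x when memory is disabled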
)
# Invoke the language model with the prepared prompt
response = self.model(prompt).text
    # Format the response and create a message object
-    msg = Msg(self.name, response)
+    msg = Msg(self.name, response, role="assistant")
+
+ # Print/speak the message in this agent's voice
+ self.speak(msg)
# Record the message to memory and return it
- self.memory.add(msg)
+ if self.memory:
+ self.memory.add(msg)
+
return msg
```
diff --git a/docs/sphinx_doc/en/source/tutorial/203-model.md b/docs/sphinx_doc/en/source/tutorial/203-model.md
index 5c3cb0b7e..e071cf212 100644
--- a/docs/sphinx_doc/en/source/tutorial/203-model.md
+++ b/docs/sphinx_doc/en/source/tutorial/203-model.md
@@ -44,7 +44,7 @@ The detailed parameters will be fed into the corresponding model class's constru
{
# Basic parameters
"config_name": "gpt-4-temperature-0.0", # Model configuration name
- "model_type": "openai", # Correspond to `ModelWrapper` type
+ "model_type": "openai_chat", # Correspond to `ModelWrapper` type
# Detailed parameters
# ...
@@ -63,7 +63,7 @@ It corresponds to the `model_type` field in the `ModelWrapper` class in the sour
class OpenAIChatWrapper(OpenAIWrapperBase):
"""The model wrapper for OpenAI's chat API."""
- model_type: str = "openai"
+ model_type: str = "openai_chat"
# ...
```
@@ -72,7 +72,7 @@ In the current AgentScope, the supported `model_type` types, the corresponding
| API | Task | Model Wrapper | `model_type` | Some Supported Models |
|------------------------|-----------------|---------------------------------------------------------------------------------------------------------------------------------|-------------------------------|--------------------------------------------------|
-| OpenAI API | Chat | [`OpenAIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | `"openai"` | gpt-4, gpt-3.5-turbo, ... |
+| OpenAI API | Chat | [`OpenAIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | `"openai_chat"` | gpt-4, gpt-3.5-turbo, ... |
| | Embedding | [`OpenAIEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | `"openai_embedding"` | text-embedding-ada-002, ... |
| | DALL·E | [`OpenAIDALLEWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | `"openai_dall_e"` | dall-e-2, dall-e-3 |
| DashScope API | Chat | [`DashScopeChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | `"dashscope_chat"` | qwen-plus, qwen-max, ... |
@@ -98,9 +98,9 @@ Here we provide example configurations for different model wrappers.
OpenAI Chat API (agents.models.OpenAIChatWrapper)
```python
-openai_chat_config = {
+{
"config_name": "{your_config_name}",
- "model_type": "openai",
+ "model_type": "openai_chat",
# Required parameters
"model_name": "gpt-4",
@@ -273,7 +273,7 @@ openai_chat_config = {
"model_type": "gemini_chat",
# Required parameters
- "model_name": "{model_name}", # The model name in Gemini API, e.g. gemini-prp
+ "model_name": "{model_name}", # The model name in Gemini API, e.g. gemini-pro
# Optional parameters
"api_key": "{your_api_key}", # If not provided, the API key will be read from the environment variable GEMINI_API_KEY
@@ -291,7 +291,7 @@ openai_chat_config = {
"model_type": "gemini_embedding",
# Required parameters
- "model_name": "{model_name}", # The model name in Gemini API, e.g. gemini-prp
+ "model_name": "{model_name}", # The model name in Gemini API, e.g. models/embedding-001
# Optional parameters
"api_key": "{your_api_key}", # If not provided, the API key will be read from the environment variable GEMINI_API_KEY
diff --git a/docs/sphinx_doc/en/source/tutorial/204-service.md b/docs/sphinx_doc/en/source/tutorial/204-service.md
index 9a79a3121..be4916684 100644
--- a/docs/sphinx_doc/en/source/tutorial/204-service.md
+++ b/docs/sphinx_doc/en/source/tutorial/204-service.md
@@ -16,6 +16,7 @@ The following table outlines the various Service functions by type. These functi
| --------------------------- | --------------------- | -------------------------------------------------------------------------------------------------------------- |
| Code | `execute_python_code` | Execute a piece of Python code, optionally inside a Docker container. |
| Retrieval | `retrieve_from_list` | Retrieve a specific item from a list based on given criteria. |
+| | `cos_sim` | Compute the cosine similarity between two embeddings. |
| SQL Query | `query_mysql` | Execute SQL queries on a MySQL database and return results. |
| | `query_sqlite` | Execute SQL queries on a SQLite database and return results. |
| | `query_mongodb` | Perform queries or operations on a MongoDB collection. |
diff --git a/docs/sphinx_doc/zh_CN/source/tutorial/103-example.md b/docs/sphinx_doc/zh_CN/source/tutorial/103-example.md
index 780bfe874..d2f851f02 100644
--- a/docs/sphinx_doc/zh_CN/source/tutorial/103-example.md
+++ b/docs/sphinx_doc/zh_CN/source/tutorial/103-example.md
@@ -15,7 +15,7 @@ AgentScope内置了灵活的通信机制。在本教程中,我们将通过一
```python
model_config = {
"config_name": "{config_name}", # A unique name for the model config.
- "model_type": "openai", # Choose from "openai", "openai_dall_e", or "openai_embedding".
+ "model_type": "openai_chat", # Choose from "openai_chat", "openai_dall_e", or "openai_embedding".
"model_name": "{model_name}", # The model identifier used in the OpenAI API, such as "gpt-3.5-turbo", "gpt-4", or "text-embedding-ada-002".
"api_key": "xxx", # Your OpenAI API key. If unset, the environment variable OPENAI_API_KEY is used.
diff --git a/docs/sphinx_doc/zh_CN/source/tutorial/104-usecase.md b/docs/sphinx_doc/zh_CN/source/tutorial/104-usecase.md
index eb2db904e..cf0ebfb70 100644
--- a/docs/sphinx_doc/zh_CN/source/tutorial/104-usecase.md
+++ b/docs/sphinx_doc/zh_CN/source/tutorial/104-usecase.md
@@ -24,7 +24,7 @@
[
{
"config_name": "gpt-4-temperature-0.0",
- "model_type": "openai",
+ "model_type": "openai_chat",
"model_name": "gpt-4",
"api_key": "xxx",
"organization": "xxx",
diff --git a/docs/sphinx_doc/zh_CN/source/tutorial/201-agent.md b/docs/sphinx_doc/zh_CN/source/tutorial/201-agent.md
index 59bf0d6fa..a14ee55c8 100644
--- a/docs/sphinx_doc/zh_CN/source/tutorial/201-agent.md
+++ b/docs/sphinx_doc/zh_CN/source/tutorial/201-agent.md
@@ -43,7 +43,8 @@ class AgentBase(Operator):
# messages it has observed. This method can be used to enrich the
# agent's understanding and memory without producing an immediate
# response.
- self.memory.add(x)
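+        # self.memory may be None when the agent runs without memory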
+ if self.memory:
+ self.memory.add(x)
def reply(self, x: dict = None) -> dict:
# The core method to be implemented by custom agents. It defines the
@@ -87,24 +88,31 @@ class AgentBase(Operator):
def reply(self, x: dict = None) -> dict:
# Additional processing steps can occur here
+ # Record the input if needed
if self.memory:
- self.memory.add(x) # Update the memory with the input
+ self.memory.add(x)
# Generate a prompt for the language model using the system prompt and memory
- prompt = self.engine.join(
- self.sys_prompt,
- self.memory and self.memory.get_memory(),
+ prompt = self.model.format(
+ Msg("system", self.sys_prompt, role="system"),
+ self.memory
+ and self.memory.get_memory()
+ or x, # type: ignore[arg-type]
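+            # fall back to the input message x when memory is disabled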
)
# Invoke the language model with the prepared prompt
response = self.model(prompt).text
    # Format the response and create a message object
-    msg = Msg(self.name, response)
+    msg = Msg(self.name, response, role="assistant")
+
+ # Print/speak the message in this agent's voice
+ self.speak(msg)
# Record the message to memory and return it
if self.memory:
self.memory.add(msg)
+
return msg
```
diff --git a/docs/sphinx_doc/zh_CN/source/tutorial/203-model.md b/docs/sphinx_doc/zh_CN/source/tutorial/203-model.md
index 99c4dec93..f2384c8da 100644
--- a/docs/sphinx_doc/zh_CN/source/tutorial/203-model.md
+++ b/docs/sphinx_doc/zh_CN/source/tutorial/203-model.md
@@ -32,7 +32,7 @@ agentscope.init(model_configs=MODEL_CONFIG_OR_PATH)
model_configs = [
{
"config_name": "gpt-4-temperature-0.0",
- "model_type": "openai",
+ "model_type": "openai_chat",
"model_name": "gpt-4",
"api_key": "xxx",
"organization": "xxx",
@@ -65,7 +65,7 @@ AgentScope中,模型配置是一个字典,用于指定模型的类型以及
{
# 基础参数
"config_name": "gpt-4-temperature-0.0", # 模型配置名称
- "model_type": "openai", # 对应`ModelWrapper`类型
+ "model_type": "openai_chat", # 对应`ModelWrapper`类型
# 详细参数
# ...
@@ -83,7 +83,7 @@ AgentScope中,模型配置是一个字典,用于指定模型的类型以及
class OpenAIChatWrapper(OpenAIWrapper):
"""The model wrapper for OpenAI's chat API."""
- model_type: str = "openai"
+ model_type: str = "openai_chat"
# ...
```
@@ -92,7 +92,7 @@ API如下:
| API | Task | Model Wrapper | `model_type` | Some Supported Models |
|------------------------|-----------------|---------------------------------------------------------------------------------------------------------------------------------|-------------------------------|--------------------------------------------------|
-| OpenAI API | Chat | [`OpenAIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | `"openai"` | gpt-4, gpt-3.5-turbo, ... |
+| OpenAI API | Chat | [`OpenAIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | `"openai_chat"` | gpt-4, gpt-3.5-turbo, ... |
| | Embedding | [`OpenAIEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | `"openai_embedding"` | text-embedding-ada-002, ... |
| | DALL·E | [`OpenAIDALLEWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | `"openai_dall_e"` | dall-e-2, dall-e-3 |
| DashScope API | Chat | [`DashScopeChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | `"dashscope_chat"` | qwen-plus, qwen-max, ... |
@@ -118,9 +118,9 @@ API如下:
OpenAI Chat API (agents.models.OpenAIChatWrapper)
```python
-openai_chat_config = {
+{
"config_name": "{your_config_name}",
- "model_type": "openai",
+ "model_type": "openai_chat",
# 必要参数
"model_name": "gpt-4",
@@ -293,7 +293,7 @@ openai_chat_config = {
"model_type": "gemini_chat",
# 必要参数
- "model_name": "{model_name}", # Gemini Chat API中的模型名,例如:gemini-prp
+ "model_name": "{model_name}", # Gemini Chat API中的模型名,例如:gemini-pro
# 可选参数
"api_key": "{your_api_key}", # 如果没有提供,将从环境变量GEMINI_API_KEY中读取
@@ -311,7 +311,7 @@ openai_chat_config = {
"model_type": "gemini_embedding",
# 必要参数
- "model_name": "{model_name}", # Gemini Embedding API中的模型名,例如:gemini-prp
+ "model_name": "{model_name}", # Gemini Embedding API中的模型名,例如:models/embedding-001
# 可选参数
"api_key": "{your_api_key}", # 如果没有提供,将从环境变量GEMINI_API_KEY中读取
diff --git a/docs/sphinx_doc/zh_CN/source/tutorial/204-service.md b/docs/sphinx_doc/zh_CN/source/tutorial/204-service.md
index a70672d86..72d1bc7ae 100644
--- a/docs/sphinx_doc/zh_CN/source/tutorial/204-service.md
+++ b/docs/sphinx_doc/zh_CN/source/tutorial/204-service.md
@@ -13,6 +13,7 @@
| ------------------ | --------------------- | ---------------------------------------------------------------- |
| 代码 | `execute_python_code` | 执行一段 Python 代码,可选择在 Docker 容器内部执行。 |
| 检索 | `retrieve_from_list` | 根据给定的标准从列表中检索特定项目。 |
+| | `cos_sim` | 计算两个embedding的余弦相似度。 |
| SQL查询 | `query_mysql` | 在 MySQL 数据库上执行 SQL 查询并返回结果。 |
| | `query_sqlite` | 在 SQLite 数据库上执行 SQL 查询并返回结果。 |
| | `query_mongodb` | 对 MongoDB 集合执行查询或操作。 |
diff --git a/examples/0_jupyter_example_template/code/complete_code.py b/examples/0_jupyter_example_template/code/complete_code.py
new file mode 100644
index 000000000..576b02543
--- /dev/null
+++ b/examples/0_jupyter_example_template/code/complete_code.py
@@ -0,0 +1,2 @@
+# -*- coding: utf-8 -*-
+# Your complete code here.
diff --git a/examples/0_jupyter_example_template/main.ipynb b/examples/0_jupyter_example_template/main.ipynb
new file mode 100644
index 000000000..6af54e9ec
--- /dev/null
+++ b/examples/0_jupyter_example_template/main.ipynb
@@ -0,0 +1,101 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "source": [
+ "# Your Example Title Here\n",
+ "\n",
+ "This example will show\n",
+ "- How to ...\n",
+ "- How to ...\n",
+ "\n",
+ "\n",
+ "## Background (If needed)\n",
+ "\n",
+ "The background of your example here. \n",
+ "\n",
+ "\n",
+ "## Tested Models\n",
+ "\n",
+ "These models are tested in this example. For other models, some modifications may be needed. \n",
+ "- xxx\n",
+ "- xxx \n",
+ "\n",
+ "\n",
+ "## Prerequisites\n",
+ "\n",
+ "Fill the next cell to meet the following requirements \n",
+ "- The requirements to execute this example\n",
+ "- ...\n",
+ "- [Optional] Optional requirements\n",
+ "- ...\n"
+ ],
+ "metadata": {
+ "collapsed": false
+ },
+ "id": "5f0c2fc19efe575e"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "outputs": [],
+ "source": [
+ "# The prerequisites, e.g.\n",
+ "bing_api_key = \"xxx\"\n",
+ "\n",
+ "your_model_configuration_name = \"xxx\"\n",
+ "\n",
+ "your_model_configuration = {\n",
+ " \"model_type\": \"xxx\", \n",
+ " \"config_name\": your_model_configuration_name,\n",
+ " \n",
+ " # ...\n",
+ "}"
+ ],
+ "metadata": {
+ "collapsed": false,
+ "is_executing": true
+ },
+ "id": "596cca552b163051"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "outputs": [],
+ "source": [],
+ "metadata": {
+ "collapsed": false
+ },
+ "id": "966b96e4893709a5"
+ },
+ {
+ "cell_type": "markdown",
+ "source": [],
+ "metadata": {
+ "collapsed": false
+ },
+ "id": "7cbb4630bf5e2c15"
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 2
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython2",
+ "version": "2.7.6"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/examples/0_python_example_template/README.md b/examples/0_python_example_template/README.md
new file mode 100644
index 000000000..9ce277ec7
--- /dev/null
+++ b/examples/0_python_example_template/README.md
@@ -0,0 +1,26 @@
+# Your Example Title Here
+
+This example will show
+- How to ...
+- How to ...
+
+
+## Background (If needed)
+
+The background of your example here.
+
+
+## Tested Models
+
+These models are tested in this example. For other models, some modifications may be needed.
+- xxx
+- xxx
+
+
+## Prerequisites
+
+Fill the next cell to meet the following requirements
+- The requirements to execute this example
+- ...
+- [Optional] Optional requirements
+- ...
diff --git a/examples/0_python_example_template/main.py b/examples/0_python_example_template/main.py
new file mode 100644
index 000000000..576b02543
--- /dev/null
+++ b/examples/0_python_example_template/main.py
@@ -0,0 +1,2 @@
+# -*- coding: utf-8 -*-
+# Your complete code here.
diff --git a/examples/conversation_basic/conversation.py b/examples/conversation_basic/conversation.py
index f19d5dd1b..b482b557e 100644
--- a/examples/conversation_basic/conversation.py
+++ b/examples/conversation_basic/conversation.py
@@ -12,7 +12,7 @@ def main() -> None:
agentscope.init(
model_configs=[
{
- "model_type": "openai",
+ "model_type": "openai_chat",
"config_name": "gpt-3.5-turbo",
"model_name": "gpt-3.5-turbo",
"api_key": "xxx", # Load from env if not provided
diff --git a/examples/conversation_nl2sql/configs/model_configs.json b/examples/conversation_nl2sql/configs/model_configs.json
index fa8ebbf7b..f12a060d0 100644
--- a/examples/conversation_nl2sql/configs/model_configs.json
+++ b/examples/conversation_nl2sql/configs/model_configs.json
@@ -1,7 +1,7 @@
[
{
"config_name": "gpt-3.5-turbo",
- "model_type": "openai",
+ "model_type": "openai-chat",
"model_name": "gpt-3.5-turbo",
"api_key": "xxx",
"organization": "xxx",
@@ -11,7 +11,7 @@
},
{
"config_name": "gpt-4",
- "model_type": "openai",
+ "model_type": "openai-chat",
"model_name": "gpt-4",
"api_key": "xxx",
"organization": "xxx",
diff --git a/examples/conversation_self_organizing/auto-discussion.py b/examples/conversation_self_organizing/auto-discussion.py
index 82f56dba3..b30e8ee33 100644
--- a/examples/conversation_self_organizing/auto-discussion.py
+++ b/examples/conversation_self_organizing/auto-discussion.py
@@ -9,7 +9,7 @@
model_configs = [
{
- "model_type": "openai",
+ "model_type": "openai_chat",
"config_name": "gpt-3.5-turbo",
"model_name": "gpt-3.5-turbo",
"api_key": "xxx", # Load from env if not provided
diff --git a/examples/conversation_with_mentions/configs/model_configs.json b/examples/conversation_with_mentions/configs/model_configs.json
index 1d1b7193c..fc82add7c 100644
--- a/examples/conversation_with_mentions/configs/model_configs.json
+++ b/examples/conversation_with_mentions/configs/model_configs.json
@@ -1,6 +1,6 @@
[
{
- "model_type": "openai",
+ "model_type": "openai_chat",
"config_name": "gpt-4",
"model_name": "gpt-4",
"api_key": "xxx",
diff --git a/examples/distributed_basic/configs/model_configs.json b/examples/distributed_basic/configs/model_configs.json
index 3700b6e49..bdc12fe73 100644
--- a/examples/distributed_basic/configs/model_configs.json
+++ b/examples/distributed_basic/configs/model_configs.json
@@ -1,7 +1,7 @@
[
{
"config_name": "gpt-4",
- "model_type": "openai",
+ "model_type": "openai_chat",
"model_name": "gpt-4",
"api_key": "xxx",
"organization": "xxx",
diff --git a/examples/distributed_debate/configs/model_configs.json b/examples/distributed_debate/configs/model_configs.json
index 3700b6e49..bdc12fe73 100644
--- a/examples/distributed_debate/configs/model_configs.json
+++ b/examples/distributed_debate/configs/model_configs.json
@@ -1,7 +1,7 @@
[
{
"config_name": "gpt-4",
- "model_type": "openai",
+ "model_type": "openai_chat",
"model_name": "gpt-4",
"api_key": "xxx",
"organization": "xxx",
diff --git a/examples/model_configs_template/dashscope_chat_template.json b/examples/model_configs_template/dashscope_chat_template.json
new file mode 100644
index 000000000..b6a88e960
--- /dev/null
+++ b/examples/model_configs_template/dashscope_chat_template.json
@@ -0,0 +1,37 @@
+[{
+ "config_name": "dashscope_chat-qwen-max",
+ "model_type": "dashscope_chat",
+ "model_name": "qwen-max",
+ "api_key": "{your_api_key}",
+ "generate_args": {
+ "temperature": 0.7
+ }
+},
+{
+ "config_name": "dashscope_chat-qwen-max-1201",
+ "model_type": "dashscope_chat",
+ "model_name": "qwen-max-1201",
+ "api_key": "{your_api_key}",
+ "generate_args": {
+ "temperature": 0.7
+ }
+},
+{
+ "config_name": "dashscope_chat-qwen-turbo",
+ "model_type": "dashscope_chat",
+ "model_name": "qwen-turbo",
+ "api_key": "{your_api_key}",
+ "generate_args": {
+ "temperature": 0.7
+ }
+},
+{
+ "config_name": "dashscope_chat-qwen-plus",
+ "model_type": "dashscope_chat",
+ "model_name": "qwen-plus",
+ "api_key": "{your_api_key}",
+ "generate_args": {
+ "temperature": 0.7
+ }
+}
+]
\ No newline at end of file
diff --git a/examples/model_configs_template/dashscope_image_synthesis_template.json b/examples/model_configs_template/dashscope_image_synthesis_template.json
new file mode 100644
index 000000000..f57427e2f
--- /dev/null
+++ b/examples/model_configs_template/dashscope_image_synthesis_template.json
@@ -0,0 +1,34 @@
+[{
+ "config_name": "dashscope_image_synthesis-wanx-v1-1024x1024",
+ "model_type": "dashscope_image_synthesis",
+ "model_name": "wanx-v1",
+ "api_key": "{your_api_key}",
+ "generate_args": {
+ "n": 1,
+ "negative_prompt": "{unwanted_prompt}",
+ "size": "1024*1024"
+ }
+},
+{
+ "config_name": "dashscope_image_synthesis-wanx-v1-720x1280",
+ "model_type": "dashscope_image_synthesis",
+ "model_name": "wanx-v1",
+ "api_key": "{your_api_key}",
+ "generate_args": {
+ "n": 1,
+ "negative_prompt": "{unwanted_prompt}",
+ "size": "720*1280"
+ }
+},
+{
+ "config_name": "dashscope_image_synthesis-wanx-v1-1280x720",
+ "model_type": "dashscope_image_synthesis",
+ "model_name": "wanx-v1",
+ "api_key": "{your_api_key}",
+ "generate_args": {
+ "n": 1,
+ "negative_prompt": "{unwanted_prompt}",
+ "size": "1280*720"
+ }
+}
+]
\ No newline at end of file
diff --git a/examples/model_configs_template/dashscope_multimodal_template.json b/examples/model_configs_template/dashscope_multimodal_template.json
new file mode 100644
index 000000000..8c6ef792b
--- /dev/null
+++ b/examples/model_configs_template/dashscope_multimodal_template.json
@@ -0,0 +1,40 @@
+[{
+ "config_name": "dashscope_multimodal-qwen-vl-plus",
+ "model_type": "dashscope_multimodal",
+ "model_name": "qwen-vl-plus",
+ "api_key": "{your_api_key}",
+ "generate_args": {
+ }
+},
+{
+ "config_name": "dashscope_multimodal-qwen-vl-max",
+ "model_type": "dashscope_multimodal",
+ "model_name": "qwen-vl-max",
+ "api_key": "{your_api_key}",
+ "generate_args": {
+ }
+},
+{
+ "config_name": "dashscope_multimodal-qwen-audio-turbo",
+ "model_type": "dashscope_multimodal",
+ "model_name": "qwen-audio-turbo",
+ "api_key": "{your_api_key}",
+ "generate_args": {
+ }
+},
+{
+ "config_name": "dashscope_multimodal-qwen-vl-chat-v1",
+ "model_type": "dashscope_multimodal",
+ "model_name": "qwen-vl-chat-v1",
+ "api_key": "{your_api_key}",
+ "generate_args": {
+ }
+},
+{
+    "config_name": "dashscope_multimodal-qwen-audio-chat",
+    "model_type": "dashscope_multimodal",
+    "model_name": "qwen-audio-chat",
+ "api_key": "{your_api_key}",
+ "generate_args": {
+ }
+}
+]
\ No newline at end of file
diff --git a/examples/model_configs_template/dashscope_text_embedding_template.json b/examples/model_configs_template/dashscope_text_embedding_template.json
new file mode 100644
index 000000000..8ae8d05d7
--- /dev/null
+++ b/examples/model_configs_template/dashscope_text_embedding_template.json
@@ -0,0 +1,13 @@
+[{
+ "config_name": "dashscope_text_embedding-text-embedding-v1",
+ "model_type": "dashscope_text_embedding",
+ "model_name": "text-embedding-v1",
+ "api_key": "{your_api_key}"
+},
+{
+ "config_name": "dashscope_text_embedding-text-embedding-v2",
+ "model_type": "dashscope_text_embedding",
+ "model_name": "text-embedding-v2",
+ "api_key": "{your_api_key}"
+}
+]
\ No newline at end of file
diff --git a/examples/model_configs_template/gemini_chat_template.json b/examples/model_configs_template/gemini_chat_template.json
new file mode 100644
index 000000000..cb602462e
--- /dev/null
+++ b/examples/model_configs_template/gemini_chat_template.json
@@ -0,0 +1,7 @@
+[{
+ "config_name": "gemini_chat-gemini-pro",
+ "model_type": "gemini_chat",
+ "model_name": "gemini-pro",
+ "api_key": "{your_api_key}"
+}
+]
\ No newline at end of file
diff --git a/examples/model_configs_template/gemini_embedding_template.json b/examples/model_configs_template/gemini_embedding_template.json
new file mode 100644
index 000000000..920163153
--- /dev/null
+++ b/examples/model_configs_template/gemini_embedding_template.json
@@ -0,0 +1,7 @@
+[{
+ "config_name": "gemini_chat-embedding-001",
+ "model_type": "gemini_embedding",
+ "model_name": "models/embedding-001",
+ "api_key": "{your_api_key}"
+}
+]
\ No newline at end of file
diff --git a/examples/model_configs_template/ollama_chat_template.json b/examples/model_configs_template/ollama_chat_template.json
new file mode 100644
index 000000000..01664f40c
--- /dev/null
+++ b/examples/model_configs_template/ollama_chat_template.json
@@ -0,0 +1,37 @@
+[{
+ "config_name": "ollama_chat-llama2",
+ "model_type": "ollama_chat",
+ "model_name": "llama2",
+ "options": {
+ "temperature": 0.7
+ },
+ "keep_alive": "5m"
+},
+{
+ "config_name": "ollama_chat-mistral",
+ "model_type": "ollama_chat",
+ "model_name": "mistral",
+ "options": {
+ "temperature": 0.7
+ },
+ "keep_alive": "5m"
+},
+{
+ "config_name": "ollama_chat-qwen:0.5b",
+ "model_type": "ollama_chat",
+ "model_name": "qwen:0.5b",
+ "options": {
+ "temperature": 0.7
+ },
+ "keep_alive": "5m"
+},
+{
+ "config_name": "ollama_chat-codellama",
+ "model_type": "ollama_chat",
+ "model_name": "codellama",
+ "options": {
+ "temperature": 0.7
+ },
+ "keep_alive": "5m"
+}
+]
\ No newline at end of file
diff --git a/examples/model_configs_template/ollama_embedding_template.json b/examples/model_configs_template/ollama_embedding_template.json
new file mode 100644
index 000000000..c7d5a427d
--- /dev/null
+++ b/examples/model_configs_template/ollama_embedding_template.json
@@ -0,0 +1,28 @@
+[{
+ "config_name": "ollama_embedding-llama2",
+ "model_type": "ollama_embedding",
+ "model_name": "llama2",
+ "options": {
+ "temperature": 0.7
+ },
+ "keep_alive": "5m"
+},
+{
+ "config_name": "ollama_embedding-mistral",
+ "model_type": "ollama_embedding",
+ "model_name": "mistral",
+ "options": {
+ "temperature": 0.7
+ },
+ "keep_alive": "5m"
+},
+{
+ "config_name": "ollama_embedding-qwen:0.5b",
+ "model_type": "ollama_embedding",
+ "model_name": "qwen:0.5b",
+ "options": {
+ "temperature": 0.7
+ },
+ "keep_alive": "5m"
+}
+]
\ No newline at end of file
diff --git a/examples/model_configs_template/ollama_generate_template.json b/examples/model_configs_template/ollama_generate_template.json
new file mode 100644
index 000000000..9e21c52b7
--- /dev/null
+++ b/examples/model_configs_template/ollama_generate_template.json
@@ -0,0 +1,28 @@
+[{
+ "config_name": "ollama_generate-llama2",
+ "model_type": "ollama_generate",
+ "model_name": "llama2",
+ "options": {
+ "temperature": 0.7
+ },
+ "keep_alive": "5m"
+},
+{
+ "config_name": "ollama_generate-mistral",
+ "model_type": "ollama_generate",
+ "model_name": "mistral",
+ "options": {
+ "temperature": 0.7
+ },
+ "keep_alive": "5m"
+},
+{
+ "config_name": "ollama_generate-qwen:0.5b",
+ "model_type": "ollama_generate",
+ "model_name": "qwen:0.5b",
+ "options": {
+ "temperature": 0.7
+ },
+ "keep_alive": "5m"
+}
+]
\ No newline at end of file
diff --git a/examples/model_configs_template/openai_chat_template.json b/examples/model_configs_template/openai_chat_template.json
new file mode 100644
index 000000000..8d3f78087
--- /dev/null
+++ b/examples/model_configs_template/openai_chat_template.json
@@ -0,0 +1,25 @@
+[{
+ "config_name": "openai_chat_gpt-4",
+ "model_type": "openai_chat",
+ "model_name": "gpt-4",
+ "api_key": "{your_api_key}",
+ "client_args": {
+ "max_retries": 3
+ },
+ "generate_args": {
+ "temperature": 0.7
+ }
+},
+{
+ "config_name": "openai_chat_gpt-3.5-turbo",
+ "model_type": "openai_chat",
+ "model_name": "gpt-3.5-turbo",
+ "api_key": "{your_api_key}",
+ "client_args": {
+ "max_retries": 3
+ },
+ "generate_args": {
+ "temperature": 0.7
+ }
+}
+]
\ No newline at end of file
diff --git a/examples/model_configs_template/openai_dall_e_template.json b/examples/model_configs_template/openai_dall_e_template.json
new file mode 100644
index 000000000..9f8fa9d3e
--- /dev/null
+++ b/examples/model_configs_template/openai_dall_e_template.json
@@ -0,0 +1,27 @@
+[{
+ "config_name": "openai_dall_e-dall-e-2",
+ "model_type": "openai_dall_e",
+ "model_name": "dall-e-2",
+ "api_key": "{your_api_key}",
+ "client_args": {
+ "max_retries": 3
+ },
+ "generate_args": {
+ "n": 1,
+ "size": "512x512"
+ }
+},
+{
+ "config_name": "openai_dall_e-dall-e-3",
+ "model_type": "openai_dall_e",
+ "model_name": "dall-e-3",
+ "api_key": "{your_api_key}",
+ "client_args": {
+ "max_retries": 3
+ },
+ "generate_args": {
+ "n": 1,
+ "size": "512x512"
+ }
+}
+]
\ No newline at end of file
diff --git a/examples/model_configs_template/openai_embedding_template.json b/examples/model_configs_template/openai_embedding_template.json
new file mode 100644
index 000000000..5b21aa1f9
--- /dev/null
+++ b/examples/model_configs_template/openai_embedding_template.json
@@ -0,0 +1,13 @@
+[{
+ "config_name": "openai_embedding-text-embedding-ada-002",
+ "model_type": "openai_embedding",
+ "model_name": "text-embedding-ada-002",
+ "api_key": "{your_api_key}",
+ "client_args": {
+ "max_retries": 3
+    }
+}
+]
\ No newline at end of file
diff --git a/examples/model_configs_template/postapi_model_config_template.json b/examples/model_configs_template/postapi_model_config_template.json
new file mode 100644
index 000000000..30442d025
--- /dev/null
+++ b/examples/model_configs_template/postapi_model_config_template.json
@@ -0,0 +1,49 @@
+[{
+ "config_name": "post_api-flask_llama2-7b-chat-hf",
+ "model_type": "post_api",
+ "api_url": "http://127.0.0.1:8000/llm/",
+ "json_args": {
+ "max_length": 4096,
+ "temperature": 0.5
+ }
+},
+{
+ "config_name": "post_api-flask_llama2-7b-chat-ms",
+ "model_type": "post_api",
+ "api_url": "http://127.0.0.1:8000/llm/",
+ "json_args": {
+ "max_length": 4096,
+ "temperature": 0.5
+ }
+},
+{
+ "config_name": "post_api-fastchat_llama2-7b-chat-hf",
+ "model_type": "openai_chat",
+ "api_key": "EMPTY",
+ "client_args": {
+ "base_url": "http://127.0.0.1:8000/v1/"
+ },
+ "generate_args": {
+ "temperature": 0.5
+ }
+},
+{
+ "config_name": "post_api-vllm_llama2-7b-chat-hf",
+ "model_type": "openai_chat",
+ "api_key": "EMPTY",
+ "client_args": {
+ "base_url": "http://127.0.0.1:8000/v1/"
+ },
+ "generate_args": {
+ "temperature": 0.5
+ }
+},
+{
+ "config_name": "post_api-model-inference-api-gpt2",
+ "model_type": "post_api",
+ "headers": {
+ "Authorization": "Bearer {YOUR_API_TOKEN}"
+ },
+ "api_url": "https://api-inference.huggingface.co/models/gpt2"
+}
+]
\ No newline at end of file
diff --git a/notebook/conversation.ipynb b/notebook/conversation.ipynb
index cfef40067..ea4836f03 100644
--- a/notebook/conversation.ipynb
+++ b/notebook/conversation.ipynb
@@ -49,7 +49,7 @@
"agentscope.init(\n",
" model_configs=[\n",
" {\n",
- " \"model_type\": \"openai\",\n",
+ " \"model_type\": \"openai_chat\",\n",
" \"config_name\": \"gpt-3.5-turbo\",\n",
" \"model_name\": \"gpt-3.5-turbo\",\n",
" \"api_key\": \"xxx\", # Load from env if not provided\n",
diff --git a/notebook/distributed_debate.ipynb b/notebook/distributed_debate.ipynb
index e2a0c21f4..bec6c22f9 100644
--- a/notebook/distributed_debate.ipynb
+++ b/notebook/distributed_debate.ipynb
@@ -48,7 +48,7 @@
"source": [
"model_configs = [\n",
" {\n",
- " \"model_type\": \"openai\",\n",
+ " \"model_type\": \"openai_chat\",\n",
" \"config_name\": \"gpt-3.5-turbo\",\n",
" \"model_name\": \"gpt-3.5-turbo\",\n",
" \"api_key\": \"xxx\",\n",
@@ -58,7 +58,7 @@
" },\n",
" },\n",
" {\n",
- " \"model_type\": \"openai\",\n",
+ " \"model_type\": \"openai_chat\",\n",
" \"config_name\": \"gpt-4\",\n",
" \"model_name\": \"gpt-4\",\n",
" \"api_key\": \"xxx\",\n",
diff --git a/notebook/distributed_dialog.ipynb b/notebook/distributed_dialog.ipynb
index cb20c6a46..ab01224b0 100644
--- a/notebook/distributed_dialog.ipynb
+++ b/notebook/distributed_dialog.ipynb
@@ -41,7 +41,7 @@
"source": [
"model_configs = [\n",
" {\n",
- " \"model_type\": \"openai\",\n",
+ " \"model_type\": \"openai_chat\",\n",
" \"config_name\": \"gpt-3.5-turbo\",\n",
" \"model_name\": \"gpt-3.5-turbo\",\n",
" \"api_key\": \"xxx\",\n",
@@ -51,7 +51,7 @@
" },\n",
" },\n",
" {\n",
- " \"model_type\": \"openai\",\n",
+ " \"model_type\": \"openai_chat\",\n",
" \"config_name\": \"gpt-4\",\n",
" \"model_name\": \"gpt-4\",\n",
" \"api_key\": \"xxx\",\n",
diff --git a/scripts/README.md b/scripts/README.md
index fa0865eb7..f3d07fa2b 100644
--- a/scripts/README.md
+++ b/scripts/README.md
@@ -141,7 +141,7 @@ In AgentScope, you can load the model with the following model configs: `./flask
```json
{
"model_type": "post_api",
- "config_name": "flask_llama2-7b-chat",
+ "config_name": "flask_llama2-7b-chat-hf",
"api_url": "http://127.0.0.1:8000/llm/",
"json_args": {
"max_length": 4096,
@@ -166,17 +166,17 @@ Install Flask and modelscope by following command.
pip install flask torch modelscope
```
-Taking model `modelscope/Llama-2-7b-ms` and port `8000` as an example,
+Taking model `modelscope/Llama-2-7b-chat-ms` and port `8000` as an example,
to set up the model API serving, run the following command.
```bash
python flask_modelscope/setup_ms_service.py \
- --model_name_or_path modelscope/Llama-2-7b-ms \
+ --model_name_or_path modelscope/Llama-2-7b-chat-ms \
--device "cuda:0" \
--port 8000
```
-You can replace `modelscope/Llama-2-7b-ms` with any model card in
+You can replace `modelscope/Llama-2-7b-chat-ms` with any model card in
modelscope model hub.
##### How to use in AgentScope
@@ -187,7 +187,7 @@ In AgentScope, you can load the model with the following model configs:
```json
{
"model_type": "post_api",
- "config_name": "flask_llama2-7b-ms",
+ "config_name": "flask_llama2-7b-chat-ms",
"api_url": "http://127.0.0.1:8000/llm/",
"json_args": {
"max_length": 4096,
@@ -218,7 +218,7 @@ Taking model `meta-llama/Llama-2-7b-chat-hf` and port `8000` as an example,
to set up model API serving, run the following command.
```bash
-bash fastchat_script/fastchat_setup.sh -m meta-llama/Llama-2-7b-chat-hf -p 8000
+bash fastchat/fastchat_setup.sh -m meta-llama/Llama-2-7b-chat-hf -p 8000
```
#### Supported Models
@@ -229,12 +229,12 @@ of FastChat.
#### How to use in AgentScope
-Now you can load the model in AgentScope by the following model config: `fastchat_script/model_config.json`.
+Now you can load the model in AgentScope by the following model config: `fastchat/model_config.json`.
```json
{
- "model_type": "openai",
- "config_name": "meta-llama/Llama-2-7b-chat-hf",
+ "model_type": "openai_chat",
+ "config_name": "fastchat_llama2-7b-chat-hf",
"api_key": "EMPTY",
"client_args": {
"base_url": "http://127.0.0.1:8000/v1/"
@@ -262,7 +262,7 @@ Taking model `meta-llama/Llama-2-7b-chat-hf` and port `8000` as an example,
to set up model API serving, run
```bash
-./vllm_script/vllm_setup.sh -m meta-llama/Llama-2-7b-chat-hf -p 8000
+./vllm/vllm_setup.sh -m meta-llama/Llama-2-7b-chat-hf -p 8000
```
#### Supported models
@@ -273,11 +273,11 @@ of vllm.
#### How to use in AgentScope
-Now you can load the model in AgentScope by the following model config: `vllm_script/model_config.json`.
+Now you can load the model in AgentScope by the following model config: `vllm/model_config.json`.
```json
{
- "model_type": "openai",
+ "model_type": "openai_chat",
"config_name": "meta-llama/Llama-2-7b-chat-hf",
"api_key": "EMPTY",
"client_args": {
diff --git a/scripts/faschat/fastchat_setup.sh b/scripts/fastchat/fastchat_setup.sh
similarity index 88%
rename from scripts/faschat/fastchat_setup.sh
rename to scripts/fastchat/fastchat_setup.sh
index 9b755090e..6e11cea98 100644
--- a/scripts/faschat/fastchat_setup.sh
+++ b/scripts/fastchat/fastchat_setup.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-model_name_or_path="llama-2"
+model_name_or_path="meta-llama/Llama-2-7b-chat-hf"
port=8000
while getopts "m:p:" flag
diff --git a/scripts/faschat/model_config.json b/scripts/fastchat/model_config.json
similarity index 63%
rename from scripts/faschat/model_config.json
rename to scripts/fastchat/model_config.json
index 708793f99..602e75bf8 100644
--- a/scripts/faschat/model_config.json
+++ b/scripts/fastchat/model_config.json
@@ -1,7 +1,6 @@
{
- "model_type": "openai",
- "config_name": "fs-llama-2",
- "model_name": "llama-2",
+ "model_type": "openai_chat",
+ "config_name": "fastchat_llama2-7b-chat-hf",
"api_key": "EMPTY",
"client_args": {
"base_url": "http:localhost:8000/v1/"
diff --git a/scripts/flask_modelscope/model_config.json b/scripts/flask_modelscope/model_config.json
index 20d05f67c..846866923 100644
--- a/scripts/flask_modelscope/model_config.json
+++ b/scripts/flask_modelscope/model_config.json
@@ -1,6 +1,6 @@
{
"model_type": "post_api",
- "config_name": "post_llama-2-chat-7b-ms",
+ "config_name": "flask_llama2-7b-chat-ms",
"api_url": "http://127.0.0.1:8000/llm/",
"json_args": {
"max_length": 4096,
diff --git a/scripts/flask_modelscope/setup_ms_service.py b/scripts/flask_modelscope/setup_ms_service.py
index be5e151ef..100e69ea0 100644
--- a/scripts/flask_modelscope/setup_ms_service.py
+++ b/scripts/flask_modelscope/setup_ms_service.py
@@ -24,9 +24,9 @@ def get_response() -> dict:
prompt = json.pop("inputs")
- global model, tokenizer, device
+ global model, tokenizer
- prompt_tokenized = tokenizer(prompt, return_tensors="pt").to(device)
+ prompt_tokenized = tokenizer(prompt, return_tensors="pt").to(model.device)
response_ids = model.generate(
prompt_tokenized.input_ids,
@@ -57,16 +57,11 @@ def get_response() -> dict:
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_name_or_path", type=str, required=True)
- parser.add_argument("--device", type=str, default="cuda")
+ parser.add_argument("--device", type=str, default="auto")
parser.add_argument("--port", type=int, default=8000)
args = parser.parse_args()
- global model, tokenizer, device
-
- if args.device == "auto":
- device = "cuda"
- else:
- device = args.device
+ global model, tokenizer
model = modelscope.AutoModelForCausalLM.from_pretrained(
args.model_name_or_path,
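The device handling change follows a placement-agnostic pattern: let `from_pretrained` decide where the model lives (the new `--device` default of `auto` presumably feeds a `device_map`), then move inputs to `model.device` rather than a hard-coded `cuda:0`. A minimal sketch, assuming modelscope mirrors the transformers-style Auto classes, as the script itself does for the model:

```python
# Sketch of the pattern the patch adopts (checkpoint name is illustrative):
# the framework places the model, and inputs follow model.device.
import modelscope

model = modelscope.AutoModelForCausalLM.from_pretrained(
    "modelscope/Llama-2-7b-chat-ms",
    device_map="auto",
)
tokenizer = modelscope.AutoTokenizer.from_pretrained(
    "modelscope/Llama-2-7b-chat-ms",
)
inputs = tokenizer("Hello!", return_tensors="pt").to(model.device)
output_ids = model.generate(inputs.input_ids, max_new_tokens=32)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```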
diff --git a/scripts/flask_transformers/model_config.json b/scripts/flask_transformers/model_config.json
index d532fe407..9c96b114c 100644
--- a/scripts/flask_transformers/model_config.json
+++ b/scripts/flask_transformers/model_config.json
@@ -1,6 +1,6 @@
{
"model_type": "post_api",
- "config_name": "post_llama-2-chat-7b-hf",
+ "config_name": "flask_llama-2-chat-7b-hf",
"api_url": "http://127.0.0.1:8000/llm/",
"json_args": {
"max_length": 4096,
diff --git a/scripts/flask_transformers/setup_hf_service.py b/scripts/flask_transformers/setup_hf_service.py
index 09a44d6e1..13eae5ed4 100644
--- a/scripts/flask_transformers/setup_hf_service.py
+++ b/scripts/flask_transformers/setup_hf_service.py
@@ -24,9 +24,9 @@ def get_response() -> dict:
prompt = json.pop("inputs")
- global model, tokenizer, device
+ global model, tokenizer
- prompt_tokenized = tokenizer(prompt, return_tensors="pt").to(device)
+ prompt_tokenized = tokenizer(prompt, return_tensors="pt").to(model.device)
response_ids = model.generate(
prompt_tokenized.input_ids,
@@ -57,16 +57,11 @@ def get_response() -> dict:
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_name_or_path", type=str, required=True)
- parser.add_argument("--device", type=str, default="cuda")
+ parser.add_argument("--device", type=str, default="auto")
parser.add_argument("--port", type=int, default=8000)
args = parser.parse_args()
- global model, tokenizer, device
-
- if args.device == "auto":
- device = "cuda"
- else:
- device = args.device
+ global model, tokenizer
model = transformers.AutoModelForCausalLM.from_pretrained(
args.model_name_or_path,
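Once either flask service is running, the `post_api` configs reach it over plain HTTP, with the prompt under the `inputs` key that `get_response()` pops. A sketch of a raw request; whether extra keys such as `max_length` are forwarded to `generate()` is an assumption here:

```python
# Exercise the flask endpoint directly. The "inputs" key mirrors
# json.pop("inputs") in get_response(); "max_length" comes from the
# json_args in the configs above.
import requests

resp = requests.post(
    "http://127.0.0.1:8000/llm/",
    json={"inputs": "What is AgentScope?", "max_length": 4096},
)
resp.raise_for_status()
print(resp.json())  # response shape depends on the service implementation
```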
diff --git a/scripts/vllm_script/model_config.json b/scripts/vllm/model_config.json
similarity index 77%
rename from scripts/vllm_script/model_config.json
rename to scripts/vllm/model_config.json
index 3932c3bec..bcb469c3a 100644
--- a/scripts/vllm_script/model_config.json
+++ b/scripts/vllm/model_config.json
@@ -1,7 +1,6 @@
{
- "model_type": "openai",
+ "model_type": "openai_chat",
"config_name": "vllm-llama-2",
- "model_name": "llama-2",
"api_key": "EMPTY",
"client_args": {
"base_url": "http:localhost:8000/v1/"
diff --git a/scripts/vllm_script/vllm_setup.sh b/scripts/vllm/vllm_setup.sh
similarity index 86%
rename from scripts/vllm_script/vllm_setup.sh
rename to scripts/vllm/vllm_setup.sh
index af7f4f984..6cfaa2b8b 100644
--- a/scripts/vllm_script/vllm_setup.sh
+++ b/scripts/vllm/vllm_setup.sh
@@ -2,7 +2,7 @@
# pip3 install vllm
-model_name_or_path="llama-2"
+model_name_or_path="meta-llama/Llama-2-7b-chat-hf"
port=8000
while getopts "m:p:" flag
diff --git a/src/agentscope/models/dashscope_model.py b/src/agentscope/models/dashscope_model.py
index c43429d0e..875891cf1 100644
--- a/src/agentscope/models/dashscope_model.py
+++ b/src/agentscope/models/dashscope_model.py
@@ -378,7 +378,6 @@ def __call__(
response = dashscope.ImageSynthesis.call(
model=self.model_name,
prompt=prompt,
- n=1,
**kwargs,
)
if response.status_code != HTTPStatus.OK:
@@ -649,10 +648,12 @@ def __call__(
)
# step5: return response
+ content = response.output["choices"][0]["message"]["content"]
+ if isinstance(content, list):
+ content = content[0]["text"]
+
return ModelResponse(
- text=response.output["choices"][0]["message"]["content"][0][
- "text"
- ],
+ text=content,
raw=response,
)
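The fix above normalizes the two shapes DashScope can return for `message.content`: a plain string (chat models) or a list of text parts (multimodal models). A self-contained illustration:

```python
# Illustration of the normalization added above: content may be a plain
# string or a list of {"text": ...} parts, depending on the model family.
def extract_text(output: dict) -> str:
    content = output["choices"][0]["message"]["content"]
    if isinstance(content, list):  # multimodal models wrap text in parts
        content = content[0]["text"]
    return content

plain = {"choices": [{"message": {"content": "hi"}}]}
parts = {"choices": [{"message": {"content": [{"text": "hi"}]}}]}
assert extract_text(plain) == extract_text(parts) == "hi"
```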
diff --git a/src/agentscope/models/model.py b/src/agentscope/models/model.py
index f7a953d6f..efc5a2c07 100644
--- a/src/agentscope/models/model.py
+++ b/src/agentscope/models/model.py
@@ -6,7 +6,7 @@
{
"config_name": "{config_name}",
- "model_type": "openai" | "post_api" | ...,
+ "model_type": "openai_chat" | "post_api" | ...,
...
}
@@ -20,7 +20,7 @@
{
"config_name": "{id of your model}",
- "model_type": "openai",
+ "model_type": "openai_chat",
"model_name": "{model_name_for_openai, e.g. gpt-3.5-turbo}",
"api_key": "{your_api_key}",
"organization": "{your_organization, if needed}",
diff --git a/src/agentscope/models/ollama_model.py b/src/agentscope/models/ollama_model.py
index 3a969e248..6c15b5eb0 100644
--- a/src/agentscope/models/ollama_model.py
+++ b/src/agentscope/models/ollama_model.py
@@ -46,6 +46,7 @@ def __init__(
model_name: str,
options: dict = None,
keep_alive: str = "5m",
+ **kwargs: Any,
) -> None:
"""Initialize the model wrapper for Ollama API.
diff --git a/src/agentscope/models/post_model.py b/src/agentscope/models/post_model.py
index f8575b81b..aec07137d 100644
--- a/src/agentscope/models/post_model.py
+++ b/src/agentscope/models/post_model.py
@@ -214,7 +214,7 @@ def format(
class PostAPIDALLEWrapper(PostAPIModelWrapperBase):
- """A post api model wrapper compatible with openai dalle"""
+ """A post api model wrapper compatible with openai dall_e"""
model_type: str = "post_api_dall_e"
diff --git a/tests/dashscope_test.py b/tests/dashscope_test.py
index 0a6bec7d3..13a67baa6 100644
--- a/tests/dashscope_test.py
+++ b/tests/dashscope_test.py
@@ -168,7 +168,7 @@ def test_image_synthesis_wrapper_call_failure(
# Call the wrapper with prompt and expect a RuntimeError
prompt = "Generate an image of a sunset"
with self.assertRaises(RuntimeError) as context:
- self.wrapper(prompt, save_local=False)
+ self.wrapper(prompt, save_local=False, n=1)
# Assert the expected exception message
self.assertIn("Error Code", str(context.exception))
diff --git a/tests/model_test.py b/tests/model_test.py
index dc432879f..f361b7f0d 100644
--- a/tests/model_test.py
+++ b/tests/model_test.py
@@ -45,7 +45,7 @@ def test_model_registry(self) -> None:
)
# get model wrapper class by model type
self.assertEqual(
- _get_model_wrapper(model_type="openai"),
+ _get_model_wrapper(model_type="openai_chat"),
OpenAIChatWrapper,
)
# return PostAPIModelWrapperBase if model_type is not supported
diff --git a/tests/prompt_engine_test.py b/tests/prompt_engine_test.py
index 07324579c..1050c3d30 100644
--- a/tests/prompt_engine_test.py
+++ b/tests/prompt_engine_test.py
@@ -38,7 +38,7 @@ def setUp(self) -> None:
},
},
{
- "model_type": "openai",
+ "model_type": "openai_chat",
"config_name": "gpt-4",
"model_name": "gpt-4",
"api_key": "xxx",