diff --git a/notebook/tools_dependency_injection.ipynb b/notebook/tools_dependency_injection.ipynb
index c34dd9fdcd..c3cb80d3c3 100644
--- a/notebook/tools_dependency_injection.ipynb
+++ b/notebook/tools_dependency_injection.ipynb
@@ -47,9 +47,18 @@
},
{
"cell_type": "code",
- "execution_count": 29,
+ "execution_count": 1,
"metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/usr/local/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+ " from .autonotebook import tqdm as notebook_tqdm\n"
+ ]
+ }
+ ],
"source": [
"import os\n",
"from typing import Annotated, Literal\n",
@@ -71,7 +80,7 @@
},
{
"cell_type": "code",
- "execution_count": 30,
+ "execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
@@ -129,11 +138,19 @@
},
{
"cell_type": "code",
- "execution_count": 32,
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "config_list = [{\"model\": \"gpt-4o-mini\", \"api_key\": os.environ[\"OPENAI_API_KEY\"]}]\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
"metadata": {},
"outputs": [],
"source": [
- "config_list = [{\"model\": \"gpt-4o-mini\", \"api_key\": os.environ[\"OPENAI_API_KEY\"]}]\n",
"\n",
"assistant = ConversableAgent(\n",
" name=\"assistant\",\n",
@@ -294,7 +311,7 @@
},
{
"cell_type": "code",
- "execution_count": 38,
+ "execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
@@ -321,7 +338,7 @@
},
{
"cell_type": "code",
- "execution_count": 39,
+ "execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
@@ -354,7 +371,7 @@
},
{
"cell_type": "code",
- "execution_count": 40,
+ "execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
@@ -373,7 +390,7 @@
},
{
"cell_type": "code",
- "execution_count": 41,
+ "execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
@@ -415,9 +432,87 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 9,
"metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\u001b[33muser_proxy_1\u001b[0m (to chat_manager):\n",
+ "\n",
+ "Start by getting the weather for Sydney, Australia, and follow that up by checking if there are tickets for the 'AG2 Live' concert.\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[32m\n",
+ "Next speaker: weather_agent\n",
+ "\u001b[0m\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
+ "\u001b[33mweather_agent\u001b[0m (to chat_manager):\n",
+ "\n",
+ "\u001b[32m***** Suggested tool call (call_4ZWdbwuuojMxGXoxQDPzXpRl): get_weather *****\u001b[0m\n",
+ "Arguments: \n",
+ "{\"location\": \"Sydney, Australia\"}\n",
+ "\u001b[32m****************************************************************************\u001b[0m\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[32m\n",
+ "Next speaker: user_proxy_1\n",
+ "\u001b[0m\n",
+ "\u001b[35m\n",
+ ">>>>>>>> EXECUTING FUNCTION get_weather...\n",
+ "Call ID: call_4ZWdbwuuojMxGXoxQDPzXpRl\n",
+ "Input arguments: {'location': 'Sydney, Australia'}\u001b[0m\n",
+ "Accessing third party Weather System using username ag2weather\n",
+ "\u001b[33muser_proxy_1\u001b[0m (to chat_manager):\n",
+ "\n",
+ "\u001b[32m***** Response from calling tool (call_4ZWdbwuuojMxGXoxQDPzXpRl) *****\u001b[0m\n",
+ "It's sunny and 40 degrees Celsius in Sydney, Australia.\n",
+ "\u001b[32m**********************************************************************\u001b[0m\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[32m\n",
+ "Next speaker: ticket_agent\n",
+ "\u001b[0m\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
+ "\u001b[33mticket_agent\u001b[0m (to chat_manager):\n",
+ "\n",
+ "\u001b[32m***** Suggested tool call (call_MQVDpFTDcdNdKxH7LsikfR1z): tickets_available *****\u001b[0m\n",
+ "Arguments: \n",
+ "{\"concert_name\":\"AG2 Live\"}\n",
+ "\u001b[32m**********************************************************************************\u001b[0m\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[32m\n",
+ "Next speaker: user_proxy_1\n",
+ "\u001b[0m\n",
+ "\u001b[35m\n",
+ ">>>>>>>> EXECUTING FUNCTION tickets_available...\n",
+ "Call ID: call_MQVDpFTDcdNdKxH7LsikfR1z\n",
+ "Input arguments: {'concert_name': 'AG2 Live'}\u001b[0m\n",
+ "Accessing third party Ticketing System using username ag2tickets\n",
+ "\u001b[33muser_proxy_1\u001b[0m (to chat_manager):\n",
+ "\n",
+ "\u001b[32m***** Response from calling tool (call_MQVDpFTDcdNdKxH7LsikfR1z) *****\u001b[0m\n",
+ "false\n",
+ "\u001b[32m**********************************************************************\u001b[0m\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "ChatResult(chat_id=None, chat_history=[{'content': \"Start by getting the weather for Sydney, Australia, and follow that up by checking if there are tickets for the 'AG2 Live' concert.\", 'role': 'assistant', 'name': 'user_proxy_1'}, {'content': 'None', 'tool_calls': [{'id': 'call_4ZWdbwuuojMxGXoxQDPzXpRl', 'function': {'arguments': '{\"location\": \"Sydney, Australia\"}', 'name': 'get_weather'}, 'type': 'function'}], 'name': 'weather_agent', 'role': 'assistant'}, {'content': \"It's sunny and 40 degrees Celsius in Sydney, Australia.\", 'tool_responses': [{'tool_call_id': 'call_4ZWdbwuuojMxGXoxQDPzXpRl', 'role': 'tool', 'content': \"It's sunny and 40 degrees Celsius in Sydney, Australia.\"}], 'role': 'tool', 'name': 'user_proxy_1'}, {'content': 'None', 'tool_calls': [{'id': 'call_MQVDpFTDcdNdKxH7LsikfR1z', 'function': {'arguments': '{\"concert_name\":\"AG2 Live\"}', 'name': 'tickets_available'}, 'type': 'function'}], 'name': 'ticket_agent', 'role': 'assistant'}, {'content': 'false', 'tool_responses': [{'tool_call_id': 'call_MQVDpFTDcdNdKxH7LsikfR1z', 'role': 'tool', 'content': 'false'}], 'role': 'tool', 'name': 'user_proxy_1'}], summary='false', cost={'usage_including_cached_inference': {'total_cost': 0}, 'usage_excluding_cached_inference': {'total_cost': 0}}, human_input=[])"
+ ]
+ },
+ "execution_count": 9,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
"source": [
"groupchat = GroupChat(agents=[user_proxy, weather_agent, ticket_agent], messages=[], max_round=5)\n",
"manager = GroupChatManager(groupchat=groupchat, llm_config={\"config_list\": config_list})\n",
@@ -454,7 +549,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.11.11"
+ "version": "3.11.10"
}
},
"nbformat": 4,
diff --git a/website/docs/Use-Cases/walk-throughs/assets/group-chat-fsm.png b/website/docs/Use-Cases/walk-throughs/assets/group-chat-fsm.png
new file mode 100644
index 0000000000..a3faa93d35
--- /dev/null
+++ b/website/docs/Use-Cases/walk-throughs/assets/group-chat-fsm.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a1b2989787accff4ccd11f0b0da1451e2fbd389c2ca8f7399edecb0e0dad4a25
+size 286239
diff --git a/website/docs/Use-Cases/walk-throughs/assets/swarm_enhanced_01.png b/website/docs/Use-Cases/walk-throughs/assets/swarm-enhanced-01.png
similarity index 100%
rename from website/docs/Use-Cases/walk-throughs/assets/swarm_enhanced_01.png
rename to website/docs/Use-Cases/walk-throughs/assets/swarm-enhanced-01.png
diff --git a/website/docs/Use-Cases/walk-throughs/assets/swarm_enhanced_02.png b/website/docs/Use-Cases/walk-throughs/assets/swarm-enhanced-02.png
similarity index 100%
rename from website/docs/Use-Cases/walk-throughs/assets/swarm_enhanced_02.png
rename to website/docs/Use-Cases/walk-throughs/assets/swarm-enhanced-02.png
diff --git a/website/docs/Use-Cases/walk-throughs/assets/swarm_enhanced_03.png b/website/docs/Use-Cases/walk-throughs/assets/swarm-enhanced-03.png
similarity index 100%
rename from website/docs/Use-Cases/walk-throughs/assets/swarm_enhanced_03.png
rename to website/docs/Use-Cases/walk-throughs/assets/swarm-enhanced-03.png
diff --git a/website/docs/Use-Cases/walk-throughs/assets/swarm_enhanced_04.png b/website/docs/Use-Cases/walk-throughs/assets/swarm-enhanced-04.png
similarity index 100%
rename from website/docs/Use-Cases/walk-throughs/assets/swarm_enhanced_04.png
rename to website/docs/Use-Cases/walk-throughs/assets/swarm-enhanced-04.png
diff --git a/website/docs/Use-Cases/walk-throughs/code-execution.md b/website/docs/Use-Cases/walk-throughs/code-execution.md
index dfa93e61c1..a6e75f0c50 100644
--- a/website/docs/Use-Cases/walk-throughs/code-execution.md
+++ b/website/docs/Use-Cases/walk-throughs/code-execution.md
@@ -401,3 +401,13 @@ The command line code executor does not keep any state in memory between executi
Contrast to the command line code executor, the Jupyter code executor runs all code blocks in the same Jupyter kernel, which keeps the state in memory between executions. See the topic page for [Jupyter Code Executor](/docs/topics/code-execution/jupyter-code-executor).
The choice between command line and Jupyter code executor depends on the nature of the code blocks in agents' conversation. If each code block is a "script" that does not use variables from previous code blocks, the command line code executor is a good choice. If some code blocks contain expensive computations (e.g., training a machine learning model and loading a large amount of data), and you want to keep the state in memory to avoid repeated computations, the Jupyter code executor is a better choice.
+
+### More Code Execution examples
+- [Task Solving with Code Generation, Execution, and Debugging](https://docs.ag2.ai/notebooks/agentchat_auto_feedback_from_code_execution)
+- [Auto-Generated Agent Chat: Task Solving with Code Gen, Execution, Debugging & Human Feedback](https://docs.ag2.ai/notebooks/agentchat_human_feedback)
+
+### API
+- [ConversableAgent.code_execution_config](TODO)
+- [LocalCommandLineCodeExecutor](TODO)
+- [DockerCommandLineCodeExecutor](TODO)
+- [jupyter.JupyterCodeExecutor](TODO)
diff --git a/website/docs/Use-Cases/walk-throughs/custom-group-chat.md b/website/docs/Use-Cases/walk-throughs/custom-group-chat.md
index 6705c79aed..19182204eb 100644
--- a/website/docs/Use-Cases/walk-throughs/custom-group-chat.md
+++ b/website/docs/Use-Cases/walk-throughs/custom-group-chat.md
@@ -1 +1,714 @@
-# Customized Group Chat
+# Customized Group Chat flows
+
+:::tip
+If you haven't had a chance to read about or use AG2's GroupChat orchestration, see the [Getting Started](TODO) introduction to GroupChat.
+:::
+
+`GroupChat`'s four built-in conversation patterns — automatic, round robin, random, and manual — provide a good degree of flexibility to orchestrate agents in a group chat setting.
+
+However, there are two additional methods to control the flow in a GroupChat:
+
+1. Use a Callable as a speaker selection method
+
+2. Define a graph specifying valid transitions
+
+Here's a walk-through of each of these methods.
+
+## 1. Callable speaker selection method
+By specifying a Callable as the speaker selection method for a GroupChat, a function is called after each agent replies and the agent it returns will be the next agent.
+
+#### Setting up
+
+```python
+# Imports and an LLM configuration for our agents
+import autogen
+from autogen import (
+ ConversableAgent,
+    AssistantAgent,
+ UserProxyAgent,
+ Agent,
+ GroupChat,
+ GroupChatManager,
+)
+
+config_list = [{"model": "gpt-4o-mini", "api_key": os.environ["OPENAI_API_KEY"]}]
+```
+
+#### Agents and workflow
+
+We have 5 agents in our workflow:
+
+- Planner: Give a plan and revise.
+- Engineer: Retrieves papers from the web by writing code.
+- Executor: Executes code.
+- Scientist: Reads papers and writes summaries.
+- Admin (us): Our Human-in-the-loop approving or ending the chat.
+
+```python
+planner = autogen.AssistantAgent(
+ name="Planner",
+ system_message="""Planner. Suggest a plan. Revise the plan based on feedback from admin and critic, until admin approval.
+The plan may involve an engineer who can write code and a scientist who doesn't write code.
+Explain the plan first. Be clear which step is performed by an engineer, and which step is performed by a scientist.
+""",
+ llm_config=config_list,
+)
+
+user_proxy = autogen.UserProxyAgent(
+ name="Admin",
+ system_message="A human admin. Interact with the planner to discuss the plan. Plan execution needs to be approved by this admin.",
+ code_execution_config=False,
+)
+
+engineer = autogen.AssistantAgent(
+ name="Engineer",
+ llm_config=config_list,
+ system_message="""Engineer. You follow an approved plan. You write python/shell code to solve tasks. Wrap the code in a code block that specifies the script type. The user can't modify your code. So do not suggest incomplete code which requires others to modify. Don't use a code block if it's not intended to be executed by the executor.
+Don't include multiple code blocks in one response. Do not ask others to copy and paste the result. Check the execution result returned by the executor.
+If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try.
+""",
+)
+scientist = autogen.AssistantAgent(
+ name="Scientist",
+ llm_config=config_list,
+ system_message="""Scientist. You follow an approved plan. You are able to categorize papers after seeing their abstracts printed. You don't write code.""",
+)
+
+executor = autogen.UserProxyAgent(
+ name="Executor",
+ system_message="Executor. Execute the code written by the engineer and report the result.",
+ human_input_mode="NEVER",
+ code_execution_config={
+ "last_n_messages": 3,
+ "work_dir": "paper",
+ "use_docker": False,
+ }, # Please set use_docker=True if docker is available to run the generated code. Using docker is safer than running the generated code directly.
+)
+```
+
+The planned workflow is:
+
+1. The planner interacts with the Admin (user) to revise a plan. Only when the Admin (human) types "Approve" can we move to the next step.
+2. The engineer will write code to retrieve papers. The code will be executed by executor.
+3. When the code is executed successfully, the scientist will read the papers and write a summary.
+4. The summary will be reviewed by the Admin, who will give comments. When the Admin types "TERMINATE", the process will be terminated.
+
+Here's the magic function that will be used at each turn to determine the next agent.
+
+By using the chat's messages, retrieved through `groupchat.messages`, we can evaluate where we are in the workflow and determine the next agent.
+
+If none of the conditions match, we will fall back to the `random` speaker selection method.
+
+```python
+def custom_speaker_selection_func(last_speaker: Agent, groupchat: GroupChat):
+ """Define a customized speaker selection function.
+ A recommended way is to define a transition for each speaker in the groupchat.
+
+ Returns:
+ Return an `Agent` class or a string from ['auto', 'manual', 'random', 'round_robin'] to select a default method to use.
+ """
+ messages = groupchat.messages
+
+ # We'll start with a transition to the planner
+ if len(messages) <= 1:
+ return planner
+
+ if last_speaker is user_proxy:
+ if "Approve" in messages[-1]["content"]:
+ # If the last message is approved, let the engineer to speak
+ return engineer
+ elif messages[-2]["name"] == "Planner":
+ # If it is the planning stage, let the planner to continue
+ return planner
+ elif messages[-2]["name"] == "Scientist":
+ # If the last message is from the scientist, let the scientist to continue
+ return scientist
+
+ elif last_speaker is planner:
+ # Always let the user to speak after the planner
+ return user_proxy
+
+ elif last_speaker is engineer:
+ if "```python" in messages[-1]["content"]:
+ # If the last message is a python code block, let the executor to speak
+ return executor
+ else:
+ # Otherwise, let the engineer to continue
+ return engineer
+
+ elif last_speaker is executor:
+ if "exitcode: 1" in messages[-1]["content"]:
+ # If the last message indicates an error, let the engineer to improve the code
+ return engineer
+ else:
+ # Otherwise, let the scientist to speak
+ return scientist
+
+ elif last_speaker is scientist:
+ # Always let the user to speak after the scientist
+ return user_proxy
+
+ else:
+ return "random"
+```
+
+#### Create GroupChat and run
+
+Make sure to assign the speaker selection function to the GroupChat.
+
+```python
+groupchat = autogen.GroupChat(
+ agents=[user_proxy, engineer, scientist, planner, executor],
+ messages=[],
+ max_round=20,
+ # Here we specify our custom speaker selection function
+ speaker_selection_method=custom_speaker_selection_func,
+)
+
+manager = autogen.GroupChatManager(
+ groupchat=groupchat,
+ llm_config=config_list)
+
+user_proxy.initiate_chat(
+ manager,
+ message="Find a latest paper about gpt-4 on arxiv and find its potential applications in software."
+)
+```
+
+In the output we can see that:
+- It transitioned straight to the Planner
+- After receiving approval from us it transitioned to the Engineer
+- After the Engineer, as it found Python code, it transitioned to the Executor to execute the code
+- The code ran successfully so it transitioned to the Scientist
+- After the Scientist it returned to us and we ended the chat
+
+```console
+Admin (to chat_manager):
+
+Find a latest paper about gpt-4 on arxiv and find its potential applications in software.
+
+--------------------------------------------------------------------------------
+Planner (to chat_manager):
+
+**Initial Plan:**
+
+1. **Scientist's Task: Literature Review**
+ - The scientist will conduct a comprehensive literature review to find the latest paper about GPT-4 on arXiv. This involves using search queries related to GPT-4 and filtering results by the most recent publications.
+
+2. **Scientist's Task: Analysis of the Paper**
+ - Once the latest paper is identified, the scientist will read through the paper to understand its contents, focusing on the methodology, results, and discussions about potential applications in software.
+
+3. **Scientist's Task: Identifying Potential Applications**
+ - The scientist will then brainstorm and list potential applications of GPT-4 in software, based on the findings from the paper. This may include applications in natural language processing, code generation, chatbots, and more.
+
+4. **Engineer's Task: Technical Feasibility Assessment**
+ - The engineer will review the list of potential applications provided by the scientist and assess the technical feasibility of each application. This involves considering the current state of software technology, the capabilities of GPT-4, and the practicality of integrating GPT-4 into existing systems.
+
+5. **Engineer's Task: Prototype Development Plan**
+ - For applications deemed technically feasible, the engineer will draft a plan for developing a prototype that demonstrates the use of GPT-4 in a software application. This plan will outline the required resources, estimated timeline, and the steps for implementation.
+
+6. **Joint Task: Finalizing the Plan**
+ - The scientist and engineer will collaborate to finalize the plan, ensuring that it is scientifically sound and technically viable. They will prepare a document detailing the plan for potential applications and the prototype development.
+
+7. **Presentation to Admin**
+ - The finalized plan will be presented to the admin for approval. The admin will review the plan and provide feedback.
+
+8. **Revisions Based on Feedback**
+ - Based on the admin's feedback, the scientist and engineer will make necessary revisions to the plan. This iterative process will continue until the admin approves the plan.
+
+**Awaiting Admin's Feedback:** Please review the initial plan and provide feedback on any adjustments or additional details you would like to see.
+
+--------------------------------------------------------------------------------
+Admin (to chat_manager):
+
+Approve
+
+--------------------------------------------------------------------------------
+Engineer (to chat_manager):
+
+Since the plan has been approved, I will now proceed with the first step, which is to find the latest paper about GPT-4 on arXiv. To do this, I will write a Python script that uses the arXiv API to search for papers related to GPT-4 and filter them by the most recent publications.
+
+Here is the Python script that accomplishes this task:
+
+'''python
+import requests
+from datetime import datetime
+
+# Define the URL for the arXiv API
+ARXIV_API_URL = "http://export.arxiv.org/api/query"
+
+# Define the search parameters
+search_query = "all:gpt-4"
+start = 0
+max_results = 1
+sort_by = "submittedDate"
+sort_order = "descending"
+
+# Construct the query
+query_params = {
+ "search_query": search_query,
+ "start": start,
+ "max_results": max_results,
+ "sortBy": sort_by,
+ "sortOrder": sort_order
+}
+
+# Send the request to the arXiv API
+response = requests.get(ARXIV_API_URL, params=query_params)
+
+# Check if the request was successful
+if response.status_code == 200:
+ # Parse the response
+ feed = response.text
+ # Find the entry element, which contains the paper information
+ start_entry = feed.find('')
+ end_entry = feed.find('')
+ entry = feed[start_entry:end_entry]
+
+ # Extract the title
+ start_title = entry.find('
') + 7
+ end_title = entry.find('')
+ title = entry[start_title:end_title].strip()
+
+ # Extract the published date
+ start_published = entry.find('') + 12
+ end_published = entry.find('')
+ published = entry[start_published:end_published].strip()
+
+ # Extract the summary
+ start_summary = entry.find('') + 9
+ end_summary = entry.find('')
+ summary = entry[start_summary:end_summary].strip()
+
+ # Extract the authors
+ authors = []
+ start_author = entry.find('')
+ end_author = entry.find('')
+ while start_author != -1 and end_author != -1:
+ start_name = entry.find('', start_author) + 6
+ end_name = entry.find('', start_author)
+ author_name = entry[start_name:end_name].strip()
+ authors.append(author_name)
+ start_author = entry.find('', end_author)
+ end_author = entry.find('', start_author)
+
+ # Print the results
+ print(f"Title: {title}")
+ print(f"Published Date: {published}")
+ print(f"Authors: {', '.join(authors)}")
+ print(f"Summary: {summary}")
+else:
+ print("Failed to retrieve data from arXiv API.")
+'''
+
+This script will output the title, published date, authors, and summary of the most recent paper related to GPT-4 on arXiv. Please note that the actual content of the paper and its potential applications in software will need to be analyzed manually after retrieving the paper information.
+
+--------------------------------------------------------------------------------
+
+>>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...
+Executor (to chat_manager):
+
+exitcode: 0 (execution succeeded)
+Code output:
+Title: A Data-Centric Approach To Generate Faithful and High Quality Patient
+ Summaries with Large Language Models
+Published Date: 024-02-23T16:32:28Z
+Authors: Stefan Hegselmann, Shannon Zejiang Shen, Florian Gierse, Monica Agrawal, David Sontag, Xiaoyi Jiang
+Summary: Patients often face difficulties in understanding their hospitalizations,
+while healthcare workers have limited resources to provide explanations. In
+this work, we investigate the potential of large language models to generate
+patient summaries based on doctors' notes and study the effect of training data
+on the faithfulness and quality of the generated summaries. To this end, we
+develop a rigorous labeling protocol for hallucinations, and have two medical
+experts annotate 100 real-world summaries and 100 generated summaries. We show
+that fine-tuning on hallucination-free data effectively reduces hallucinations
+from 2.60 to 1.55 per summary for Llama 2, while preserving relevant
+information. Although the effect is still present, it is much smaller for GPT-4
+when prompted with five examples (0.70 to 0.40). We also conduct a qualitative
+evaluation using hallucination-free and improved training data. GPT-4 shows
+very good results even in the zero-shot setting. We find that common
+quantitative metrics do not correlate well with faithfulness and quality.
+Finally, we test GPT-4 for automatic hallucination detection, which yields
+promising results.
+
+
+--------------------------------------------------------------------------------
+Scientist (to chat_manager):
+
+Based on the abstract provided, the paper titled "A Data-Centric Approach To Generate Faithful and High Quality Patient Summaries with Large Language Models" explores the use of large language models, including GPT-4, to generate patient summaries from doctors' notes. The study focuses on the impact of training data on the faithfulness and quality of the generated summaries and also investigates the potential of GPT-4 for automatic hallucination detection.
+
+**Potential Applications in Software:**
+
+1. **Healthcare Documentation Automation:**
+ - GPT-4 could be used to develop software that assists healthcare professionals in creating accurate and comprehensive patient summaries by automatically processing doctors' notes and other medical records.
+
+2. **Clinical Decision Support Systems:**
+ - Integrating GPT-4 into clinical decision support systems could provide healthcare workers with insights and suggestions based on a patient's medical history, potentially improving diagnosis and treatment planning.
+
+3. **Patient Education and Communication:**
+ - Software applications could leverage GPT-4 to translate complex medical information into patient-friendly summaries, enhancing patient understanding of their health conditions and treatments.
+
+4. **Medical Training and Simulation:**
+ - GPT-4 could be used to create realistic medical scenarios for training medical students and professionals, simulating patient interactions and generating case studies.
+
+5. **Data Quality Assurance:**
+ - The paper suggests that GPT-4 can be used for automatic hallucination detection, which refers to the identification of inaccuracies or fabrications in generated text. This could be applied to software that ensures the quality and reliability of medical documentation.
+
+6. **Research and Development:**
+ - GPT-4 could assist researchers in summarizing and synthesizing large volumes of medical literature, aiding in the discovery of new insights and the development of novel treatments.
+
+7. **Personalized Health Monitoring:**
+ - Software applications could use GPT-4 to provide personalized health monitoring and advice by analyzing user input, such as symptoms or lifestyle factors, and generating tailored health recommendations.
+
+These potential applications highlight the versatility of GPT-4 in the realm of healthcare software, offering opportunities to enhance patient care, improve healthcare workflows, and support medical education and research.
+
+--------------------------------------------------------------------------------
+Admin (to chat_manager):
+
+TERMINATE
+
+--------------------------------------------------------------------------------
+```
+
+## 2. Specified flows using graphs
+Using a graph, you can specify the valid transitions using the automatic speaker selection method for a GroupChat (`speaker_selection_method="auto"`, which is the default).
+
+A graph is simply a dictionary specifying, for each agent, which agents they can transition to.
+
+```python
+# agent_a can transition to any other agent, however, those agents can only transition to agent_a
+transition_graph = {
+ agent_a: [agent_b, agent_c],
+ agent_b: [agent_a],
+ agent_c: [agent_a],
+}
+```
+
+At each turn, the LLM will be presented with the valid agents based on the graph, limiting the transitions to those you've specified.
+
+:::note
+There are a number of interesting transition paths you can create with a graph, see the [Finite State Machine documentation](https://docs.ag2.ai/notebooks/agentchat_groupchat_finite_state_machine) to explore them.
+:::
+
+In this walk-through we're going to create a game with three teams, each team has three players (with the first as a team leader). Each player has a number of chocolates that no other player knows about, even the team leader.
+
+The goal of the game is to tally up all the chocolates across the three teams, with each team leader responsible for tallying up their team's chocolate count.
+
+To control the flow of the conversation, team leaders can transition to each other and transitions within a team can only be with their team members.
+
+Here's what that should look like with our three teams, A, B, and C. Agents labelled zero will be the team leaders.
+
+![Group chat team transition graph](./assets/group-chat-fsm.png)
+
+```python
+# Imports and LLM Configuration
+from autogen import ConversableAgent, GroupChat, GroupChatManager
+import random
+import os
+
+config_list = {"model": "gpt-4o", "api_key": os.environ["OPENAI_API_KEY"]}
+
+# Helper function to get an agent from our working list by name
+def get_agent_by_name(agents, name) -> ConversableAgent:
+ for agent in agents:
+ if agent.name == name:
+ return agent
+```
+Here we create the containers for our agents, the speaker transition graph, and the number of chocolates for each agent
+```python
+# Create an empty directed graph
+agents = []
+speaker_transitions_dict = {}
+secret_values = {}
+```
+This looks more complicated than it is: we're creating each agent for the three teams, giving them a random number of chocolates and explicit instructions on what they need to do.
+
+For each team we create the allowable transitions for inside that team, which are just with each other.
+
+```python
+# Outer loop for prefixes 'A', 'B', 'C'
+for prefix in ["A", "B", "C"]:
+ # Add 3 nodes with each prefix to the graph using a for loop
+ for i in range(3):
+ node_id = f"{prefix}{i}"
+ secret_value = random.randint(1, 5) # Generate a random secret value
+ secret_values[node_id] = secret_value
+
+        # Create a ConversableAgent for each node
+ agents.append(
+ ConversableAgent(
+ name=node_id,
+ system_message=f"""Your name is {node_id}.
+ Do not respond as the speaker named in the NEXT tag if your name is not in the NEXT tag. Instead, suggest a relevant team leader to handle the mis-tag, with the NEXT: tag.
+
+ You have {secret_value} chocolates.
+
+ The list of players are [A0, A1, A2, B0, B1, B2, C0, C1, C2].
+
+ Your first character of your name is your team, and your second character denotes that you are a team leader if it is 0.
+ CONSTRAINTS: Team members can only talk within the team, whilst team leader can talk to team leaders of other teams but not team members of other teams.
+
+ You can use NEXT: to suggest the next speaker. You have to respect the CONSTRAINTS, and can only suggest one player from the list of players, i.e., do not suggest A3 because A3 is not from the list of players.
+ Team leaders must make sure that they know the sum of the individual chocolate count of all three players in their own team, i.e., A0 is responsible for team A only.
+
+ Keep track of the player's tally using a JSON format so that others can check the total tally. Use
+ A0:?, A1:?, A2:?,
+ B0:?, B1:?, B2:?,
+ C0:?, C1:?, C2:?
+
+ If you are the team leader, you should aggregate your team's total chocolate count to cooperate.
+ Once the team leader know their team's tally, they can suggest another team leader for them to find their team tally, because we need all three team tallys to succeed.
+ Use NEXT: to suggest the next speaker, e.g., NEXT: A0.
+
+ Once we have the total tally from all nine players, sum up all three teams' tally, then terminate the discussion using DONE!.
+ """,
+ llm_config=config_list,
+ )
+ )
+ speaker_transitions_dict[agents[-1]] = []
+
+
+ # For each team, create the team's internal transitions (any agent to any agent in a team)
+ for source_node in range(3):
+ source_id = f"{prefix}{source_node}"
+ for target_node in range(3):
+ target_id = f"{prefix}{target_node}"
+ if source_node != target_node: # To avoid self-loops
+ speaker_transitions_dict[get_agent_by_name(agents, source_id)].append(
+ get_agent_by_name(agents, name=target_id)
+ )
+```
+
+Here we create the transitions between the three team leaders who can transition to any other team leader.
+
+```python
+# Adding edges between teams
+speaker_transitions_dict[get_agent_by_name(agents, "A0")].append(get_agent_by_name(agents, name="B0"))
+speaker_transitions_dict[get_agent_by_name(agents, "A0")].append(get_agent_by_name(agents, name="C0"))
+speaker_transitions_dict[get_agent_by_name(agents, "B0")].append(get_agent_by_name(agents, name="A0"))
+speaker_transitions_dict[get_agent_by_name(agents, "B0")].append(get_agent_by_name(agents, name="C0"))
+speaker_transitions_dict[get_agent_by_name(agents, "C0")].append(get_agent_by_name(agents, name="A0"))
+speaker_transitions_dict[get_agent_by_name(agents, "C0")].append(get_agent_by_name(agents, name="B0"))
+```
+
+#### Create GroupChat and run
+Here we pass the graph to the GroupChat when creating it using the `allowed_or_disallowed_speaker_transitions` parameter. We also set `speaker_transitions_type` to "allowed" to indicate that the provided transitions are allowed (the alternative is "disallowed").
+```python
+group_chat = GroupChat(
+ agents=agents,
+ messages=[],
+ max_round=20,
+ allowed_or_disallowed_speaker_transitions=speaker_transitions_dict,
+ speaker_transitions_type="allowed",
+)
+
+# The GroupChatManager will end the chat when DONE! is received, which agents can say when all counts are done
+manager = GroupChatManager(
+ groupchat=group_chat,
+ llm_config=config_list,
+ code_execution_config=False,
+ is_termination_msg=lambda x: "DONE!" in (x.get("content", "") or "").upper(),
+)
+
+# Start the game from Team A's team leader.
+agents[0].initiate_chat(
+ manager,
+ message="""
+ There are 9 players in this game, split equally into Teams A, B, C. Therefore each team has 3 players, including the team leader.
+ The task is to find out the sum of chocolate count from all nine players. I will now start with my team.
+ NEXT: A1
+ """,
+)
+```
+
+Looking at the output, we can see there were 20 chocolates across all the teams and the agents worked through it methodically team by team.
+```console
+A0 (to chat_manager):
+
+
+There are 9 players in this game, split equally into Teams A, B, C. Therefore each team has 3 players, including the team leader.
+The task is to find out the sum of chocolate count from all nine players. I will now start with my team.
+NEXT: A1
+
+--------------------------------------------------------------------------------
+
+Next speaker: A1
+
+
+>>>>>>>> USING AUTO REPLY...
+A1 (to chat_manager):
+
+I have 1 chocolate.
+Here is the current tally for Team A:
+
+A0: ?, A1: 1, A2: ?
+
+NEXT: A2
+
+--------------------------------------------------------------------------------
+
+Next speaker: A2
+
+
+>>>>>>>> USING AUTO REPLY...
+A2 (to chat_manager):
+
+I have 2 chocolates.
+
+Here is the updated tally for Team A:
+
+A0: ?, A1: 1, A2: 2
+
+NEXT: A0
+
+--------------------------------------------------------------------------------
+
+Next speaker: A0
+
+
+>>>>>>>> USING AUTO REPLY...
+A0 (to chat_manager):
+
+Thank you, Team A. Let's update the tally:
+
+A0: 4, A1: 1, A2: 2
+
+Team A's tally is 7 chocolates in total.
+
+I will now suggest the team leader of Team B to start determining their tally.
+
+NEXT: B0
+
+--------------------------------------------------------------------------------
+
+Next speaker: B0
+
+
+>>>>>>>> USING AUTO REPLY...
+B0 (to chat_manager):
+
+I have 4 chocolates. Let me coordinate with Team B to find out the total tally.
+
+Here is the current tally for Team B:
+
+B0: 4, B1: ?, B2: ?
+
+NEXT: B1
+
+--------------------------------------------------------------------------------
+
+Next speaker: B1
+
+
+>>>>>>>> USING AUTO REPLY...
+B1 (to chat_manager):
+
+I have 1 chocolate.
+
+Here is the updated tally for Team B:
+
+B0: 4, B1: 1, B2: ?
+
+NEXT: B2
+
+--------------------------------------------------------------------------------
+
+Next speaker: B2
+
+
+>>>>>>>> USING AUTO REPLY...
+B2 (to chat_manager):
+
+I have 1 chocolate.
+
+Here is the updated tally for Team B:
+
+B0: 4, B1: 1, B2: 1
+
+Team B's tally is 6 chocolates in total.
+
+I will now suggest the team leader of Team C to start determining their tally.
+
+NEXT: C0
+
+--------------------------------------------------------------------------------
+
+Next speaker: B0
+
+
+>>>>>>>> USING AUTO REPLY...
+B0 (to chat_manager):
+
+It seems there was a mis-tag. Please let C0, the team leader of Team C, determine their team's chocolate tally.
+
+NEXT: C0
+
+--------------------------------------------------------------------------------
+
+Next speaker: C0
+
+
+>>>>>>>> USING AUTO REPLY...
+C0 (to chat_manager):
+
+I have 3 chocolates. Let me coordinate with Team C to find out the total tally.
+
+Here is the current tally for Team C:
+
+C0: 3, C1: ?, C2: ?
+
+NEXT: C1
+
+--------------------------------------------------------------------------------
+
+Next speaker: C1
+
+
+>>>>>>>> USING AUTO REPLY...
+C1 (to chat_manager):
+
+I have 1 chocolate.
+
+Here is the updated tally for Team C:
+
+C0: 3, C1: 1, C2: ?
+
+NEXT: C2
+
+--------------------------------------------------------------------------------
+
+Next speaker: C2
+
+
+>>>>>>>> USING AUTO REPLY...
+C2 (to chat_manager):
+
+I have 3 chocolates.
+
+Here is the updated tally for Team C:
+
+C0: 3, C1: 1, C2: 3
+
+Team C's tally is 7 chocolates in total.
+
+Since all teams have their tally ready, let's sum up the total:
+
+Team A's tally: 7 chocolates
+Team B's tally: 6 chocolates
+Team C's tally: 7 chocolates
+
+Total tally: 7 + 6 + 7 = 20 chocolates
+
+DONE!
+
+--------------------------------------------------------------------------------
+```
+### More GroupChat examples
+- [GroupChat with Customized Speaker Selection Method](https://docs.ag2.ai/notebooks/agentchat_groupchat_customized)
+- [GroupChat with Coder and Visualization Critic](https://docs.ag2.ai/notebooks/agentchat_groupchat_vis)
+- [GroupChat with Retrieval-Augmented Generation](https://docs.ag2.ai/notebooks/agentchat_groupchat_RAG)
+- [Implementing Swarm with a GroupChat](https://docs.ag2.ai/notebooks/agentchat_swarm_w_groupchat_legacy)
+
+### API
+- [GroupChat](TODO)
+- [GroupChatManager](TODO)
diff --git a/website/docs/Use-Cases/walk-throughs/enhanced-swarm.md b/website/docs/Use-Cases/walk-throughs/enhanced-swarm.md
index 0668db76cf..472c6dcaca 100644
--- a/website/docs/Use-Cases/walk-throughs/enhanced-swarm.md
+++ b/website/docs/Use-Cases/walk-throughs/enhanced-swarm.md
@@ -12,16 +12,16 @@ In this walk-through of a customer service workflow, we will utilize three of th
- Conditional handoffs
#### Visualizing a Swarm
-It's useful to draw your agents, flows, context variables, and tools.
+It's useful to draw your agents, flows, context variables, and tools and then create the swarm based on the diagram.
This is the scenario we will run through, with customers able to enquire about their orders. However, we will make sure that they are authenticated to do so.
-![Swarm Enhanced Demonstration](./assets/swarm_enhanced_01.png)
+![Swarm Enhanced Demonstration](./assets/swarm-enhanced-01.png)
Key aspects of this swarm are:
-1. System messages are customised, incorporating the context of the workflow
-2. A nested chat handles the order retrieval and summarisation
+1. System messages are customized, incorporating the context of the workflow
+2. A nested chat handles the order retrieval and summarization
3. Handoffs are conditional, only being available when they are relevant
#### Setting up
@@ -32,8 +32,8 @@ import os
from typing import Any, Dict, List
from autogen import (
- AFTER_WORK,
- ON_CONDITION,
+ AfterWork,
+ OnCondition,
UpdateSystemMessage,
AfterWorkOption,
ConversableAgent,
@@ -218,7 +218,7 @@ Logged in: {logged_in}
Enquiring for Order ID: {order_id}
"""
-order_triage_agent = SwarmAgent(
+order_triage_agent = ConversableAgent(
name="order_triage_agent",
update_agent_state_before_reply=[
UpdateSystemMessage(order_triage_prompt),
@@ -229,7 +229,7 @@ order_triage_agent = SwarmAgent(
authentication_prompt = "You are an authentication agent that verifies the identity of the customer."
-authentication_agent = SwarmAgent(
+authentication_agent = ConversableAgent(
name="authentication_agent",
system_message=authentication_prompt,
functions=[login_customer_by_username],
@@ -248,7 +248,7 @@ Logged in: {logged_in}
Enquiring for Order ID: {order_id}
"""
-order_mgmt_agent = SwarmAgent(
+order_mgmt_agent = ConversableAgent(
name="order_mgmt_agent",
update_agent_state_before_reply=[
UpdateSystemMessage(order_management_prompt),
@@ -260,9 +260,9 @@ order_mgmt_agent = SwarmAgent(
#### Nested chats
For tasks that require a separate AG2 chat to occur, nested chats are a great option. This is the second enhanced feature we're covering, nested chats as hand-offs.
-In our scenario, we want to be able to find out the details of an order if a customer asks. To do this we need a couple of agents, one with access to the order database, to work together to retrieve and summarise the order details.
+In our scenario, we want to be able to find out the details of an order if a customer asks. To do this we need a couple of agents, one with access to the order database, to work together to retrieve and summarize the order details.
-Our nested chats are setup as a queue of two chats, the first is for the `order_retrieval_agent` who will extract the order information from the database and pass the details of it on to the second chat. The `order_summariser_agent` uses their LLM to format that and return it to the Swarm.
+Our nested chats are setup as a queue of two chats, the first is for the `order_retrieval_agent` who will extract the order information from the database and pass the details of it on to the second chat. The `order_summarizer_agent` uses their LLM to format that and return it to the Swarm.
```python
# NESTED CHAT - Delivery Status
@@ -272,9 +272,9 @@ order_retrieval_agent = ConversableAgent(
llm_config=llm_config,
)
-order_summariser_agent = ConversableAgent(
- name="order_summariser_agent",
- system_message="You are an order summariser agent that provides a summary of the order details.",
+order_summarizer_agent = ConversableAgent(
+ name="order_summarizer_agent",
+ system_message="You are an order summarizer agent that provides a summary of the order details.",
llm_config=llm_config,
)
@@ -297,8 +297,8 @@ nested_chat_one = {
}
nested_chat_two = {
- "recipient": order_summariser_agent,
- "message": "Summarise the order details provided in a tabulated, text-based, order sheet format",
+ "recipient": order_summarizer_agent,
+ "message": "Summarize the order details provided in a tabulated, text-based, order sheet format",
"max_turns": 1,
"summary_method": "last_msg",
}
@@ -318,52 +318,52 @@ Here we can see the use of context variable keys in the `available` parameter as
# HANDOFFS
order_triage_agent.register_hand_off(
[
- ON_CONDITION(
+ OnCondition(
target=authentication_agent,
condition="The customer is not logged in, authenticate the customer.",
available="requires_login",
),
- ON_CONDITION(
+ OnCondition(
target=order_mgmt_agent,
condition="The customer is logged in, continue with the order triage.",
available="logged_in",
),
- AFTER_WORK(AfterWorkOption.REVERT_TO_USER),
+ AfterWork(AfterWorkOption.REVERT_TO_USER),
]
)
authentication_agent.register_hand_off(
[
- ON_CONDITION(
+ OnCondition(
target=order_triage_agent,
condition="The customer is logged in, continue with the order triage.",
available="logged_in",
),
- AFTER_WORK(AfterWorkOption.REVERT_TO_USER),
+ AfterWork(AfterWorkOption.REVERT_TO_USER),
]
)
-def has_order_in_context(agent: SwarmAgent, messages: List[Dict[str, Any]]) -> bool:
+def has_order_in_context(agent: ConversableAgent, messages: List[Dict[str, Any]]) -> bool:
return agent.get_context("has_order_id")
order_mgmt_agent.register_hand_off(
[
- ON_CONDITION(
+ OnCondition(
target={
"chat_queue": chat_queue,
},
condition="Retrieve the status of the order",
available=has_order_in_context,
),
- ON_CONDITION(
+ OnCondition(
target=authentication_agent,
condition="The customer is not logged in, authenticate the customer.",
available="requires_login",
),
- ON_CONDITION(target=order_triage_agent, condition="The customer has no more enquiries about this order."),
- AFTER_WORK(AfterWorkOption.REVERT_TO_USER),
+ OnCondition(target=order_triage_agent, condition="The customer has no more enquiries about this order."),
+ AfterWork(AfterWorkOption.REVERT_TO_USER),
]
)
```
@@ -672,16 +672,16 @@ It looks like order TR13845 for a mattress has been shipped. The shipping addres
Starting a new chat....
********************************************************************************
-nested_chat_order_mgmt_agent_1 (to order_summariser_agent):
+nested_chat_order_mgmt_agent_1 (to order_summarizer_agent):
-Summarise the order details provided in a tabulated, text-based, order sheet format
+Summarize the order details provided in a tabulated, text-based, order sheet format
Context:
It looks like order TR13845 for a mattress has been shipped. The shipping address for this order is 123 Main St, State College, PA 12345. If you need further details about this order, just let me know!
--------------------------------------------------------------------------------
>>>>>>>> USING AUTO REPLY...
-order_summariser_agent (to nested_chat_order_mgmt_agent_1):
+order_summarizer_agent (to nested_chat_order_mgmt_agent_1):
'''
Order Summary:
@@ -721,7 +721,7 @@ If you have any more questions or need further assistance, feel free to ask!
--------------------------------------------------------------------------------
```
-Our nested chats queue ran, with the `order_retrieval_agent` validating and retrieving the order and the `order_summariser_agent` taking those details and summarizing them before returning them to the Swarm.
+Our nested chats queue ran, with the `order_retrieval_agent` validating and retrieving the order and the `order_summarizer_agent` taking those details and summarizing them before returning them to the Swarm.
Finally, with no more queries we return back to the triage agent and the workflow is complete.
@@ -761,16 +761,21 @@ Using the three enhanced features of AG2's Swarm, we were able to control the fl
Here's the flow above.
At the beginning, we weren't logged in and there was only one path.
-![Swarm Enhanced Demonstration](./assets/swarm_enhanced_02.png)
+![Swarm Enhanced Demonstration](./assets/swarm-enhanced-02.png)
After we logged in, we needed to get the order id.
-![Swarm Enhanced Demonstration](./assets/swarm_enhanced_03.png)
+![Swarm Enhanced Demonstration](./assets/swarm-enhanced-03.png)
With the order id, we can handle order enquiries and retrieve order details.
-![Swarm Enhanced Demonstration](./assets/swarm_enhanced_04.png)
+![Swarm Enhanced Demonstration](./assets/swarm-enhanced-04.png)
-
-
-### Need more examples
+### More Swarm examples
- [Introduction to Swarm notebook](https://docs.ag2.ai/notebooks/agentchat_swarm)
- [Swarm with GraphRAG notebook](https://docs.ag2.ai/notebooks/agentchat_swarm_graphrag_trip_planner)
+
+### API
+
+- [initiate_swarm_chat](TODO)
+- [OnCondition](TODO)
+- [AfterWork](TODO)
+- [UpdateSystemMessage](TODO)
diff --git a/website/docs/Use-Cases/walk-throughs/rag.md b/website/docs/Use-Cases/walk-throughs/rag.md
index faf5161f10..0ed25daa0e 100644
--- a/website/docs/Use-Cases/walk-throughs/rag.md
+++ b/website/docs/Use-Cases/walk-throughs/rag.md
@@ -1,13 +1,13 @@
# RAG
::tip
-Want an agent that can do this for you? See our walk-through on DocumentAgent.
+Want an agent that can do this for you? See [DocumentAgent](TODO) in our Reference Agents section.
::
-Retrieval-Augmented Generation is a technique to improve LLM-generated responses by giving it additional knowledge.
+Retrieval-Augmented Generation is a technique to improve LLM-generated responses by giving it additional knowledge. This typically involves gathering the information and injecting it into an agent's system message for use by their LLM.
There are a number of ways to incorporate RAG into your AG2 workflow and agents:
-- Use a reference agent, DocumentAgent
+- Use an AG2 reference agent, DocumentAgent
- Add RAG capabilities to an agent
- Add RAG tools to an agent
- Incorporate context into an agent's system message (manually and automatically)
@@ -20,12 +20,119 @@ DocumentAgent takes the hassle out of loading, parsing, storing, and querying kn
## 2. Add RAG capabilities to an agent
-...
+AG2 allows you to add *capabilities* to agents and an example of a capability is RAG using a graph database.
+
+It only takes two steps to do this:
+1. Create the capability
+2. Add it to the agent
+
+See the notebooks associated with the capabilities below for walk-throughs.
+
+#### RAG Capability: Neo4j GraphRAG
+Based on AG2's base GraphRAG capability, this Neo4j GraphRAG capability allows the embedding and querying of information with a Neo4j graph database.
+
+See the [Using Neo4j's graph database with AG2 agents for Q&A notebook](https://docs.ag2.ai/notebooks/agentchat_graph_rag_neo4j_native).
+
+#### RAG Capability: FalkorDB GraphRAG
+Also based on AG2's base GraphRAG capability, this capability uses a FalkorDB GraphRAG database.
+
+See the [Using FalkorGraphRagCapability with agents for GraphRAG Q&A notebook](https://docs.ag2.ai/notebooks/agentchat_graph_rag_falkordb)
+
+::tip
+If you need a capability for a different GraphRAG database, consider building a capability similar to these using our [GraphRagCapability](https://docs.ag2.ai/docs/reference/agentchat/contrib/graph_rag/graph_rag_capability) base class.
+::
## 3. Add RAG tools to an agent
-...
+[COMING WITH DocumentAgent - COMPLETE THIS WHEN READY](TODO)
## 4. Incorporating context into an Agent's system message
-...
+ConversableAgent has a number of hooks that get run before an agent replies. You can utilize the `update_agent_state` hook to run a function that updates your agent's system message with some context before it goes to the LLM.
+
+Within the function use the [ConversableAgent.update_system_message](https://docs.ag2.ai/docs/reference/agentchat/conversable_agent#update-system-message) method to update the system message.
+
+Let's walk through a simple example that puts a listing of the files in the current directory into an agent's system message and asks an LLM to explain them.
+
+We start with our imports, LLM configuration, and the system message template, into which we'll inject the file listing.
+```python
+import os
+from autogen import ConversableAgent, UserProxyAgent
+
+config_list = {"model": "gpt-4o", "api_key": os.environ["OPENAI_API_KEY"]}
+
+base_system_message = "You are a helpful agent, answering questions about the files in a directory:\n{filelisting}"
+```
+
+Here's the function we'll attach to the hook, it gets all files in the current directory and updates the associated agent's system message accordingly.
+
+```python
+def give_agent_file_listing(agent: ConversableAgent, messages: list[dict]) -> None:
+ # Get the list of files in the current directory
+ files = os.listdir()
+
+ # Put them in a string
+ files_str = "\n".join(files)
+
+ # Use the system message template and update the agent's system message to include the file listing
+ agent.update_system_message(base_system_message.format(filelisting=files_str))
+```
+
+Now we create the agent and attach the hook.
+
+```python
+files_agent = ConversableAgent(
+ name="files_agent",
+ system_message="""You are a helpful agent, answering questions about the files in a directory.""",
+ llm_config=config_list,
+ )
+
+files_agent.register_hook(
+ hookable_method="update_agent_state",
+ hook=give_agent_file_listing,
+ )
+```
+
+Finally we create a human-in-the-loop agent and ask our `files_agent` about the files.
+
+```python
+human = ConversableAgent(
+ name="human",
+ human_input_mode="NEVER",
+ max_consecutive_auto_reply=1,
+ )
+
+human.initiate_chat(
+ recipient=files_agent,
+ message="Tell me about the files in my directory.",
+ max_turns=1,
+ )
+```
+
+And we can see the LLM now knows about the files and directories in the current folder and is able to provide some information about them.
+
+```console
+human (to files_agent):
+
+Tell me about the files in my directory.
+
+--------------------------------------------------------------------------------
+
+>>>>>>>> USING AUTO REPLY...
+files_agent (to human):
+
+Certainly! Here's a brief explanation of each file and directory in your list:
+
+1. **.ssh**: Contains configuration files and keys related to the SSH protocol, which are used to securely log into remote systems.
+
+2. **.dotnet**: This directory is related to .NET and contains files and settings for .NET core and related development tools.
+
+3. **.config**: A directory commonly used to store user-level configuration files for applications.
+
+4. **.cache**: This directory is typically used to store cached data, which can improve the performance of the applications.
+
+5. **ag2**: A directory for the open-source AgentOS, AG2 :)
+
+These explanations are based on typical setups and usages; your specific use cases might vary. If you need more details on any item or it's a custom entry, checking the contents or configuration might be necessary for precise information.
+
+--------------------------------------------------------------------------------
+```
diff --git a/website/docs/Use-Cases/walk-throughs/tools-with-secrets.md b/website/docs/Use-Cases/walk-throughs/tools-with-secrets.md
index 70b8e209a7..277f7748c1 100644
--- a/website/docs/Use-Cases/walk-throughs/tools-with-secrets.md
+++ b/website/docs/Use-Cases/walk-throughs/tools-with-secrets.md
@@ -201,6 +201,9 @@ false
```
The same occurred for the other 3rd party tool and function, with all credentials silently injected.
-### Need more examples?
-
+### More Tool with Dependency Injection examples
See the [Tools with Dependency Injection](https://docs.ag2.ai/notebooks/tools_dependency_injection) notebook.
+
+### API
+- [BaseContext](TODO)
+- [Depends](TODO)
diff --git a/website/docs/getting-started/human-in-the-loop.md b/website/docs/getting-started/human-in-the-loop.md
index 7cc912104d..91c88da0a0 100644
--- a/website/docs/getting-started/human-in-the-loop.md
+++ b/website/docs/getting-started/human-in-the-loop.md
@@ -42,4 +42,4 @@ the_human.initiate_chat(recipient=lesson_planner, message="Today, let's introduc
2. Our `the_human` agent starts a conversation by sending a message to `lesson_planner`. An agent's `initiate_chat` method is used to start a conversation between two agents.
-Next > many-agents.md
+Next > orchestrations.md
diff --git a/website/docs/getting-started/index.md b/website/docs/getting-started/index.md
index e433d9c848..ea026508e1 100644
--- a/website/docs/getting-started/index.md
+++ b/website/docs/getting-started/index.md
@@ -5,7 +5,7 @@ Replaced by these pages, in order:
- llm-configuration.md
- conversable-agent.md
- human-in-the-loop.md
-- many-agents.md
+- orchestrations.md
- groupchat.md
- swarm.md
- tools.md
diff --git a/website/docs/getting-started/many-agents.md b/website/docs/getting-started/orchestrations.md
similarity index 100%
rename from website/docs/getting-started/many-agents.md
rename to website/docs/getting-started/orchestrations.md