diff --git a/docs/images/external/autogen/autogen_integration.png b/docs/images/external/autogen/autogen_integration.png
new file mode 100644
index 00000000..c911c887
Binary files /dev/null and b/docs/images/external/autogen/autogen_integration.png differ
diff --git a/examples/autogen/AgentChat.ipynb b/examples/autogen/AgentChat.ipynb
new file mode 100644
index 00000000..cb1a39da
--- /dev/null
+++ b/examples/autogen/AgentChat.ipynb
@@ -0,0 +1,224 @@
+{
+ "cells": [
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "id": "bb6538d8-2a5d-4a99-b2c1-7130963e4f7b",
+ "metadata": {},
+ "source": [
+ "# Adding AgentOps to an existing Autogen service\n",
+ "\n",
+ "\n",
+ "To get started, you'll need to install the AgentOps package and [set an API key](https://app.agentops.ai).\n",
+ "\n",
+ "AgentOps automatically configures itself when it's initialized, meaning your agent run data will be tracked and logged to your AgentOps account right away."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "d93f2339-7b99-4cf1-9232-c24faba49c7b",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "🖇 AgentOps: \u001b[34m\u001b[34mSession Replay: https://app.agentops.ai/drilldown?session_id=24c5d9f6-fb82-41e6-a468-2dc74a5318a3\u001b[0m\u001b[0m\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "AgentOps is now running. You can view your session in the link above\n"
+ ]
+ }
+ ],
+ "source": [
+ "import agentops\n",
+ "\n",
+ "from autogen import ConversableAgent, UserProxyAgent, config_list_from_json\n",
+ "\n",
+ "# When initializing AgentOps, you can pass in optional tags to help filter sessions\n",
+ "agentops.init(api_key=\"...\", tags=[\"simple-autogen-example\"])\n",
+ "\n",
+ "print('AgentOps is now running. You can view your session in the link above')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "7858f0f6-9aca-4cdb-a514-9fbf7e353d50",
+ "metadata": {},
+ "source": [
+ "AutoGen will now start automatically tracking\n",
+ "\n",
+ "* LLM prompts and completions\n",
+ "* Token usage and costs\n",
+ "* Agent names and actions\n",
+ "* Correspondence between agents\n",
+ "* Tool usage\n",
+ "* Errors"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "2962d990-f7ef-43d8-ba09-d29bd8356d9f",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\u001b[33magent\u001b[0m (to user):\n",
+ "\n",
+ "How can I help you today?\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n"
+ ]
+ },
+ {
+ "name": "stdin",
+ "output_type": "stream",
+ "text": [
+ "Provide feedback to agent. Press enter to skip and use auto-reply, or type 'exit' to end the conversation: Tell me a joke about AgentOps\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\u001b[33muser\u001b[0m (to agent):\n",
+ "\n",
+ "Tell me a joke about AgentOps\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
+ "\u001b[33magent\u001b[0m (to user):\n",
+ "\n",
+ "Why don't AgentOps teams ever play hide and seek?\n",
+ "\n",
+ "Because good luck hiding when they always know where everyone is supposed to be!\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n"
+ ]
+ },
+ {
+ "name": "stdin",
+ "output_type": "stream",
+ "text": [
+ "Provide feedback to agent. Press enter to skip and use auto-reply, or type 'exit' to end the conversation: Another\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\u001b[33muser\u001b[0m (to agent):\n",
+ "\n",
+ "Another\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
+ "\u001b[33magent\u001b[0m (to user):\n",
+ "\n",
+ "Why did the AgentOps team bring a ladder to work?\n",
+ "\n",
+ "Because they're always reaching for high-level optimizations!\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n"
+ ]
+ },
+ {
+ "name": "stdin",
+ "output_type": "stream",
+ "text": [
+ "Provide feedback to agent. Press enter to skip and use auto-reply, or type 'exit' to end the conversation: exit\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "🖇 AgentOps: This run's cost $0.001080\n",
+ "🖇 AgentOps: \u001b[34m\u001b[34mSession Replay: https://app.agentops.ai/drilldown?session_id=24c5d9f6-fb82-41e6-a468-2dc74a5318a3\u001b[0m\u001b[0m\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Success! Visit your AgentOps dashboard to see the replay\n"
+ ]
+ }
+ ],
+ "source": [
+ "\n",
+ "# Define an openai api key for the agent configurations\n",
+ "openai_api_key = \"...\"\n",
+ "config_list = [\n",
+ " {\n",
+ " \"model\": \"gpt-4-turbo\",\n",
+ " \"api_key\": openai_api_key,\n",
+ " \"tags\": [\"gpt-4\", \"tool\"]\n",
+ " }\n",
+ "]\n",
+ "\n",
+ "# Create the agent that uses the LLM.\n",
+ "assistant = ConversableAgent(\"agent\", llm_config={\"config_list\": config_list})\n",
+ "\n",
+ "# Create the agent that represents the user in the conversation.\n",
+ "user_proxy = UserProxyAgent(\"user\", code_execution_config=False)\n",
+ "\n",
+ "# Let the assistant start the conversation. It will end when the user types \"exit\".\n",
+ "assistant.initiate_chat(user_proxy, message=\"How can I help you today?\")\n",
+ "\n",
+ "# Close your AgentOps session to indicate that it completed.\n",
+ "agentops.end_session(\"Success\")\n",
+ "print('Success! Visit your AgentOps dashboard to see the replay')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "7b422137-903a-41ef-a4ca-95b50aea4138",
+ "metadata": {},
+ "source": [
+ "You can view data on this run at [app.agentops.ai](https://app.agentops.ai).\n",
+ "\n",
+ "The dashboard will display LLM events for each message sent by each agent, including those made by the human user."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "72993a75-1031-4874-aa26-0ef816a3256c",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.5"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/examples/autogen/MathAgent.ipynb b/examples/autogen/MathAgent.ipynb
new file mode 100644
index 00000000..5dbc5557
--- /dev/null
+++ b/examples/autogen/MathAgent.ipynb
@@ -0,0 +1,321 @@
+{
+ "cells": [
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "id": "bb6538d8-2a5d-4a99-b2c1-7130963e4f7b",
+ "metadata": {},
+ "source": [
+ "# Adding AgentOps to an existing Autogen service\n",
+ "\n",
+ "\n",
+ "To get started, you'll need to install the AgentOps package and [set an API key](https://app.agentops.ai).\n",
+ "\n",
+ "AgentOps automatically configures itself when it's initialized, meaning your agent run data will be tracked and logged to your AgentOps account right away."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "d93f2339-7b99-4cf1-9232-c24faba49c7b",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "🖇 AgentOps: \u001b[34m\u001b[34mSession Replay: https://app.agentops.ai/drilldown?session_id=51556b00-428b-4cae-b815-1320ebc7e810\u001b[0m\u001b[0m\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "AgentOps is now running. You can view your session in the link above\n"
+ ]
+ }
+ ],
+ "source": [
+ "from typing import Annotated, Literal\n",
+ "from autogen import ConversableAgent, UserProxyAgent, config_list_from_json, register_function\n",
+ "import agentops\n",
+ "\n",
+ "agentops.init(api_key=\"...\", tags=[\"autogen-tool-example\"])\n",
+ "\n",
+ "print('AgentOps is now running. You can view your session in the link above')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "7858f0f6-9aca-4cdb-a514-9fbf7e353d50",
+ "metadata": {},
+ "source": [
+ "AutoGen will now start automatically tracking\n",
+ "\n",
+ "* LLM prompts and completions\n",
+ "* Token usage and costs\n",
+ "* Agent names and actions\n",
+ "* Correspondence between agents\n",
+ "* Tool usage\n",
+ "* Errors"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "2962d990-f7ef-43d8-ba09-d29bd8356d9f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Define an openai api key for the agent configurations\n",
+ "openai_api_key = \"...\"\n",
+ "config_list = [\n",
+ " {\n",
+ " \"model\": \"gpt-4-turbo\",\n",
+ " \"api_key\": openai_api_key,\n",
+ " \"tags\": [\"gpt-4\", \"tool\"]\n",
+ " }\n",
+ "]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "9e4dfe37-85e0-4035-a314-3459c6e378c4",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "🖇 AgentOps: Cannot start session - session already started\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
+ "\n",
+ "What is (1423 - 123) / 3 + (32 + 23) * 5?\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/Users/reibs/Projects/autogen/autogen/agentchat/conversable_agent.py:2489: UserWarning: Function 'calculator' is being overridden.\n",
+ " warnings.warn(f\"Function '{tool_sig['function']['name']}' is being overridden.\", UserWarning)\n",
+ "/Users/reibs/Projects/autogen/autogen/agentchat/conversable_agent.py:2408: UserWarning: Function 'calculator' is being overridden.\n",
+ " warnings.warn(f\"Function '{name}' is being overridden.\", UserWarning)\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\u001b[33mAssistant\u001b[0m (to User):\n",
+ "\n",
+ "\u001b[32m***** Suggested tool call (call_pSJXJKu1qbfRV4SVNHzTaG1z): calculator *****\u001b[0m\n",
+ "Arguments: \n",
+ "{\"a\": 1423, \"b\": 123, \"operator\": \"-\"}\n",
+ "\u001b[32m***************************************************************************\u001b[0m\n",
+ "\u001b[32m***** Suggested tool call (call_kHRXi8vq5XsZSrGDnVTA1oy7): calculator *****\u001b[0m\n",
+ "Arguments: \n",
+ "{\"a\": 32, \"b\": 23, \"operator\": \"+\"}\n",
+ "\u001b[32m***************************************************************************\u001b[0m\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[35m\n",
+ ">>>>>>>> EXECUTING FUNCTION calculator...\u001b[0m\n",
+ "\u001b[35m\n",
+ ">>>>>>>> EXECUTING FUNCTION calculator...\u001b[0m\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
+ "\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
+ "\n",
+ "\u001b[32m***** Response from calling tool (call_pSJXJKu1qbfRV4SVNHzTaG1z) *****\u001b[0m\n",
+ "1300\n",
+ "\u001b[32m**********************************************************************\u001b[0m\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
+ "\n",
+ "\u001b[32m***** Response from calling tool (call_kHRXi8vq5XsZSrGDnVTA1oy7) *****\u001b[0m\n",
+ "55\n",
+ "\u001b[32m**********************************************************************\u001b[0m\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
+ "\u001b[33mAssistant\u001b[0m (to User):\n",
+ "\n",
+ "\u001b[32m***** Suggested tool call (call_srDG3bYDpkdoIVn1mVVJNTJj): calculator *****\u001b[0m\n",
+ "Arguments: \n",
+ "{\"a\": 1300, \"b\": 3, \"operator\": \"/\"}\n",
+ "\u001b[32m***************************************************************************\u001b[0m\n",
+ "\u001b[32m***** Suggested tool call (call_jXJHjxZlnsHEbLaG4hQXUX1v): calculator *****\u001b[0m\n",
+ "Arguments: \n",
+ "{\"a\": 55, \"b\": 5, \"operator\": \"*\"}\n",
+ "\u001b[32m***************************************************************************\u001b[0m\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[35m\n",
+ ">>>>>>>> EXECUTING FUNCTION calculator...\u001b[0m\n",
+ "\u001b[35m\n",
+ ">>>>>>>> EXECUTING FUNCTION calculator...\u001b[0m\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
+ "\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
+ "\n",
+ "\u001b[32m***** Response from calling tool (call_srDG3bYDpkdoIVn1mVVJNTJj) *****\u001b[0m\n",
+ "433\n",
+ "\u001b[32m**********************************************************************\u001b[0m\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
+ "\n",
+ "\u001b[32m***** Response from calling tool (call_jXJHjxZlnsHEbLaG4hQXUX1v) *****\u001b[0m\n",
+ "275\n",
+ "\u001b[32m**********************************************************************\u001b[0m\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
+ "\u001b[33mAssistant\u001b[0m (to User):\n",
+ "\n",
+ "\u001b[32m***** Suggested tool call (call_jKyiL6oizDZPfX16hhVi6pa3): calculator *****\u001b[0m\n",
+ "Arguments: \n",
+ "{\"a\":433,\"b\":275,\"operator\":\"+\"}\n",
+ "\u001b[32m***************************************************************************\u001b[0m\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[35m\n",
+ ">>>>>>>> EXECUTING FUNCTION calculator...\u001b[0m\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
+ "\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
+ "\n",
+ "\u001b[32m***** Response from calling tool (call_jKyiL6oizDZPfX16hhVi6pa3) *****\u001b[0m\n",
+ "708\n",
+ "\u001b[32m**********************************************************************\u001b[0m\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
+ "\u001b[33mAssistant\u001b[0m (to User):\n",
+ "\n",
+ "The result of the expression \\((1423 - 123) / 3 + (32 + 23) * 5\\) is 708. \n",
+ "\n",
+ "'TERMINATE'\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "🖇 AgentOps: This run's cost $0.000600\n",
+ "🖇 AgentOps: \u001b[34m\u001b[34mSession Replay: https://app.agentops.ai/drilldown?session_id=51556b00-428b-4cae-b815-1320ebc7e810\u001b[0m\u001b[0m\n"
+ ]
+ }
+ ],
+ "source": [
+ "Operator = Literal[\"+\", \"-\", \"*\", \"/\"]\n",
+ "\n",
+ "def calculator(a: int, b: int, operator: Annotated[Operator, \"operator\"]) -> int:\n",
+ " if operator == \"+\":\n",
+ " return a + b\n",
+ " elif operator == \"-\":\n",
+ " return a - b\n",
+ " elif operator == \"*\":\n",
+ " return a * b\n",
+ " elif operator == \"/\":\n",
+ " return int(a / b)\n",
+ " else:\n",
+ " raise ValueError(\"Invalid operator\")\n",
+ "\n",
+ "# Create the agent that uses the LLM.\n",
+ "assistant = ConversableAgent(\n",
+ " name=\"Assistant\",\n",
+ " system_message=\"You are a helpful AI assistant. \"\n",
+ " \"You can help with simple calculations. \"\n",
+ " \"Return 'TERMINATE' when the task is done.\",\n",
+ " llm_config={\"config_list\": config_list},\n",
+ ")\n",
+ "\n",
+ "# The user proxy agent is used for interacting with the assistant agent\n",
+ "# and executes tool calls.\n",
+ "user_proxy = ConversableAgent(\n",
+ " name=\"User\",\n",
+ " llm_config=False,\n",
+ " is_termination_msg=lambda msg: msg.get(\"content\") is not None and \"TERMINATE\" in msg[\"content\"],\n",
+ " human_input_mode=\"NEVER\",\n",
+ ")\n",
+ "\n",
+ "assistant.register_for_llm(name=\"calculator\", description=\"A simple calculator\")(calculator)\n",
+ "user_proxy.register_for_execution(name=\"calculator\")(calculator)\n",
+ "\n",
+ "# Register the calculator function to the two agents.\n",
+ "register_function(\n",
+ " calculator,\n",
+ " caller=assistant, # The assistant agent can suggest calls to the calculator.\n",
+ " executor=user_proxy, # The user proxy agent can execute the calculator calls.\n",
+ " name=\"calculator\", # By default, the function name is used as the tool name.\n",
+ " description=\"A simple calculator\", # A description of the tool.\n",
+ ")\n",
+ "\n",
+ "# Let the assistant start the conversation. It will end when the user types \"exit\".\n",
+ "user_proxy.initiate_chat(assistant, message=\"What is (1423 - 123) / 3 + (32 + 23) * 5?\")\n",
+ "\n",
+ "agentops.end_session(\"Success\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "f67b0305-1247-489e-b1b0-829127af76d3",
+ "metadata": {},
+ "source": [
+ "You can see your run in action at [app.agentops.ai](https://app.agentops.ai). In this example, the AgentOps dashboard will show:\n",
+ "\n",
+ "* Agents talking to each other\n",
+ "* Each use of the calculator tool\n",
+ "* Each call to OpenAI for LLM use"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0b8919ec-ff81-4c94-95de-0d2c5dabbdd9",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.5"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}