From fc99a6f3fd835102180c18ddbeb68bf9ae7db8e3 Mon Sep 17 00:00:00 2001 From: Howard Gil Date: Fri, 9 Aug 2024 04:56:55 -0700 Subject: [PATCH] Updates to notebook testing automation (#342) * Adding notebook testing automation (#340) * Fixing path in GitHub action * Renaming job action and updating notebook * Testing new notebook * Standardizing notebooks * More updating more testing * More standardizing * Updating more notebooks * Testing all ipynb directly under examples * Standardizing notebooks * Updating notebooks * adding branch to action * typo * Updating Action to read agentops.log for each notebook. Fixing recording-events.ipynb * Fixing Action to save agentops.log * Fixing Action * iterating workflow * Should continue executing notebooks when one fails * Workflow now for all ipynb. Fixing lots of notebooks * Fixing notebooks * Adding multion key * workflow finishing touches --- .github/workflows/test-notebooks.yml | 77 +++++ .../multion/Step_by_step_web_browsing.html | 4 +- examples/autogen/AgentChat.ipynb | 176 +++++------ examples/autogen/MathAgent.ipynb | 244 ++++----------- examples/cohere/cohere_example.ipynb | 152 +++++++++ examples/cohere_example.py | 50 --- examples/crew/job_posting.ipynb | 14 - .../{ => langchain}/langchain_examples.ipynb | 296 ++++++++++-------- examples/litellm/litellm_example.ipynb | 140 +++++++++ examples/multi_agent_example.ipynb | 46 ++- examples/multi_agent_groq_example.ipynb | 275 ++++------------ examples/multi_session_llm.ipynb | 245 +++++++++------ .../multion/Autonomous_web_browsing.ipynb | 76 +++-- examples/multion/Sample_browsing_agent.ipynb | 53 +++- .../multion/Step_by_step_web_browsing.ipynb | 90 ++++-- examples/multion/Webpage_data_retrieval.ipynb | 65 +++- examples/openai-gpt.ipynb | 2 +- examples/recording-events.ipynb | 78 ++++- .../agentchat_agentops.ipynb | 83 +++-- .../core_manual_tests/upsert_events.py.ipynb | 50 --- 20 files changed, 1248 insertions(+), 968 deletions(-) create mode 100644 
.github/workflows/test-notebooks.yml create mode 100644 examples/cohere/cohere_example.ipynb delete mode 100644 examples/cohere_example.py rename examples/{ => langchain}/langchain_examples.ipynb (75%) create mode 100644 examples/litellm/litellm_example.ipynb delete mode 100644 tests/core_manual_tests/upsert_events.py.ipynb diff --git a/.github/workflows/test-notebooks.yml b/.github/workflows/test-notebooks.yml new file mode 100644 index 000000000..3d2d2ce98 --- /dev/null +++ b/.github/workflows/test-notebooks.yml @@ -0,0 +1,77 @@ +name: Test Notebooks +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] +jobs: + test-notebooks: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] + fail-fast: false + steps: + - uses: actions/checkout@v4 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -U jupyter + - name: Create .env file + run: | + echo "OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }}" >> .env + echo "AGENTOPS_API_KEY=${{ secrets.AGENTOPS_API_KEY }}" >> .env + echo "CO_API_KEY=${{ secrets.CO_API_KEY }}" >> .env + echo "GROQ_API_KEY=${{ secrets.GROQ_API_KEY }}" >> .env + echo "MULTION_API_KEY=${{ secrets.MULTION_API_KEY }}" >> .env + echo "SERPER_API_KEY=${{ secrets.SERPER_API_KEY }}" >> .env + - name: Run notebooks and check for errors + run: | + mkdir -p logs + exit_code=0 + for notebook in $(find . 
-name '*.ipynb'); do + notebook_name=$(basename "$notebook" .ipynb) + notebook_path=$(realpath "$notebook") + notebook_dir=$(dirname "$notebook_path") + + # Remove any existing agentops.log before running the notebook + rm -f "${notebook_dir}/agentops.log" + + # Run the notebook + jupyter execute "$notebook_path" || true + + # Check if agentops.log was created + if [ -f "${notebook_dir}/agentops.log" ]; then + dest_log="logs/agentops-${notebook_name}.log" + mv "${notebook_dir}/agentops.log" "$dest_log" + + # Check agentops log for errors or warnings + if grep -E "ERROR|WARNING" "$dest_log"; then + echo "Errors or warnings found in $dest_log for Python ${{ matrix.python-version }}" + exit_code=1 + else + echo "No errors or warnings found in $dest_log for Python ${{ matrix.python-version }}" + fi + else + echo "No agentops.log generated for $notebook_name" + fi + done + + # Check if any logs were found + if [ $(find logs -name 'agentops-*.log' | wc -l) -eq 0 ]; then + echo "No agentops.log files were generated for any notebook" + fi + + exit $exit_code + + - name: Upload logs as artifacts + uses: actions/upload-artifact@v4 + if: always() + with: + name: notebook-logs-${{ matrix.python-version }} + path: logs/agentops-*.log + if-no-files-found: warn \ No newline at end of file diff --git a/docs/v1/examples/notebooks/multion/Step_by_step_web_browsing.html b/docs/v1/examples/notebooks/multion/Step_by_step_web_browsing.html index 09a3f8300..c7c5143d2 100644 --- a/docs/v1/examples/notebooks/multion/Step_by_step_web_browsing.html +++ b/docs/v1/examples/notebooks/multion/Step_by_step_web_browsing.html @@ -330,8 +330,8 @@

Step Stream

import os
 
-os.environ["MULTION_API_KEY"] = "e8cbbd0f8fa042f49f267a44bf97425c"
-os.environ["AGENTOPS_API_KEY"] = "a640373b-30ae-4655-a1f3-5caa882a8721"
+os.environ["MULTION_API_KEY"] = +os.environ["AGENTOPS_API_KEY"] =
\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "d93f2339-7b99-4cf1-9232-c24faba49c7b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
     "# When initializing AgentOps, you can pass in optional tags to help filter sessions\n",
-    "agentops.init(api_key=\"...\", tags=[\"simple-autogen-example\"])\n",
+    "agentops.init(AGENTOPS_API_KEY, default_tags=[\"simple-autogen-example\"])\n",
     "\n",
     "print(\"AgentOps is now running. You can view your session in the link above\")"
    ]
@@ -63,7 +105,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 5,
    "id": "2962d990-f7ef-43d8-ba09-d29bd8356d9f",
    "metadata": {},
    "outputs": [
@@ -71,95 +113,29 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "\u001b[33magent\u001b[0m (to user):\n",
-      "\n",
-      "How can I help you today?\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Provide feedback to agent. Press enter to skip and use auto-reply, or type 'exit' to end the conversation:  Tell me a joke about AgentOps\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\u001b[33muser\u001b[0m (to agent):\n",
-      "\n",
-      "Tell me a joke about AgentOps\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n",
+      "\u001b[31m\n",
+      ">>>>>>>> NO HUMAN INPUT RECEIVED.\u001b[0m\n",
       "\u001b[31m\n",
       ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
-      "\u001b[33magent\u001b[0m (to user):\n",
-      "\n",
-      "Why don't AgentOps teams ever play hide and seek?\n",
-      "\n",
-      "Because good luck hiding when they always know where everyone is supposed to be!\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Provide feedback to agent. Press enter to skip and use auto-reply, or type 'exit' to end the conversation:  Another\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
       "\u001b[33muser\u001b[0m (to agent):\n",
       "\n",
-      "Another\n",
+      "\n",
       "\n",
       "--------------------------------------------------------------------------------\n",
       "\u001b[31m\n",
       ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
       "\u001b[33magent\u001b[0m (to user):\n",
       "\n",
-      "Why did the AgentOps team bring a ladder to work?\n",
-      "\n",
-      "Because they’re always reaching for high-level optimizations!\n",
+      "It seems there might still be an issue. If you need assistance or have questions later on, don't hesitate to reach out. I'm here to help whenever you're ready!\n",
       "\n",
       "--------------------------------------------------------------------------------\n"
      ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Provide feedback to agent. Press enter to skip and use auto-reply, or type 'exit' to end the conversation:  exit\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "πŸ–‡ AgentOps: This run's cost $0.001080\n",
-      "πŸ–‡ AgentOps: \u001b[34m\u001b[34mSession Replay: https://app.agentops.ai/drilldown?session_id=24c5d9f6-fb82-41e6-a468-2dc74a5318a3\u001b[0m\u001b[0m\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Success! Visit your AgentOps dashboard to see the replay\n"
-     ]
     }
    ],
    "source": [
-    "# Define an openai api key for the agent configurations\n",
-    "openai_api_key = \"...\"\n",
+    "# Define model, openai api key, tags, etc in the agent configuration\n",
     "config_list = [\n",
-    "    {\"model\": \"gpt-4-turbo\", \"api_key\": openai_api_key, \"tags\": [\"gpt-4\", \"tool\"]}\n",
+    "    {\"model\": \"gpt-4-turbo\", \"api_key\": OPENAI_API_KEY, \"tags\": [\"gpt-4\", \"tool\"]}\n",
     "]\n",
     "\n",
     "# Create the agent that uses the LLM.\n",
@@ -185,14 +161,6 @@
     "\n",
     "The dashboard will display LLM events for each message sent by each agent, including those made by the human user."
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "72993a75-1031-4874-aa26-0ef816a3256c",
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {
@@ -211,7 +179,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.5"
+   "version": "3.12.3"
   }
  },
  "nbformat": 4,
diff --git a/examples/autogen/MathAgent.ipynb b/examples/autogen/MathAgent.ipynb
index d7100c30e..c0e0c99d8 100644
--- a/examples/autogen/MathAgent.ipynb
+++ b/examples/autogen/MathAgent.ipynb
@@ -14,33 +14,77 @@
     "AgentOps automatically configures itself when it's initialized meaning your agent run data will be tracked and logged to your AgentOps account right away."
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "083244fa",
+   "metadata": {},
+   "source": [
+    "First let's install the required packages"
+   ]
+  },
   {
    "cell_type": "code",
-   "execution_count": 1,
-   "id": "d93f2339-7b99-4cf1-9232-c24faba49c7b",
+   "execution_count": null,
+   "id": "9c8104ad",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install -U pyautogen\n",
+    "%pip install -U agentops\n",
+    "%pip install -U python-dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "cc44e459",
+   "metadata": {},
+   "source": [
+    "Then import them"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "7672f591",
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "πŸ–‡ AgentOps: \u001b[34m\u001b[34mSession Replay: https://app.agentops.ai/drilldown?session_id=51556b00-428b-4cae-b815-1320ebc7e810\u001b[0m\u001b[0m\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "AgentOps is now running. You can view your session in the link above\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
+    "from autogen import ConversableAgent\n",
     "from typing import Annotated, Literal\n",
     "from autogen import ConversableAgent, register_function\n",
     "import agentops\n",
-    "\n",
-    "agentops.init(api_key=\"...\", tags=[\"autogen-tool-example\"])\n",
+    "import os\n",
+    "from dotenv import load_dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "24f8bd70",
+   "metadata": {},
+   "source": [
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9eeaef34",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "load_dotenv()\n",
+    "OPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\") or \"\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "d93f2339-7b99-4cf1-9232-c24faba49c7b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "agentops.init(AGENTOPS_API_KEY, default_tags=[\"autogen-tool-example\"])\n",
     "\n",
     "print(\"AgentOps is now running. You can view your session in the link above\")"
    ]
@@ -62,165 +106,23 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": null,
    "id": "2962d990-f7ef-43d8-ba09-d29bd8356d9f",
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Define an openai api key for the agent configurations\n",
-    "openai_api_key = \"...\"\n",
+    "# Define model, openai api key, tags, etc in the agent configuration\n",
     "config_list = [\n",
-    "    {\"model\": \"gpt-4-turbo\", \"api_key\": openai_api_key, \"tags\": [\"gpt-4\", \"tool\"]}\n",
+    "    {\"model\": \"gpt-4-turbo\", \"api_key\": OPENAI_API_KEY, \"tags\": [\"gpt-4\", \"tool\"]}\n",
     "]"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": null,
    "id": "9e4dfe37-85e0-4035-a314-3459c6e378c4",
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "πŸ–‡ AgentOps: Cannot start session - session already started\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\u001b[33mUser\u001b[0m (to Assistant):\n",
-      "\n",
-      "What is (1423 - 123) / 3 + (32 + 23) * 5?\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n",
-      "\u001b[31m\n",
-      ">>>>>>>> USING AUTO REPLY...\u001b[0m\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "/Users/reibs/Projects/autogen/autogen/agentchat/conversable_agent.py:2489: UserWarning: Function 'calculator' is being overridden.\n",
-      "  warnings.warn(f\"Function '{tool_sig['function']['name']}' is being overridden.\", UserWarning)\n",
-      "/Users/reibs/Projects/autogen/autogen/agentchat/conversable_agent.py:2408: UserWarning: Function 'calculator' is being overridden.\n",
-      "  warnings.warn(f\"Function '{name}' is being overridden.\", UserWarning)\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\u001b[33mAssistant\u001b[0m (to User):\n",
-      "\n",
-      "\u001b[32m***** Suggested tool call (call_pSJXJKu1qbfRV4SVNHzTaG1z): calculator *****\u001b[0m\n",
-      "Arguments: \n",
-      "{\"a\": 1423, \"b\": 123, \"operator\": \"-\"}\n",
-      "\u001b[32m***************************************************************************\u001b[0m\n",
-      "\u001b[32m***** Suggested tool call (call_kHRXi8vq5XsZSrGDnVTA1oy7): calculator *****\u001b[0m\n",
-      "Arguments: \n",
-      "{\"a\": 32, \"b\": 23, \"operator\": \"+\"}\n",
-      "\u001b[32m***************************************************************************\u001b[0m\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n",
-      "\u001b[35m\n",
-      ">>>>>>>> EXECUTING FUNCTION calculator...\u001b[0m\n",
-      "\u001b[35m\n",
-      ">>>>>>>> EXECUTING FUNCTION calculator...\u001b[0m\n",
-      "\u001b[33mUser\u001b[0m (to Assistant):\n",
-      "\n",
-      "\u001b[33mUser\u001b[0m (to Assistant):\n",
-      "\n",
-      "\u001b[32m***** Response from calling tool (call_pSJXJKu1qbfRV4SVNHzTaG1z) *****\u001b[0m\n",
-      "1300\n",
-      "\u001b[32m**********************************************************************\u001b[0m\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n",
-      "\u001b[33mUser\u001b[0m (to Assistant):\n",
-      "\n",
-      "\u001b[32m***** Response from calling tool (call_kHRXi8vq5XsZSrGDnVTA1oy7) *****\u001b[0m\n",
-      "55\n",
-      "\u001b[32m**********************************************************************\u001b[0m\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n",
-      "\u001b[31m\n",
-      ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
-      "\u001b[33mAssistant\u001b[0m (to User):\n",
-      "\n",
-      "\u001b[32m***** Suggested tool call (call_srDG3bYDpkdoIVn1mVVJNTJj): calculator *****\u001b[0m\n",
-      "Arguments: \n",
-      "{\"a\": 1300, \"b\": 3, \"operator\": \"/\"}\n",
-      "\u001b[32m***************************************************************************\u001b[0m\n",
-      "\u001b[32m***** Suggested tool call (call_jXJHjxZlnsHEbLaG4hQXUX1v): calculator *****\u001b[0m\n",
-      "Arguments: \n",
-      "{\"a\": 55, \"b\": 5, \"operator\": \"*\"}\n",
-      "\u001b[32m***************************************************************************\u001b[0m\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n",
-      "\u001b[35m\n",
-      ">>>>>>>> EXECUTING FUNCTION calculator...\u001b[0m\n",
-      "\u001b[35m\n",
-      ">>>>>>>> EXECUTING FUNCTION calculator...\u001b[0m\n",
-      "\u001b[33mUser\u001b[0m (to Assistant):\n",
-      "\n",
-      "\u001b[33mUser\u001b[0m (to Assistant):\n",
-      "\n",
-      "\u001b[32m***** Response from calling tool (call_srDG3bYDpkdoIVn1mVVJNTJj) *****\u001b[0m\n",
-      "433\n",
-      "\u001b[32m**********************************************************************\u001b[0m\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n",
-      "\u001b[33mUser\u001b[0m (to Assistant):\n",
-      "\n",
-      "\u001b[32m***** Response from calling tool (call_jXJHjxZlnsHEbLaG4hQXUX1v) *****\u001b[0m\n",
-      "275\n",
-      "\u001b[32m**********************************************************************\u001b[0m\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n",
-      "\u001b[31m\n",
-      ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
-      "\u001b[33mAssistant\u001b[0m (to User):\n",
-      "\n",
-      "\u001b[32m***** Suggested tool call (call_jKyiL6oizDZPfX16hhVi6pa3): calculator *****\u001b[0m\n",
-      "Arguments: \n",
-      "{\"a\":433,\"b\":275,\"operator\":\"+\"}\n",
-      "\u001b[32m***************************************************************************\u001b[0m\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n",
-      "\u001b[35m\n",
-      ">>>>>>>> EXECUTING FUNCTION calculator...\u001b[0m\n",
-      "\u001b[33mUser\u001b[0m (to Assistant):\n",
-      "\n",
-      "\u001b[33mUser\u001b[0m (to Assistant):\n",
-      "\n",
-      "\u001b[32m***** Response from calling tool (call_jKyiL6oizDZPfX16hhVi6pa3) *****\u001b[0m\n",
-      "708\n",
-      "\u001b[32m**********************************************************************\u001b[0m\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n",
-      "\u001b[31m\n",
-      ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
-      "\u001b[33mAssistant\u001b[0m (to User):\n",
-      "\n",
-      "The result of the expression \\((1423 - 123) / 3 + (32 + 23) * 5\\) is 708. \n",
-      "\n",
-      "'TERMINATE'\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "πŸ–‡ AgentOps: This run's cost $0.000600\n",
-      "πŸ–‡ AgentOps: \u001b[34m\u001b[34mSession Replay: https://app.agentops.ai/drilldown?session_id=51556b00-428b-4cae-b815-1320ebc7e810\u001b[0m\u001b[0m\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "Operator = Literal[\"+\", \"-\", \"*\", \"/\"]\n",
     "\n",
@@ -288,14 +190,6 @@
     "* Each use of the calculator tool\n",
     "* Each call to OpenAI for LLM use"
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "0b8919ec-ff81-4c94-95de-0d2c5dabbdd9",
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {
diff --git a/examples/cohere/cohere_example.ipynb b/examples/cohere/cohere_example.ipynb
new file mode 100644
index 000000000..96ff01d6c
--- /dev/null
+++ b/examples/cohere/cohere_example.ipynb
@@ -0,0 +1,152 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Cohere example"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "First let's install the required packages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install -U cohere\n",
+    "%pip install -U agentops"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Then import them"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import cohere\n",
+    "import agentops\n",
+    "import os\n",
+    "from dotenv import load_dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "load_dotenv()\n",
+    "CO_API_KEY = os.getenv(\"CO_API_KEY\") or \"\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "agentops.init(AGENTOPS_API_KEY, default_tags=[\"cohere-example\"])\n",
+    "co = cohere.Client()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "stream = co.chat_stream(\n",
+    "    message=\"Tell me everything you can about AgentOps\",\n",
+    "    connectors=[{\"id\": \"web-search\"}],\n",
+    ")\n",
+    "\n",
+    "response = \"\"\n",
+    "for event in stream:\n",
+    "    if event.event_type == \"text-generation\":\n",
+    "        response += event.text\n",
+    "        print(event.text, end=\"\")\n",
+    "    elif event.event_type == \"stream-end\":\n",
+    "        print(\"\\n\")\n",
+    "        print(event)\n",
+    "        print(\"\\n\")\n",
+    "\n",
+    "stream = co.chat_stream(\n",
+    "    chat_history=[\n",
+    "        {\n",
+    "            \"role\": \"SYSTEM\",\n",
+    "            \"message\": \"You are Adam Silverman: die-hard advocate of AgentOps, leader in AI Agent observability\",\n",
+    "        },\n",
+    "        {\n",
+    "            \"role\": \"CHATBOT\",\n",
+    "            \"message\": \"How's your day going? I'd like to tell you about AgentOps: {response}\",\n",
+    "        },\n",
+    "    ],\n",
+    "    message=\"Based on your newfound knowledge of AgentOps, is Cohere a suitable partner for them and how could they integrate?\",\n",
+    "    connectors=[{\"id\": \"web-search\"}],\n",
+    ")\n",
+    "\n",
+    "response = \"\"\n",
+    "for event in stream:\n",
+    "    if event.event_type == \"text-generation\":\n",
+    "        response += event.text\n",
+    "        print(event.text, end=\"\")\n",
+    "    elif event.event_type == \"stream-end\":\n",
+    "        print(\"\\n\")\n",
+    "        print(event)\n",
+    "        print(\"\\n\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "agentops.end_session(\"Success\")"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "env",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/examples/cohere_example.py b/examples/cohere_example.py
deleted file mode 100644
index 4c8557cd9..000000000
--- a/examples/cohere_example.py
+++ /dev/null
@@ -1,50 +0,0 @@
-import cohere
-import agentops  # just
-from dotenv import load_dotenv
-
-load_dotenv()
-
-agentops.init(tags=["cohere", "agentops-demo"])  # three
-co = cohere.Client()
-
-stream = co.chat_stream(
-    message="Tell me everything you can about AgentOps",
-    connectors=[{"id": "web-search"}],
-)
-
-response = ""
-for event in stream:
-    if event.event_type == "text-generation":
-        response += event.text
-        print(event.text, end="")
-    elif event.event_type == "stream-end":
-        print("\n")
-        print(event)
-        print("\n")
-
-stream = co.chat_stream(
-    chat_history=[
-        {
-            "role": "SYSTEM",
-            "message": "You are Adam Silverman: die-hard advocate of AgentOps, leader in AI Agent observability",
-        },
-        {
-            "role": "CHATBOT",
-            "message": "How's your day going? I'd like to tell you about AgentOps: {response}",
-        },
-    ],
-    message="Based on your newfound knowledge of AgentOps, is Cohere a suitable partner for them and how could they integrate?",
-    connectors=[{"id": "web-search"}],
-)
-
-response = ""
-for event in stream:
-    if event.event_type == "text-generation":
-        response += event.text
-        print(event.text, end="")
-    elif event.event_type == "stream-end":
-        print("\n")
-        print(event)
-        print("\n")
-
-agentops.end_session("Success")  # lines
diff --git a/examples/crew/job_posting.ipynb b/examples/crew/job_posting.ipynb
index f6f75eeb7..8fd3e5ed5 100644
--- a/examples/crew/job_posting.ipynb
+++ b/examples/crew/job_posting.ipynb
@@ -1,14 +1,5 @@
 {
  "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# %pip install -e ../.."
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -223,11 +214,6 @@
     "\n",
     "agentops.end_session(\"Success\")"
    ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": []
   }
  ],
  "metadata": {
diff --git a/examples/langchain_examples.ipynb b/examples/langchain/langchain_examples.ipynb
similarity index 75%
rename from examples/langchain_examples.ipynb
rename to examples/langchain/langchain_examples.ipynb
index 4fcba18ab..0069add46 100644
--- a/examples/langchain_examples.ipynb
+++ b/examples/langchain/langchain_examples.ipynb
@@ -7,9 +7,35 @@
    "source": [
     "# AgentOps Langchain Agent Implementation\n",
     "\n",
-    "Using AgentOps monitoring with Langchain is simple. We've created a LangchainCallbackHandler that will do all of the heavy lifting!\n",
-    "\n",
-    "First we'll import the typical Langchain packages:"
+    "Using AgentOps monitoring with Langchain is simple. We've created a LangchainCallbackHandler that will do all of the heavy lifting!"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "1516a90d",
+   "metadata": {},
+   "source": [
+    "First let's install the required packages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e5fc8497",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install -U langchain\n",
+    "%pip install -U agentops\n",
+    "%pip install -U python-dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "9480596a",
+   "metadata": {},
+   "source": [
+    "Then import them"
    ]
   },
   {
@@ -19,10 +45,11 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import os\n",
     "from langchain.chat_models import ChatOpenAI\n",
     "from langchain.agents import initialize_agent, AgentType\n",
-    "from langchain.agents import tool"
+    "from langchain.agents import tool\n",
+    "import os\n",
+    "from dotenv import load_dotenv"
    ]
   },
   {
@@ -35,61 +62,59 @@
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "7e8f8cd098ad5b57",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "from agentops.partners.langchain_callback_handler import (\n",
     "    LangchainCallbackHandler as AgentOpsLangchainCallbackHandler,\n",
     ")"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "7e8f8cd098ad5b57",
-   "execution_count": null
+   ]
   },
   {
    "cell_type": "markdown",
+   "id": "25f189b0",
+   "metadata": {},
    "source": [
-    "Next, we'll grab our two API keys. You can use dotenv like below or however else you like to load environment variables"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "14a1b8e08a2e9eb3"
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "974514a8",
+   "metadata": {},
    "outputs": [],
    "source": [
-    "from dotenv import load_dotenv\n",
-    "\n",
-    "load_dotenv()"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "ff6cfc570599871f",
-   "execution_count": null
+    "load_dotenv()\n",
+    "OPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\") or \"\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\""
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "This is where AgentOps comes into play. Before creating our LLM instance via Langchain, first we'll create an instance of the AO LangchainCallbackHandler. After the handler is initialized, a session will be recorded automatically.\n",
-    "\n",
-    "Pass in your API key, and optionally any tags to describe this session for easier lookup in the AO dashboard."
-   ],
+   "id": "51f083697b783fa4",
    "metadata": {
     "collapsed": false
    },
-   "id": "51f083697b783fa4"
+   "source": [
+    "This is where AgentOps comes into play. Before creating our LLM instance via Langchain, first we'll create an instance of the AO LangchainCallbackHandler. After the handler is initialized, a session will be recorded automatically.\n",
+    "\n",
+    "Optionally pass in any tags to describe this session for easier lookup in the AO dashboard."
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "d432fe915edb6365",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
-    "AGENTOPS_API_KEY = os.environ.get(\"AGENTOPS_API_KEY\")\n",
-    "OPENAI_API_KEY = os.environ.get(\"OPENAI_API_KEY\")\n",
-    "\n",
     "agentops_handler = AgentOpsLangchainCallbackHandler(\n",
     "    api_key=AGENTOPS_API_KEY, tags=[\"Langchain Example\"]\n",
     ")\n",
@@ -97,47 +122,47 @@
     "llm = ChatOpenAI(\n",
     "    openai_api_key=OPENAI_API_KEY, callbacks=[agentops_handler], model=\"gpt-3.5-turbo\"\n",
     ")"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "d432fe915edb6365",
-   "execution_count": null
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "You can also retrieve the `session_id` of the newly created session."
-   ],
+   "id": "38d309f07363b58e",
    "metadata": {
     "collapsed": false
    },
-   "id": "38d309f07363b58e"
+   "source": [
+    "You can also retrieve the `session_id` of the newly created session."
+   ]
   },
   {
    "cell_type": "code",
-   "outputs": [],
-   "source": [
-    "print(\"Agent Ops session ID: \" + str(agentops_handler.session_id))"
-   ],
+   "execution_count": null,
+   "id": "f7e3a37cde3f9c22",
    "metadata": {
     "collapsed": false
    },
-   "id": "f7e3a37cde3f9c22",
-   "execution_count": null
+   "outputs": [],
+   "source": [
+    "print(\"Agent Ops session ID: \" + str(agentops_handler.session_id))"
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "Agents generally use tools. Let's define a simple tool here. Tool usage is also recorded."
-   ],
+   "id": "42f226ace56ef6f5",
    "metadata": {
     "collapsed": false
    },
-   "id": "42f226ace56ef6f5"
+   "source": [
+    "Agents generally use tools. Let's define a simple tool here. Tool usage is also recorded."
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "c103a2edbe837abd",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "@tool\n",
@@ -150,46 +175,48 @@
     "\n",
     "\n",
     "tools = [find_movie]"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "c103a2edbe837abd"
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "For each tool, you need to also add the callback handler"
-   ],
+   "id": "4fb7633857b19bf0",
    "metadata": {
     "collapsed": false
    },
-   "id": "4fb7633857b19bf0"
+   "source": [
+    "For each tool, you need to also add the callback handler"
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "a0345f08bf1c5ecd",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "for t in tools:\n",
     "    t.callbacks = [agentops_handler]"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "a0345f08bf1c5ecd"
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "Finally, let's use our agent! Pass in the callback handler to the agent, and all the actions will be recorded in the AO Dashboard"
-   ],
+   "id": "12a02b833716676b",
    "metadata": {
     "collapsed": false
    },
-   "id": "12a02b833716676b"
+   "source": [
+    "Finally, let's use our agent! Pass in the callback handler to the agent, and all the actions will be recorded in the AO Dashboard"
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "2d2e83fa69b30add",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "agent = initialize_agent(\n",
@@ -202,92 +229,93 @@
     "    ],  # You must pass in a callback handler to record your agent\n",
     "    handle_parsing_errors=True,\n",
     ")"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "2d2e83fa69b30add"
+   ]
   },
   {
    "cell_type": "code",
-   "outputs": [],
-   "source": [
-    "agent.run(\"What comedies are playing?\", callbacks=[agentops_handler])"
-   ],
+   "execution_count": null,
+   "id": "df2bc3a384493e1e",
    "metadata": {
     "collapsed": false
    },
-   "id": "df2bc3a384493e1e"
+   "outputs": [],
+   "source": [
+    "agent.run(\"What comedies are playing?\", callbacks=[agentops_handler])"
+   ]
   },
   {
    "cell_type": "markdown",
+   "id": "2230edd919182a55",
+   "metadata": {
+    "collapsed": false
+   },
    "source": [
     "## Check your session\n",
     "Finally, check your run on [AgentOps](https://app.agentops.ai)\n",
     "![image.png](attachment:3d9393fa-3d6a-4193-b6c9-43413dc19d15.png)"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "2230edd919182a55"
+   ]
   },
   {
    "cell_type": "markdown",
+   "id": "fbf4a3ec5fa60d74",
+   "metadata": {
+    "collapsed": false
+   },
    "source": [
     "# Async Agents\n",
     "\n",
     "Several langchain agents require async callback handlers. AgentOps also supports this."
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "fbf4a3ec5fa60d74"
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "ed63a166b343e1a2",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
-    "import os\n",
     "from langchain.chat_models import ChatOpenAI\n",
     "from langchain.agents import initialize_agent, AgentType\n",
-    "from langchain.agents import tool"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "ed63a166b343e1a2"
+    "from langchain.agents import tool\n",
+    "import os\n",
+    "from dotenv import load_dotenv"
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "aa15223969f97b3d",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "from agentops.partners.langchain_callback_handler import (\n",
     "    AsyncLangchainCallbackHandler as AgentOpsAsyncLangchainCallbackHandler,\n",
     ")"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "aa15223969f97b3d"
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "824e1d44",
+   "metadata": {},
    "outputs": [],
    "source": [
-    "from dotenv import load_dotenv\n",
-    "\n",
     "load_dotenv()\n",
-    "\n",
-    "AGENTOPS_API_KEY = os.environ.get(\"AGENTOPS_API_KEY\")\n",
-    "OPENAI_API_KEY = os.environ.get(\"OPENAI_API_KEY\")"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "818357483f039b60"
+    "OPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\") or \"\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\""
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "ae76cfe058f5e4e4",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "agentops_handler = AgentOpsAsyncLangchainCallbackHandler(\n",
@@ -299,14 +327,15 @@
     ")\n",
     "\n",
     "print(\"Agent Ops session ID: \" + str(await agentops_handler.session_id))"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "ae76cfe058f5e4e4"
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "1201049766be84a7",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "@tool\n",
@@ -322,14 +351,15 @@
     "\n",
     "for t in tools:\n",
     "    t.callbacks = [agentops_handler]"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "1201049766be84a7"
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "8d4f9dd39b79d542",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "agent = initialize_agent(\n",
@@ -342,24 +372,20 @@
     ")\n",
     "\n",
     "await agent.arun(\"What comedies are playing?\")"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "8d4f9dd39b79d542"
+   ]
   },
   {
    "cell_type": "markdown",
+   "id": "fb276a2e-f1c3-4f0f-8818-b7730e9d3ff7",
+   "metadata": {
+    "collapsed": false
+   },
    "source": [
     "## Check your session\n",
     "Finally, check your run on [AgentOps](https://app.agentops.ai)\n",
     "\n",
     "![image.png](attachment:69f2121a-d437-4c09-bbbe-c76c9243ee19.png)"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "fb276a2e-f1c3-4f0f-8818-b7730e9d3ff7"
+   ]
   }
  ],
  "metadata": {
diff --git a/examples/litellm/litellm_example.ipynb b/examples/litellm/litellm_example.ipynb
new file mode 100644
index 000000000..78cccf3a0
--- /dev/null
+++ b/examples/litellm/litellm_example.ipynb
@@ -0,0 +1,140 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### NOTE:\n",
+    "AgentOps requires that you import and call LiteLLM differently than LiteLLM's documentation instructs.  \n",
+    "Instead of\n",
+    "``` python\n",
+    "from litellm import completion\n",
+    "completion()\n",
+    "```\n",
+    "You should import and call like this:\n",
+    "``` python\n",
+    "import litellm\n",
+    "litellm.completion()\n",
+    "```\n",
+    "\n",
+    "Please see examples below\n",
+    "\n",
+    "[See our LiteLLM docs](https://docs.agentops.ai/v1/integrations/litellm)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "First let's install the required packages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install -U litellm\n",
+    "%pip install -U agentops\n",
+    "%pip install -U python-dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Then import them"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import litellm\n",
+    "import agentops\n",
+    "import os\n",
+    "from dotenv import load_dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "LiteLLM allows you to use several models including from OpenAI, Llama, Mistral, Claude, Gemini, Gemma, Dall-E, Whisper, and more all using the OpenAI format. To use a different model all you need to change are the API KEY and model (litellm.completion(model=\"...\"))."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "load_dotenv()\n",
+    "OPENAI_API_KEY = (\n",
+    "    os.getenv(\"OPENAI_API_KEY\") or \"\"\n",
+    ")  # or the provider of your choosing\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "agentops.init(AGENTOPS_API_KEY, default_tags=[\"litellm-example\"])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "messages = [{\"role\": \"user\", \"content\": \"Write a 12 word poem about secret agents.\"}]\n",
+    "response = litellm.completion(model=\"gpt-4\", messages=messages)  # or the model of your choosing\n",
+    "print(response.choices[0].message.content)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "agentops.end_session(\"Success\")"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "env",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/examples/multi_agent_example.ipynb b/examples/multi_agent_example.ipynb
index 7254f87b8..11cb6b494 100644
--- a/examples/multi_agent_example.ipynb
+++ b/examples/multi_agent_example.ipynb
@@ -14,6 +14,34 @@
     "This is an example implementation of tracking events from two separate agents"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "c4e0d5ff",
+   "metadata": {},
+   "source": [
+    "First let's install the required packages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5439d798",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install -U openai\n",
+    "%pip install -U agentops\n",
+    "%pip install -U python-dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "8f69131d",
+   "metadata": {},
+   "source": [
+    "Then import them"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -28,14 +56,22 @@
    "source": [
     "import agentops\n",
     "from agentops import track_agent\n",
-    "from dotenv import load_dotenv\n",
-    "import os\n",
     "from openai import OpenAI\n",
+    "import os\n",
+    "from dotenv import load_dotenv\n",
     "import logging\n",
     "\n",
     "from IPython.display import display, Markdown"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "6a65f091",
+   "metadata": {},
+   "source": [
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -49,8 +85,8 @@
    "outputs": [],
    "source": [
     "load_dotenv()\n",
-    "OPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\", \"\")\n",
-    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\", \"\")\n",
+    "OPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\") or \"\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\"\n",
     "logging.basicConfig(\n",
     "    level=logging.DEBUG\n",
     ")  # this will let us see that calls are assigned to an agent"
@@ -68,7 +104,7 @@
    },
    "outputs": [],
    "source": [
-    "agentops.init(AGENTOPS_API_KEY, tags=[\"multi-agent-notebook\"])\n",
+    "agentops.init(AGENTOPS_API_KEY, default_tags=[\"multi-agent-notebook\"])\n",
     "openai_client = OpenAI(api_key=OPENAI_API_KEY)"
    ]
   },
diff --git a/examples/multi_agent_groq_example.ipynb b/examples/multi_agent_groq_example.ipynb
index bb63481b1..c86e83aae 100644
--- a/examples/multi_agent_groq_example.ipynb
+++ b/examples/multi_agent_groq_example.ipynb
@@ -14,9 +14,37 @@
     "This is an example implementation of tracking events from two separate agents"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "fde50a03",
+   "metadata": {},
+   "source": [
+    "First let's install the required packages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f846ae29",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install -U agentops\n",
+    "%pip install -U groq\n",
+    "%pip install -U python-dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "882b027b",
+   "metadata": {},
+   "source": [
+    "Then import them"
+   ]
+  },
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
    "id": "7c566fac57d3b6ce",
    "metadata": {
     "collapsed": false,
@@ -36,9 +64,17 @@
     "from IPython.display import display, Markdown"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "d614aaf3",
+   "metadata": {},
+   "source": [
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
+   ]
+  },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": null,
    "id": "9f8c52496c04693",
    "metadata": {
     "collapsed": false,
@@ -49,8 +85,8 @@
    "outputs": [],
    "source": [
     "load_dotenv()\n",
-    "GROQ_API_KEY = os.getenv(\"GROQ_API_KEY\", \"\")\n",
-    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\", \"\")\n",
+    "GROQ_API_KEY = os.getenv(\"GROQ_API_KEY\") or \"\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\"\n",
     "logging.basicConfig(\n",
     "    level=logging.DEBUG\n",
     ")  # this will let us see that calls are assigned to an agent"
@@ -58,7 +94,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": null,
    "id": "af062552554d60ce",
    "metadata": {
     "collapsed": false,
@@ -66,24 +102,9 @@
      "outputs_hidden": false
     }
    },
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "DEBUG:urllib3.connectionpool:Starting new HTTPS connection (1): api.agentops.ai:443\n",
-      "DEBUG:urllib3.connectionpool:https://api.agentops.ai:443 \"POST /v2/create_session HTTP/11\" 200 204\n",
-      "πŸ–‡ AgentOps: \u001b[34m\u001b[34mSession Replay: https://app.agentops.ai/drilldown?session_id=892edb44-774d-4f52-a9b8-4d4eada5b434\u001b[0m\u001b[0m\n",
-      "INFO:agentops:\u001b[34m\u001b[34mSession Replay: https://app.agentops.ai/drilldown?session_id=892edb44-774d-4f52-a9b8-4d4eada5b434\u001b[0m\u001b[0m\n",
-      "DEBUG:httpx:load_ssl_context verify=True cert=None trust_env=True http2=False\n",
-      "DEBUG:httpx:load_verify_locations cafile='/Users/manu_suryavansh/miniforge3/envs/agentsops_dev/lib/python3.11/site-packages/certifi/cacert.pem'\n",
-      "DEBUG:urllib3.connectionpool:Starting new HTTPS connection (1): api.agentops.ai:443\n",
-      "DEBUG:urllib3.connectionpool:https://api.agentops.ai:443 \"POST /v2/create_events HTTP/11\" 200 9\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
-    "agentops.init(AGENTOPS_API_KEY, tags=[\"multi-agent-groq-notebook\"])\n",
+    "agentops.init(AGENTOPS_API_KEY, default_tags=[\"multi-agent-groq-notebook\"])\n",
     "groq_client = Groq(api_key=GROQ_API_KEY)"
    ]
   },
@@ -102,7 +123,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": null,
    "id": "727e3cc26ce3ec3",
    "metadata": {
     "collapsed": false,
@@ -148,7 +169,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": null,
    "id": "79b75d65de738522",
    "metadata": {
     "collapsed": false,
@@ -156,18 +177,7 @@
      "outputs_hidden": false
     }
    },
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "DEBUG:urllib3.connectionpool:Starting new HTTPS connection (1): api.agentops.ai:443\n",
-      "DEBUG:urllib3.connectionpool:https://api.agentops.ai:443 \"POST /v2/create_agent HTTP/11\" 200 9\n",
-      "DEBUG:urllib3.connectionpool:Starting new HTTPS connection (1): api.agentops.ai:443\n",
-      "DEBUG:urllib3.connectionpool:https://api.agentops.ai:443 \"POST /v2/create_agent HTTP/11\" 200 9\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "qa = QaAgent()\n",
     "engineer = EngineerAgent()"
@@ -190,42 +200,17 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": null,
    "id": "7272b927-67ef-4b8c-84a5-63ed06f75aa5",
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "DEBUG:groq._base_client:Request options: {'method': 'post', 'url': '/openai/v1/chat/completions', 'files': None, 'json_data': {'messages': [{'role': 'system', 'content': 'You are a software engineer and only output python code, no markdown tags.'}, {'role': 'user', 'content': 'python function to test prime number'}], 'model': 'llama3-70b-8192', 'temperature': 0.5}}\n",
-      "DEBUG:groq._base_client:Sending HTTP Request: POST https://api.groq.com/openai/v1/chat/completions\n",
-      "DEBUG:httpcore.connection:connect_tcp.started host='api.groq.com' port=443 local_address=None timeout=5.0 socket_options=None\n",
-      "DEBUG:httpcore.connection:connect_tcp.complete return_value=\n",
-      "DEBUG:httpcore.connection:start_tls.started ssl_context= server_hostname='api.groq.com' timeout=5.0\n",
-      "DEBUG:httpcore.connection:start_tls.complete return_value=\n",
-      "DEBUG:httpcore.http11:send_request_headers.started request=\n",
-      "DEBUG:httpcore.http11:send_request_headers.complete\n",
-      "DEBUG:httpcore.http11:send_request_body.started request=\n",
-      "DEBUG:httpcore.http11:send_request_body.complete\n",
-      "DEBUG:httpcore.http11:receive_response_headers.started request=\n",
-      "DEBUG:httpcore.http11:receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Sun, 21 Jul 2024 05:55:22 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'Cache-Control', b'private, max-age=0, no-store, no-cache, must-revalidate'), (b'vary', b'Origin'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'30000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'29963'), (b'x-ratelimit-reset-requests', b'1.728s'), (b'x-ratelimit-reset-tokens', b'74ms'), (b'x-request-id', b'req_01j39xqscce4dbg5h08vrftym2'), (b'via', b'1.1 google'), (b'alt-svc', b'h3=\":443\"; ma=86400'), (b'CF-Cache-Status', b'DYNAMIC'), (b'Set-Cookie', b'__cf_bm=vDBNcm.4NuP7B9MJyHy7WVBS7CVF.SyvXXsf7ZXdpT8-1721541322-1.0.1.1-QRg7ZBBgC845heu3O2ZfJySw1nqhlOCwpF29NmD1H9xnMUNFOstcyHCHabYKSBZXq6iNGbkYaId01XpPYOfuWQ; path=/; expires=Sun, 21-Jul-24 06:25:22 GMT; domain=.groq.com; HttpOnly; Secure; SameSite=None'), (b'Server', b'cloudflare'), (b'CF-RAY', b'8a68f10f2ba89652-SJC'), (b'Content-Encoding', b'gzip')])\n",
-      "INFO:httpx:HTTP Request: POST https://api.groq.com/openai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
-      "DEBUG:httpcore.http11:receive_response_body.started request=\n",
-      "DEBUG:httpcore.http11:receive_response_body.complete\n",
-      "DEBUG:httpcore.http11:response_closed.started\n",
-      "DEBUG:httpcore.http11:response_closed.complete\n",
-      "DEBUG:groq._base_client:HTTP Response: POST https://api.groq.com/openai/v1/chat/completions \"200 OK\" Headers({'date': 'Sun, 21 Jul 2024 05:55:22 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'cache-control': 'private, max-age=0, no-store, no-cache, must-revalidate', 'vary': 'Origin', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '30000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '29963', 'x-ratelimit-reset-requests': '1.728s', 'x-ratelimit-reset-tokens': '74ms', 'x-request-id': 'req_01j39xqscce4dbg5h08vrftym2', 'via': '1.1 google', 'alt-svc': 'h3=\":443\"; ma=86400', 'cf-cache-status': 'DYNAMIC', 'set-cookie': '__cf_bm=vDBNcm.4NuP7B9MJyHy7WVBS7CVF.SyvXXsf7ZXdpT8-1721541322-1.0.1.1-QRg7ZBBgC845heu3O2ZfJySw1nqhlOCwpF29NmD1H9xnMUNFOstcyHCHabYKSBZXq6iNGbkYaId01XpPYOfuWQ; path=/; expires=Sun, 21-Jul-24 06:25:22 GMT; domain=.groq.com; HttpOnly; Secure; SameSite=None', 'server': 'cloudflare', 'cf-ray': '8a68f10f2ba89652-SJC', 'content-encoding': 'gzip'})\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "generated_func = engineer.completion(\"python function to test prime number\")"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": null,
    "id": "830b86dac47dceb3",
    "metadata": {
     "collapsed": false,
@@ -233,40 +218,14 @@
      "outputs_hidden": false
     }
    },
-   "outputs": [
-    {
-     "data": {
-      "text/markdown": [
-       "```python\n",
-       "def is_prime(n):\n",
-       "    if n <= 1:\n",
-       "        return False\n",
-       "    if n == 2:\n",
-       "        return True\n",
-       "    if n % 2 == 0:\n",
-       "        return False\n",
-       "    max_divisor = int(n**0.5) + 1\n",
-       "    for d in range(3, max_divisor, 2):\n",
-       "        if n % d == 0:\n",
-       "            return False\n",
-       "    return True\n",
-       "```"
-      ],
-      "text/plain": [
-       ""
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    }
-   ],
+   "outputs": [],
    "source": [
     "display(Markdown(\"```python\\n\" + generated_func + \"\\n```\"))"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": null,
    "id": "63c9d0d457aee91a",
    "metadata": {
     "collapsed": false,
@@ -274,28 +233,7 @@
      "outputs_hidden": false
     }
    },
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "DEBUG:groq._base_client:Request options: {'method': 'post', 'url': '/openai/v1/chat/completions', 'files': None, 'json_data': {'messages': [{'role': 'system', 'content': 'You are a qa engineer and only output python code, no markdown tags.'}, {'role': 'user', 'content': 'Write a python unit test that test the following function: \\n def is_prime(n):\\n    if n <= 1:\\n        return False\\n    if n == 2:\\n        return True\\n    if n % 2 == 0:\\n        return False\\n    max_divisor = int(n**0.5) + 1\\n    for d in range(3, max_divisor, 2):\\n        if n % d == 0:\\n            return False\\n    return True'}], 'model': 'llama3-70b-8192', 'temperature': 0.5}}\n",
-      "DEBUG:groq._base_client:Sending HTTP Request: POST https://api.groq.com/openai/v1/chat/completions\n",
-      "DEBUG:httpcore.http11:send_request_headers.started request=\n",
-      "DEBUG:httpcore.http11:send_request_headers.complete\n",
-      "DEBUG:httpcore.http11:send_request_body.started request=\n",
-      "DEBUG:httpcore.http11:send_request_body.complete\n",
-      "DEBUG:httpcore.http11:receive_response_headers.started request=\n",
-      "DEBUG:httpcore.http11:receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Sun, 21 Jul 2024 05:55:23 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'Cache-Control', b'private, max-age=0, no-store, no-cache, must-revalidate'), (b'vary', b'Origin'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'30000'), (b'x-ratelimit-remaining-requests', b'49998'), (b'x-ratelimit-remaining-tokens', b'29845'), (b'x-ratelimit-reset-requests', b'2.960999999s'), (b'x-ratelimit-reset-tokens', b'310ms'), (b'x-request-id', b'req_01j39xqsy5fxgth4w9q6r24h9w'), (b'via', b'1.1 google'), (b'alt-svc', b'h3=\":443\"; ma=86400'), (b'CF-Cache-Status', b'DYNAMIC'), (b'Server', b'cloudflare'), (b'CF-RAY', b'8a68f112be2c9652-SJC'), (b'Content-Encoding', b'gzip')])\n",
-      "INFO:httpx:HTTP Request: POST https://api.groq.com/openai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
-      "DEBUG:httpcore.http11:receive_response_body.started request=\n",
-      "DEBUG:httpcore.http11:receive_response_body.complete\n",
-      "DEBUG:httpcore.http11:response_closed.started\n",
-      "DEBUG:httpcore.http11:response_closed.complete\n",
-      "DEBUG:groq._base_client:HTTP Response: POST https://api.groq.com/openai/v1/chat/completions \"200 OK\" Headers({'date': 'Sun, 21 Jul 2024 05:55:23 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'cache-control': 'private, max-age=0, no-store, no-cache, must-revalidate', 'vary': 'Origin', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '30000', 'x-ratelimit-remaining-requests': '49998', 'x-ratelimit-remaining-tokens': '29845', 'x-ratelimit-reset-requests': '2.960999999s', 'x-ratelimit-reset-tokens': '310ms', 'x-request-id': 'req_01j39xqsy5fxgth4w9q6r24h9w', 'via': '1.1 google', 'alt-svc': 'h3=\":443\"; ma=86400', 'cf-cache-status': 'DYNAMIC', 'server': 'cloudflare', 'cf-ray': '8a68f112be2c9652-SJC', 'content-encoding': 'gzip'})\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "generated_test = qa.completion(\n",
     "    \"Write a python unit test that test the following function: \\n \" + generated_func\n",
@@ -304,7 +242,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 9,
+   "execution_count": null,
    "id": "a88ffcbd2015d422",
    "metadata": {
     "collapsed": false,
@@ -312,67 +250,7 @@
      "outputs_hidden": false
     }
    },
-   "outputs": [
-    {
-     "data": {
-      "text/markdown": [
-       "```python\n",
-       "import unittest\n",
-       "\n",
-       "def is_prime(n):\n",
-       "    if n <= 1:\n",
-       "        return False\n",
-       "    if n == 2:\n",
-       "        return True\n",
-       "    if n % 2 == 0:\n",
-       "        return False\n",
-       "    max_divisor = int(n**0.5) + 1\n",
-       "    for d in range(3, max_divisor, 2):\n",
-       "        if n % d == 0:\n",
-       "            return False\n",
-       "    return True\n",
-       "\n",
-       "class TestIsPrimeFunction(unittest.TestCase):\n",
-       "    def test_negative_numbers(self):\n",
-       "        self.assertFalse(is_prime(-1))\n",
-       "        self.assertFalse(is_prime(-2))\n",
-       "        self.assertFalse(is_prime(-3))\n",
-       "\n",
-       "    def test_zero_and_one(self):\n",
-       "        self.assertFalse(is_prime(0))\n",
-       "        self.assertFalse(is_prime(1))\n",
-       "\n",
-       "    def test_two(self):\n",
-       "        self.assertTrue(is_prime(2))\n",
-       "\n",
-       "    def test_even_numbers(self):\n",
-       "        self.assertFalse(is_prime(4))\n",
-       "        self.assertFalse(is_prime(6))\n",
-       "        self.assertFalse(is_prime(8))\n",
-       "\n",
-       "    def test_prime_numbers(self):\n",
-       "        self.assertTrue(is_prime(3))\n",
-       "        self.assertTrue(is_prime(5))\n",
-       "        self.assertTrue(is_prime(7))\n",
-       "        self.assertTrue(is_prime(11))\n",
-       "        self.assertTrue(is_prime(13))\n",
-       "\n",
-       "    def test_large_prime_numbers(self):\n",
-       "        self.assertTrue(is_prime(104729))\n",
-       "        self.assertTrue(is_prime(105013))\n",
-       "\n",
-       "if __name__ == '__main__':\n",
-       "    unittest.main()\n",
-       "```"
-      ],
-      "text/plain": [
-       ""
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    }
-   ],
+   "outputs": [],
    "source": [
     "display(Markdown(\"```python\\n\" + generated_test + \"\\n```\"))"
    ]
@@ -405,7 +283,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
+   "execution_count": null,
    "id": "122e923cb07fd5f4",
    "metadata": {
     "collapsed": false,
@@ -413,38 +291,7 @@
      "outputs_hidden": false
     }
    },
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "DEBUG:groq._base_client:Request options: {'method': 'post', 'url': '/openai/v1/chat/completions', 'files': None, 'json_data': {'messages': [{'role': 'system', 'content': 'You are not a tracked agent'}, {'role': 'user', 'content': 'Say hello'}], 'model': 'llama3-70b-8192'}}\n",
-      "DEBUG:groq._base_client:Sending HTTP Request: POST https://api.groq.com/openai/v1/chat/completions\n",
-      "DEBUG:httpcore.http11:send_request_headers.started request=\n",
-      "DEBUG:httpcore.http11:send_request_headers.complete\n",
-      "DEBUG:httpcore.http11:send_request_body.started request=\n",
-      "DEBUG:httpcore.http11:send_request_body.complete\n",
-      "DEBUG:httpcore.http11:receive_response_headers.started request=\n",
-      "DEBUG:httpcore.http11:receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Sun, 21 Jul 2024 05:55:24 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'Cache-Control', b'private, max-age=0, no-store, no-cache, must-revalidate'), (b'vary', b'Origin'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'30000'), (b'x-ratelimit-remaining-requests', b'49998'), (b'x-ratelimit-remaining-tokens', b'29982'), (b'x-ratelimit-reset-requests', b'3.318s'), (b'x-ratelimit-reset-tokens', b'36ms'), (b'x-request-id', b'req_01j39xqvrgem4bfd3gqybths6c'), (b'via', b'1.1 google'), (b'alt-svc', b'h3=\":443\"; ma=86400'), (b'CF-Cache-Status', b'DYNAMIC'), (b'Server', b'cloudflare'), (b'CF-RAY', b'8a68f11e6dd59652-SJC'), (b'Content-Encoding', b'gzip')])\n",
-      "INFO:httpx:HTTP Request: POST https://api.groq.com/openai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
-      "DEBUG:httpcore.http11:receive_response_body.started request=\n",
-      "DEBUG:httpcore.http11:receive_response_body.complete\n",
-      "DEBUG:httpcore.http11:response_closed.started\n",
-      "DEBUG:httpcore.http11:response_closed.complete\n",
-      "DEBUG:groq._base_client:HTTP Response: POST https://api.groq.com/openai/v1/chat/completions \"200 OK\" Headers({'date': 'Sun, 21 Jul 2024 05:55:24 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'cache-control': 'private, max-age=0, no-store, no-cache, must-revalidate', 'vary': 'Origin', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '30000', 'x-ratelimit-remaining-requests': '49998', 'x-ratelimit-remaining-tokens': '29982', 'x-ratelimit-reset-requests': '3.318s', 'x-ratelimit-reset-tokens': '36ms', 'x-request-id': 'req_01j39xqvrgem4bfd3gqybths6c', 'via': '1.1 google', 'alt-svc': 'h3=\":443\"; ma=86400', 'cf-cache-status': 'DYNAMIC', 'server': 'cloudflare', 'cf-ray': '8a68f11e6dd59652-SJC', 'content-encoding': 'gzip'})\n"
-     ]
-    },
-    {
-     "data": {
-      "text/plain": [
-       "'Hello!'"
-      ]
-     },
-     "execution_count": 10,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
+   "outputs": [],
    "source": [
     "res = groq_client.chat.completions.create(\n",
     "    model=\"llama3-70b-8192\",\n",
@@ -468,14 +315,6 @@
    "source": [
     "You'll notice that we didn't log an agent name, so the AgentOps backend will assign it to the Default Agent for the session!"
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "d7a167c1-61f3-4499-8790-ec001e361e39",
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {
@@ -494,7 +333,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.9"
+   "version": "3.12.3"
   }
  },
  "nbformat": 4,
diff --git a/examples/multi_session_llm.ipynb b/examples/multi_session_llm.ipynb
index 6013be802..a13cd65ea 100644
--- a/examples/multi_session_llm.ipynb
+++ b/examples/multi_session_llm.ipynb
@@ -2,14 +2,42 @@
  "cells": [
   {
    "cell_type": "markdown",
-   "source": [
-    "# Multiple Concurrent Sessions\n",
-    "This example will show you how to run multiple sessions concurrently, assigning LLM cals to a specific session."
-   ],
+   "id": "a0fe80a38dec2f7b",
    "metadata": {
     "collapsed": false
    },
-   "id": "a0fe80a38dec2f7b"
+   "source": [
+    "# Multiple Concurrent Sessions\n",
+    "This example will show you how to run multiple sessions concurrently, assigning LLM calls to a specific session."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "ef25b661",
+   "metadata": {},
+   "source": [
+    "First let's install the required packages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f507526f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install -U openai\n",
+    "%pip install -U agentops\n",
+    "%pip install -U python-dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "b6abd496",
+   "metadata": {},
+   "source": [
+    "Then import them"
+   ]
   },
   {
    "cell_type": "code",
@@ -20,49 +48,73 @@
    },
    "outputs": [],
    "source": [
-    "import agentops\n",
     "from openai import OpenAI\n",
-    "from dotenv import load_dotenv\n",
+    "import agentops\n",
     "from agentops import ActionEvent\n",
-    "\n",
-    "load_dotenv()"
+    "import os\n",
+    "from dotenv import load_dotenv"
    ]
   },
   {
    "cell_type": "markdown",
+   "id": "c1da7e59",
+   "metadata": {},
    "source": [
-    "First, of course, lets init AgentOps. We're going to bypass creating a session automatically for the sake of showing it below."
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "da9cf64965c86ee9"
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "8325866b",
+   "metadata": {},
    "outputs": [],
    "source": [
-    "agentops.init(auto_start_session=False)\n",
-    "openai = OpenAI()"
-   ],
+    "load_dotenv()\n",
+    "OPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\") or \"\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "da9cf64965c86ee9",
    "metadata": {
     "collapsed": false
    },
+   "source": [
+    "Then, of course, let's init AgentOps. We're going to bypass creating a session automatically for the sake of showing it below."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
    "id": "39af2cd027ce268",
-   "execution_count": null
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "agentops.init(AGENTOPS_API_KEY, auto_start_session=False)\n",
+    "openai = OpenAI()"
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "Now lets create two sessions, each with an identifiable tag."
-   ],
+   "id": "9501d298aec35510",
    "metadata": {
     "collapsed": false
    },
-   "id": "9501d298aec35510"
+   "source": [
+    "Now let's create two sessions, each with an identifiable tag."
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "4f24d06dd29579ff",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "session_1 = agentops.start_session(tags=[\"multi-session-test-1\"])\n",
@@ -70,49 +122,49 @@
     "\n",
     "print(\"session_id_1: {}\".format(session_1.session_id))\n",
     "print(\"session_id_2: {}\".format(session_2.session_id))"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "4f24d06dd29579ff",
-   "execution_count": null
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "## LLM Calls\n",
-    "Now lets go ahead and make our first OpenAI LLM call. The challenge with having multiple sessions at the same time is that there is no way for AgentOps to know what LLM call is intended to pertain to what active session. This means we need to do a little extra work in one of two ways."
-   ],
+   "id": "38f373b7a8878a68",
    "metadata": {
     "collapsed": false
    },
-   "id": "38f373b7a8878a68"
+   "source": [
+    "## LLM Calls\n",
+    "Now let's go ahead and make our first OpenAI LLM call. The challenge with having multiple sessions at the same time is that there is no way for AgentOps to know what LLM call is intended to pertain to what active session. This means we need to do a little extra work in one of two ways."
+   ]
   },
   {
    "cell_type": "code",
-   "outputs": [],
-   "source": [
-    "messages = [{\"role\": \"user\", \"content\": \"Hello\"}]"
-   ],
+   "execution_count": null,
+   "id": "8a2d65f5fcdb137",
    "metadata": {
     "collapsed": false
    },
-   "id": "8a2d65f5fcdb137",
-   "execution_count": null
+   "outputs": [],
+   "source": [
+    "messages = [{\"role\": \"user\", \"content\": \"Hello\"}]"
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "### Patching Function\n",
-    "This method involves wrapping the LLM call withing a function on session. It can look a little counter-intuitive, but it easily tells us what session the call belongs to."
-   ],
+   "id": "e1859e37b65669b2",
    "metadata": {
     "collapsed": false
    },
-   "id": "e1859e37b65669b2"
+   "source": [
+    "### Patching Function\n",
+    "This method involves wrapping the LLM call within a function on the session. It can look a little counter-intuitive, but it easily tells us what session the call belongs to."
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "106a1c899602bd33",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "# option 1: use session.patch\n",
@@ -121,26 +173,26 @@
     "    messages=messages,\n",
     "    temperature=0.5,\n",
     ")"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "106a1c899602bd33",
-   "execution_count": null
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "### Create patched function\n",
-    "If you're using the create function multiple times, you can create a new function with the same method"
-   ],
+   "id": "3e129661929e8368",
    "metadata": {
     "collapsed": false
    },
-   "id": "3e129661929e8368"
+   "source": [
+    "### Create patched function\n",
+    "If you're using the create function multiple times, you can create a new function with the same method"
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "be3b866ee04ef767",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "observed_create = session_1.patch(openai.chat.completions.create)\n",
@@ -149,102 +201,99 @@
     "    messages=messages,\n",
     "    temperature=0.5,\n",
     ")"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "be3b866ee04ef767",
-   "execution_count": null
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "### Keyword Argument\n",
-    "Alternatively, you can also pass the session into the LLM function call as a keyword argument. While this method works and is a bit more readable, it is not a \"pythonic\" pattern and can lead to linting errors in the code, as the base function is not expecting a `session` keyword."
-   ],
+   "id": "ec03dbfb7a185d1d",
    "metadata": {
     "collapsed": false
    },
-   "id": "ec03dbfb7a185d1d"
+   "source": [
+    "### Keyword Argument\n",
+    "Alternatively, you can also pass the session into the LLM function call as a keyword argument. While this method works and is a bit more readable, it is not a \"pythonic\" pattern and can lead to linting errors in the code, as the base function is not expecting a `session` keyword."
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "4ad4c7629509b4be",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "# option 2: add session as a keyword argument\n",
     "response2 = openai.chat.completions.create(\n",
     "    model=\"gpt-3.5-turbo\", messages=messages, temperature=0.5, session=session_2\n",
     ")"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "4ad4c7629509b4be"
+   ]
   },
   {
    "cell_type": "markdown",
+   "id": "e6de84850aa2e135",
+   "metadata": {
+    "collapsed": false
+   },
    "source": [
     "## Recording Events\n",
     "Outside of LLM calls, there are plenty of other events that we want to track. You can learn more about these events [here](https://docs.agentops.ai/v1/concepts/events).\n",
     "\n",
     "Recording these events on a session is as simple as `session.record(...)`"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "e6de84850aa2e135"
+   ]
   },
   {
    "cell_type": "code",
-   "outputs": [],
-   "source": [
-    "session_1.record(ActionEvent(action_type=\"test event\"))"
-   ],
+   "execution_count": null,
+   "id": "964e3073bac33223",
    "metadata": {
     "collapsed": false
    },
-   "id": "964e3073bac33223"
+   "outputs": [],
+   "source": [
+    "session_1.record(ActionEvent(action_type=\"test event\"))"
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "Now let's go ahead and end the sessions"
-   ],
+   "id": "43ac0b9b99eab5c7",
    "metadata": {
     "collapsed": false
    },
-   "id": "43ac0b9b99eab5c7"
+   "source": [
+    "Now let's go ahead and end the sessions"
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "7e3050abcb72421b",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "session_1.end_session(end_state=\"Success\")\n",
     "session_2.end_session(end_state=\"Success\")"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "7e3050abcb72421b",
-   "execution_count": null
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "If you look in the AgentOps dashboard for these sessions, you will see two unique sessions, both with one LLM Event each, one with an Action Event as well."
-   ],
+   "id": "53ea2b8dfee6270a",
    "metadata": {
     "collapsed": false
    },
-   "id": "53ea2b8dfee6270a"
+   "source": [
+    "If you look in the AgentOps dashboard for these sessions, you will see two unique sessions, both with one LLM Event each, one with an Action Event as well."
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [],
+   "id": "dbc7483434f8c147",
    "metadata": {
     "collapsed": false
    },
-   "id": "dbc7483434f8c147"
+   "source": []
   }
  ],
  "metadata": {
diff --git a/examples/multion/Autonomous_web_browsing.ipynb b/examples/multion/Autonomous_web_browsing.ipynb
index 10c78d1d6..f5b6a269a 100644
--- a/examples/multion/Autonomous_web_browsing.ipynb
+++ b/examples/multion/Autonomous_web_browsing.ipynb
@@ -15,25 +15,30 @@
     "Furthermore, events and LLM calls in your Python program will be tracked as well."
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "First let's install the required packages"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Install dependencies\n",
     "%pip install -U multion\n",
     "%pip install -U agentops\n",
-    "%pip install -U openai"
+    "%pip install -U openai\n",
+    "%pip install -U python-dotenv"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### Tracking MultiOn events with AgentOps\n",
-    "\n",
-    "When an `agentops_api_key` is provided, MultiOn will automatically start an AgentOps session and record events."
+    "Then import them"
    ]
   },
   {
@@ -42,11 +47,19 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Set your API keys\n",
+    "from multion.client import MultiOn\n",
+    "from multion.core.request_options import RequestOptions\n",
+    "import openai\n",
+    "import agentops\n",
     "import os\n",
-    "\n",
-    "os.environ[\"MULTION_API_KEY\"] = \"multion_key\"\n",
-    "os.environ[\"AGENTOPS_API_KEY\"] = \"agentops_key\""
+    "from dotenv import load_dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
    ]
   },
   {
@@ -55,14 +68,30 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from multion.client import MultiOn\n",
-    "from multion.core.request_options import RequestOptions\n",
-    "import openai\n",
-    "import agentops\n",
+    "load_dotenv()\n",
+    "MULTION_API_KEY = os.getenv(\"MULTION_API_KEY\") or \"\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\"\n",
+    "OPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\") or \"\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Tracking MultiOn events with AgentOps\n",
     "\n",
+    "When an `agentops_api_key` is provided, MultiOn will automatically start an AgentOps session and record events."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
     "multion = MultiOn(\n",
-    "    api_key=os.environ.get(\"MULTION_API_KEY\"),\n",
-    "    agentops_api_key=os.environ.get(\"AGENTOPS_API_KEY\"),\n",
+    "    api_key=MULTION_API_KEY,\n",
+    "    agentops_api_key=AGENTOPS_API_KEY,\n",
     ")\n",
     "cmd = \"what three things do i get with agentops\"\n",
     "request_options = RequestOptions(\n",
@@ -77,7 +106,10 @@
     "    request_options=request_options,\n",
     ")\n",
     "\n",
-    "print(browse_response.message)"
+    "print(browse_response.message)\n",
+    "\n",
+    "# End session to see your dashboard\n",
+    "agentops.end_session(\"Success\")"
    ]
   },
   {
@@ -94,7 +126,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "agentops.init(auto_start_session=False, tags=[\"MultiOn browse example\"])"
+    "agentops.init(AGENTOPS_API_KEY, auto_start_session=False, default_tags=[\"MultiOn browse example\"])"
    ]
   },
   {
@@ -111,8 +143,8 @@
    "outputs": [],
    "source": [
     "multion = MultiOn(\n",
-    "    api_key=os.environ.get(\"MULTION_API_KEY\"),\n",
-    "    agentops_api_key=os.environ.get(\"AGENTOPS_API_KEY\"),\n",
+    "    api_key=MULTION_API_KEY,\n",
+    "    agentops_api_key=AGENTOPS_API_KEY,\n",
     ")\n",
     "cmd = \"what three things do i get with agentops\"\n",
     "request_options = RequestOptions(\n",
@@ -161,7 +193,6 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# End session to see your dashboard\n",
     "agentops.end_session(\"Success\")"
    ]
   },
@@ -179,11 +210,6 @@
     "\n",
     "![image.png](attachment:image.png)"
    ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": []
   }
  ],
  "metadata": {
diff --git a/examples/multion/Sample_browsing_agent.ipynb b/examples/multion/Sample_browsing_agent.ipynb
index ef964bffc..a0b890fc5 100644
--- a/examples/multion/Sample_browsing_agent.ipynb
+++ b/examples/multion/Sample_browsing_agent.ipynb
@@ -15,16 +15,30 @@
     "Furthermore, events and LLM calls in your Python program will be tracked as well."
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "First let's install the required packages"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Install dependencies\n",
     "%pip install -U multion\n",
     "%pip install -U agentops\n",
-    "%pip install -U openai"
+    "%pip install -U openai\n",
+    "%pip install -U python-dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Then import them"
    ]
   },
   {
@@ -33,11 +47,19 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import os\n",
     "from multion.client import MultiOn\n",
     "from multion.core.request_options import RequestOptions\n",
     "import openai\n",
-    "import agentops"
+    "import agentops\n",
+    "import os\n",
+    "from dotenv import load_dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
    ]
   },
   {
@@ -46,11 +68,20 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Set your API keys\n",
-    "import os\n",
+    "load_dotenv()\n",
+    "MULTION_API_KEY = os.getenv(\"MULTION_API_KEY\") or \"\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\"\n",
+    "OPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\") or \"\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Check your session\n",
+    "Check your session on [AgentOps](https://app.agentops.ai). This session should include the MultiOn browse action and the OpenAI call.\n",
     "\n",
-    "os.environ[\"MULTION_API_KEY\"] = \"multion_key\"\n",
-    "os.environ[\"AGENTOPS_API_KEY\"] = \"agentops_key\""
+    "![image.png](attachment:image.png)"
    ]
   },
   {
@@ -67,7 +98,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "agentops.init(auto_start_session=False, tags=[\"MultiOn browse example\"])"
+    "agentops.init(AGENTOPS_API_KEY, auto_start_session=False, default_tags=[\"MultiOn browse example\"])"
    ]
   },
   {
@@ -84,8 +115,8 @@
    "outputs": [],
    "source": [
     "multion = MultiOn(\n",
-    "    api_key=os.environ.get(\"MULTION_API_KEY\"),\n",
-    "    agentops_api_key=os.environ.get(\"AGENTOPS_API_KEY\"),\n",
+    "    api_key=MULTION_API_KEY,\n",
+    "    agentops_api_key=AGENTOPS_API_KEY,\n",
     ")\n",
     "cmd = \"what three things do i get with agentops\"\n",
     "request_options = RequestOptions(\n",
diff --git a/examples/multion/Step_by_step_web_browsing.ipynb b/examples/multion/Step_by_step_web_browsing.ipynb
index 652a867fb..7069ad9e6 100644
--- a/examples/multion/Step_by_step_web_browsing.ipynb
+++ b/examples/multion/Step_by_step_web_browsing.ipynb
@@ -12,6 +12,13 @@
     "This example shows how to use MultiOn's session creator to launch a self-directed browser agent that accomplishes a specified objective using Step Mode. MultiOn agents can either accomplish tasks fully autonomously or managed one step at a time. In this example, we will launch a MutliOn agent and manage each individual step. "
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "First let's install the required packages"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -19,17 +26,15 @@
    "outputs": [],
    "source": [
     "%pip install -U multion\n",
-    "%pip install -U agentops"
+    "%pip install -U agentops\n",
+    "%pip install -U python-dotenv"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "# Session streaming\n",
-    "In this example, we'll use MultiOn to stream individual steps to accomplish a task. To track your runs in the AgentOps dashboard, specify an `agentops_api_key` when initializing `MultiOn()`\n",
-    "\n",
-    "You can run MultiOn without running `agentops.init()`. However, you will only see events from MultiOn, and not any from your own agent.\n"
+    "Then import them"
    ]
   },
   {
@@ -38,11 +43,22 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Set your API keys\n",
+    "import multion\n",
+    "from multion.client import MultiOn\n",
+    "from multion.sessions.types.sessions_step_request_browser_params import (\n",
+    "    SessionsStepRequestBrowserParams,\n",
+    ")\n",
+    "from multion.core.request_options import RequestOptions\n",
+    "import agentops\n",
     "import os\n",
-    "\n",
-    "os.environ[\"MULTION_API_KEY\"] = \"multion_key\"\n",
-    "os.environ[\"AGENTOPS_API_KEY\"] = \"agentops_key\""
+    "from dotenv import load_dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
    ]
   },
   {
@@ -51,16 +67,40 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import multion\n",
-    "from multion.client import MultiOn\n",
-    "from multion.sessions.types.sessions_step_request_browser_params import (\n",
-    "    SessionsStepRequestBrowserParams,\n",
-    ")\n",
-    "from multion.core.request_options import RequestOptions\n",
+    "load_dotenv()\n",
+    "MULTION_API_KEY = os.getenv(\"MULTION_API_KEY\") or \"\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Check your session\n",
+    "Check your session on [AgentOps](https://app.agentops.ai). This session should include the MultiOn browse action and the OpenAI call.\n",
+    "\n",
+    "![image.png](attachment:image.png)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Session streaming\n",
+    "In this example, we'll use MultiOn to stream individual steps to accomplish a task. To track your runs in the AgentOps dashboard, specify an `agentops_api_key` when initializing `MultiOn()`\n",
     "\n",
+    "You can run MultiOn without running `agentops.init()`. However, you will only see events from MultiOn, and not any from your own agent.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
     "multion = MultiOn(\n",
-    "    api_key=os.environ.get(\"MULTION_API_KEY\"),\n",
-    "    agentops_api_key=os.environ.get(\"AGENTOPS_API_KEY\"),\n",
+    "    api_key=MULTION_API_KEY,\n",
+    "    agentops_api_key=AGENTOPS_API_KEY,\n",
     ")\n",
     "\n",
     "url = \"https://www.agentops.ai/\"\n",
@@ -106,18 +146,6 @@
     "Step stream is just like step, but it streams responses in the same way a streamed LLM response is received. Instead of waiting for the entire step to complete, MultiOn will return work in progress. To track your runs in the AgentOps dashboard, specify an `agentops_api_key` when initializing `MultiOn()`"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import os\n",
-    "\n",
-    "os.environ[\"MULTION_API_KEY\"] = \"e8cbbd0f8fa042f49f267a44bf97425c\"\n",
-    "os.environ[\"AGENTOPS_API_KEY\"] = \"a640373b-30ae-4655-a1f3-5caa882a8721\""
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -135,8 +163,8 @@
     "import os\n",
     "\n",
     "multion = MultiOn(\n",
-    "    api_key=os.environ.get(\"MULTION_API_KEY\"),\n",
-    "    agentops_api_key=os.environ.get(\"AGENTOPS_API_KEY\"),\n",
+    "    api_key=MULTION_API_KEY,\n",
+    "    agentops_api_key=AGENTOPS_API_KEY,\n",
     ")\n",
     "\n",
     "url = \"https://www.agentops.ai/\"\n",
diff --git a/examples/multion/Webpage_data_retrieval.ipynb b/examples/multion/Webpage_data_retrieval.ipynb
index 352090aa0..9fc89741e 100644
--- a/examples/multion/Webpage_data_retrieval.ipynb
+++ b/examples/multion/Webpage_data_retrieval.ipynb
@@ -12,6 +12,13 @@
     "This example shows how to use MultiOn's session creator to launch a self-directed browser agent that accomplishes a specified objective."
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "First let's install the required packages"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -19,7 +26,15 @@
    "outputs": [],
    "source": [
     "%pip install -U multion\n",
-    "%pip install -U agentops"
+    "%pip install -U agentops\n",
+    "%pip install -U python-dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Then import them"
    ]
   },
   {
@@ -28,11 +43,18 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Set your API keys\n",
+    "import multion\n",
+    "from multion.client import MultiOn\n",
+    "import agentops\n",
     "import os\n",
-    "\n",
-    "os.environ[\"MULTION_API_KEY\"] = \"multion_key\"\n",
-    "os.environ[\"AGENTOPS_API_KEY\"] = \"agentops_key\""
+    "from dotenv import load_dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
    ]
   },
   {
@@ -41,12 +63,30 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import multion\n",
-    "from multion.client import MultiOn\n",
+    "load_dotenv()\n",
+    "MULTION_API_KEY = os.getenv(\"MULTION_API_KEY\") or \"\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Check your session\n",
+    "Check your session on [AgentOps](https://app.agentops.ai). This session should include the MultiOn browse action and the OpenAI call.\n",
     "\n",
+    "![image.png](attachment:image.png)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
     "multion = MultiOn(\n",
-    "    api_key=os.environ.get(\"MULTION_API_KEY\"),\n",
-    "    agentops_api_key=os.environ.get(\"AGENTOPS_API_KEY\"),\n",
+    "    api_key=MULTION_API_KEY,\n",
+    "    agentops_api_key=AGENTOPS_API_KEY,\n",
     ")\n",
     "\n",
     "cmd = \"what three things do i get with agentops\"\n",
@@ -79,13 +119,6 @@
     "\n",
     "![AgentOps Multion Retrieve](https://github.com/AgentOps-AI/agentops/blob/main/docs/images/agentops-multion-retrieve.gif?raw=true)"
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {
diff --git a/examples/openai-gpt.ipynb b/examples/openai-gpt.ipynb
index 6ed6e95eb..613880ab1 100644
--- a/examples/openai-gpt.ipynb
+++ b/examples/openai-gpt.ipynb
@@ -61,7 +61,7 @@
     "collapsed": false
    },
    "source": [
-    "Next, we'll grab our two API keys. You can use dotenv like below or however else you like to load environment variables"
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
    ]
   },
   {
diff --git a/examples/recording-events.ipynb b/examples/recording-events.ipynb
index b766a716b..7c744a3c5 100644
--- a/examples/recording-events.ipynb
+++ b/examples/recording-events.ipynb
@@ -10,7 +10,68 @@
     "# Recording Events\n",
     "AgentOps has a number of different [Event Types](https://docs.agentops.ai/v1/details/events)\n",
     "\n",
-    "AgentOps automatically instruments your LLM Calls from OpenAI, LiteLLM, and Cohere. Just make sure their SDKs are imported before initializing AgentOps like we see below"
+    "We automatically instrument your LLM Calls from OpenAI, LiteLLM, Cohere, and more. Just make sure their SDKs are imported before initializing AgentOps like we see below"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "0c475b2e",
+   "metadata": {},
+   "source": [
+    "First let's install the required packages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ef2a575d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install -U openai\n",
+    "%pip install -U agentops\n",
+    "%pip install -U python-dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "45f3c28f",
+   "metadata": {},
+   "source": [
+    "Then import them"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "54b0b276",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from openai import OpenAI\n",
+    "import agentops\n",
+    "import os\n",
+    "from dotenv import load_dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "985ed1da",
+   "metadata": {},
+   "source": [
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "0c3f4b1a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "load_dotenv()\n",
+    "OPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\") or \"\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\""
    ]
   },
   {
@@ -22,14 +83,13 @@
    },
    "outputs": [],
    "source": [
-    "import agentops\n",
-    "import openai\n",
-    "\n",
-    "# Create new session\n",
+    "# Initialize the client, which will automatically start a session\n",
     "agentops.init()\n",
     "\n",
-    "# Optionally, we can add tags to the session\n",
-    "# agentops.init(tags=['Hello Tracker'])\n",
+    "# Optionally, we can add default tags to all sessions\n",
+    "# agentops.init(default_tags=['Hello Tracker'])\n",
+    "\n",
+    "openai = OpenAI()\n",
     "\n",
     "message = ({\"role\": \"user\", \"content\": \"Hello\"},)\n",
     "response = openai.chat.completions.create(\n",
@@ -68,12 +128,10 @@
    "source": [
     "from agentops import record_action\n",
     "\n",
-    "\n",
     "@record_action(\"add numbers\")\n",
     "def add(x, y):\n",
     "    return x + y\n",
     "\n",
-    "\n",
     "add(2, 4)"
    ]
   },
@@ -132,7 +190,7 @@
     "    tool_event = ToolEvent(\n",
     "        name=\"scrape_website\", params={\"url\": url}\n",
     "    )  # the start timestamp is set when the obj is created\n",
-    "    result = integration.scrape_website(data)  # perform tool logic\n",
+    "    result = \"scraped data\"  # perform tool logic\n",
     "    tool_event.returns = result\n",
     "    record(tool_event)"
    ]
diff --git a/tests/core_manual_tests/agentchat_agentops.ipynb b/tests/core_manual_tests/agentchat_agentops.ipynb
index 43283d13f..3450ca6d9 100644
--- a/tests/core_manual_tests/agentchat_agentops.ipynb
+++ b/tests/core_manual_tests/agentchat_agentops.ipynb
@@ -47,20 +47,63 @@
   },
   {
    "cell_type": "markdown",
-   "id": "8d9451f4",
+   "id": "de096590",
    "metadata": {},
    "source": [
-    "````{=mdx}\n",
-    ":::info Requirements\n",
-    "Some extra dependencies are needed for this notebook, which can be installed via pip:\n",
-    "\n",
-    "```bash\n",
-    "pip install pyautogen agentops\n",
-    "```\n",
-    "\n",
-    "For more information, please refer to the [installation guide](/docs/installation/).\n",
-    ":::\n",
-    "````"
+    "First let's install the required packages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f59d1440",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install -U pyautogen\n",
+    "%pip install -U agentops\n",
+    "%pip install -U python-dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "f7f00197",
+   "metadata": {},
+   "source": [
+    "Then import them"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5adf56c9",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from autogen import ConversableAgent, UserProxyAgent, config_list_from_json\n",
+    "import agentops\n",
+    "import os\n",
+    "from dotenv import load_dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "22ef3e34",
+   "metadata": {},
+   "source": [
+    "Next, we'll grab our API keys. You can use dotenv as shown below, or load your environment variables in any other way you prefer"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a2bbb306",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "load_dotenv()\n",
+    "OPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\") or \"\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\""
    ]
   },
   {
@@ -95,7 +138,9 @@
     },
     {
      "data": {
-      "text/plain": "UUID('8771cfe1-d607-4987-8398-161cb5dbb5cf')"
+      "text/plain": [
+       "UUID('8771cfe1-d607-4987-8398-161cb5dbb5cf')"
+      ]
      },
      "execution_count": 2,
      "metadata": {},
@@ -103,11 +148,7 @@
     }
    ],
    "source": [
-    "import agentops\n",
-    "\n",
-    "from autogen import ConversableAgent, UserProxyAgent, config_list_from_json\n",
-    "\n",
-    "agentops.init(api_key=\"6f7b89eb-286f-44ed-af9c-a166358e5561\")"
+    "agentops.init(AGENTOPS_API_KEY)"
    ]
   },
   {
@@ -171,12 +212,8 @@
     }
    ],
    "source": [
-    "import agentops\n",
-    "\n",
     "# When initializing AgentOps, you can pass in optional tags to help filter sessions\n",
-    "agentops.init(\n",
-    "    tags=[\"simple-autogen-example\"], api_key=\"6f7b89eb-286f-44ed-af9c-a166358e5561\"\n",
-    ")\n",
+    "agentops.init(default_tags=[\"simple-autogen-example\"])\n",
     "\n",
     "agentops.start_session()\n",
     "\n",
diff --git a/tests/core_manual_tests/upsert_events.py.ipynb b/tests/core_manual_tests/upsert_events.py.ipynb
deleted file mode 100644
index 434008ae1..000000000
--- a/tests/core_manual_tests/upsert_events.py.ipynb
+++ /dev/null
@@ -1,50 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": 2,
-   "id": "initial_id",
-   "metadata": {
-    "collapsed": true,
-    "ExecuteTime": {
-     "end_time": "2024-05-07T01:49:47.567382Z",
-     "start_time": "2024-05-07T01:49:47.148365Z"
-    }
-   },
-   "outputs": [],
-   "source": [
-    "import agentops"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "outputs": [],
-   "source": [],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "cc69b52023168f58"
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 2
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython2",
-   "version": "2.7.6"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}