diff --git a/.github/workflows/test-notebooks.yml b/.github/workflows/test-notebooks.yml
index feb390e3..1d548d1c 100644
--- a/.github/workflows/test-notebooks.yml
+++ b/.github/workflows/test-notebooks.yml
@@ -1,59 +1,91 @@
-name: Run Notebook and Check Logs
-
+name: Test Notebooks
 on:
   push:
     branches: [ main ]
   pull_request:
     branches: [ main ]
-
 jobs:
-  run-notebook-and-check-logs:
+  test-notebooks:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ['3.8']
+        python-version: ['3.11']
       fail-fast: false
-
     steps:
     - uses: actions/checkout@v4
-
     - name: Set up Python ${{ matrix.python-version }}
       uses: actions/setup-python@v4
       with:
         python-version: ${{ matrix.python-version }}
-
     - name: Install dependencies
       run: |
         python -m pip install --upgrade pip
         pip install -U jupyter
-
     - name: Create .env file
       run: |
         echo "OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }}" >> .env
         echo "AGENTOPS_API_KEY=${{ secrets.AGENTOPS_API_KEY }}" >> .env
         echo "CO_API_KEY=${{ secrets.CO_API_KEY }}" >> .env
-
-    - name: Run notebook
+        echo "GROQ_API_KEY=${{ secrets.GROQ_API_KEY }}" >> .env
+        echo "MULTION_API_KEY=${{ secrets.MULTION_API_KEY }}" >> .env
+        echo "SERPER_API_KEY=${{ secrets.SERPER_API_KEY }}" >> .env
+    - name: Run notebooks and check for errors
       run: |
-        jupyter execute examples/openai-gpt.ipynb
+        mkdir -p logs
+        exit_code=0

-    - name: Check for errors and warnings
-      run: |
-        if [ -f agentops.log ]; then
-          if grep -E "ERROR|WARNING" agentops.log; then
-            echo "Errors or warnings found in agentops.log for Python ${{ matrix.python-version }}"
-            exit 1
+        exclude_notebooks=(
+          "./examples/crew/job_posting.ipynb"
+        )
+
+        for notebook in $(find . -name '*.ipynb'); do
+
+          skip=false
+          for excluded in "${exclude_notebooks[@]}"; do
+            if [[ "$notebook" == "$excluded" ]]; then
+              skip=true
+              echo "Skipping excluded notebook: $notebook"
+              break
+            fi
+          done
+
+          $skip && continue
+
+          notebook_name=$(basename "$notebook" .ipynb)
+          notebook_path=$(realpath "$notebook")
+          notebook_dir=$(dirname "$notebook_path")
+
+          # Run the notebook
+          jupyter execute "$notebook_path" || true
+
+          # Check if agentops.log was created
+          if [ -f "${notebook_dir}/agentops.log" ]; then
+            dest_log="logs/agentops-${notebook_name}.log"
+            mv "${notebook_dir}/agentops.log" "$dest_log"
+
+            # Check agentops log for errors or warnings
+            if grep -E "ERROR|WARNING" "$dest_log"; then
+              echo "Errors or warnings found in $dest_log for Python ${{ matrix.python-version }}"
+              exit_code=1
+            else
+              echo "No errors or warnings found in $dest_log for Python ${{ matrix.python-version }}"
+            fi
           else
-            echo "No errors or warnings found in agentops.log for Python ${{ matrix.python-version }}"
+            echo "No agentops.log generated for $notebook_name"
           fi
-        else
-          echo "agentops.log file not found. Assuming successful execution without logging."
+        done
+
+        # Check if any logs were found
+        if [ $(find logs -name 'agentops-*.log' | wc -l) -eq 0 ]; then
+          echo "No agentops.log files were generated for any notebook"
         fi

-    - name: Upload log as artifact (if exists)
+        exit $exit_code
+
+    - name: Upload logs as artifacts
       uses: actions/upload-artifact@v4
       if: always()
       with:
-        name: agentops-log-${{ matrix.python-version }}
-        path: agentops.log
-        if-no-files-found: ignore
+        name: notebook-logs-${{ matrix.python-version }}
+        path: logs/agentops-*.log
+        if-no-files-found: warn
\ No newline at end of file
diff --git a/docs/v1/examples/notebooks/multion/Step_by_step_web_browsing.html b/docs/v1/examples/notebooks/multion/Step_by_step_web_browsing.html
index 09a3f830..c7c5143d 100644
--- a/docs/v1/examples/notebooks/multion/Step_by_step_web_browsing.html
+++ b/docs/v1/examples/notebooks/multion/Step_by_step_web_browsing.html
@@ -330,8 +330,8 @@

Step Stream

import os
 
-os.environ["MULTION_API_KEY"] = "e8cbbd0f8fa042f49f267a44bf97425c"
-os.environ["AGENTOPS_API_KEY"] = "a640373b-30ae-4655-a1f3-5caa882a8721"
+os.environ["MULTION_API_KEY"] =
+os.environ["AGENTOPS_API_KEY"] =
\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "d93f2339-7b99-4cf1-9232-c24faba49c7b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
     "# When initializing AgentOps, you can pass in optional tags to help filter sessions\n",
-    "agentops.init(api_key=\"...\", tags=[\"simple-autogen-example\"])\n",
+    "agentops.init(AGENTOPS_API_KEY, default_tags=[\"simple-autogen-example\"])\n",
     "\n",
     "print(\"AgentOps is now running. You can view your session in the link above\")"
    ]
@@ -63,7 +105,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 5,
    "id": "2962d990-f7ef-43d8-ba09-d29bd8356d9f",
    "metadata": {},
    "outputs": [
@@ -71,95 +113,29 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "\u001b[33magent\u001b[0m (to user):\n",
-      "\n",
-      "How can I help you today?\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Provide feedback to agent. Press enter to skip and use auto-reply, or type 'exit' to end the conversation:  Tell me a joke about AgentOps\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\u001b[33muser\u001b[0m (to agent):\n",
-      "\n",
-      "Tell me a joke about AgentOps\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n",
+      "\u001b[31m\n",
+      ">>>>>>>> NO HUMAN INPUT RECEIVED.\u001b[0m\n",
       "\u001b[31m\n",
       ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
-      "\u001b[33magent\u001b[0m (to user):\n",
-      "\n",
-      "Why don't AgentOps teams ever play hide and seek?\n",
-      "\n",
-      "Because good luck hiding when they always know where everyone is supposed to be!\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Provide feedback to agent. Press enter to skip and use auto-reply, or type 'exit' to end the conversation:  Another\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
       "\u001b[33muser\u001b[0m (to agent):\n",
       "\n",
-      "Another\n",
+      "\n",
       "\n",
       "--------------------------------------------------------------------------------\n",
       "\u001b[31m\n",
       ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
       "\u001b[33magent\u001b[0m (to user):\n",
       "\n",
-      "Why did the AgentOps team bring a ladder to work?\n",
-      "\n",
-      "Because they’re always reaching for high-level optimizations!\n",
+      "It seems there might still be an issue. If you need assistance or have questions later on, don't hesitate to reach out. I'm here to help whenever you're ready!\n",
       "\n",
       "--------------------------------------------------------------------------------\n"
      ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Provide feedback to agent. Press enter to skip and use auto-reply, or type 'exit' to end the conversation:  exit\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "πŸ–‡ AgentOps: This run's cost $0.001080\n",
-      "πŸ–‡ AgentOps: \u001b[34m\u001b[34mSession Replay: https://app.agentops.ai/drilldown?session_id=24c5d9f6-fb82-41e6-a468-2dc74a5318a3\u001b[0m\u001b[0m\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Success! Visit your AgentOps dashboard to see the replay\n"
-     ]
     }
    ],
    "source": [
-    "# Define an openai api key for the agent configurations\n",
-    "openai_api_key = \"...\"\n",
+    "# Define model, openai api key, tags, etc in the agent configuration\n",
     "config_list = [\n",
-    "    {\"model\": \"gpt-4-turbo\", \"api_key\": openai_api_key, \"tags\": [\"gpt-4\", \"tool\"]}\n",
+    "    {\"model\": \"gpt-4-turbo\", \"api_key\": OPENAI_API_KEY, \"tags\": [\"gpt-4\", \"tool\"]}\n",
     "]\n",
     "\n",
     "# Create the agent that uses the LLM.\n",
@@ -185,14 +161,6 @@
     "\n",
     "The dashboard will display LLM events for each message sent by each agent, including those made by the human user."
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "72993a75-1031-4874-aa26-0ef816a3256c",
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {
@@ -211,7 +179,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.5"
+   "version": "3.12.3"
   }
  },
  "nbformat": 4,
diff --git a/examples/autogen/MathAgent.ipynb b/examples/autogen/MathAgent.ipynb
index d7100c30..c0e0c99d 100644
--- a/examples/autogen/MathAgent.ipynb
+++ b/examples/autogen/MathAgent.ipynb
@@ -14,33 +14,77 @@
     "AgentOps automatically configures itself when it's initialized meaning your agent run data will be tracked and logged to your AgentOps account right away."
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "083244fa",
+   "metadata": {},
+   "source": [
+    "First let's install the required packages"
+   ]
+  },
   {
    "cell_type": "code",
-   "execution_count": 1,
-   "id": "d93f2339-7b99-4cf1-9232-c24faba49c7b",
+   "execution_count": null,
+   "id": "9c8104ad",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install -U pyautogen\n",
+    "%pip install -U agentops\n",
+    "%pip install -U python-dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "cc44e459",
+   "metadata": {},
+   "source": [
+    "Then import them"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "7672f591",
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "πŸ–‡ AgentOps: \u001b[34m\u001b[34mSession Replay: https://app.agentops.ai/drilldown?session_id=51556b00-428b-4cae-b815-1320ebc7e810\u001b[0m\u001b[0m\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "AgentOps is now running. You can view your session in the link above\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
+    "from autogen import ConversableAgent\n",
     "from typing import Annotated, Literal\n",
     "from autogen import ConversableAgent, register_function\n",
     "import agentops\n",
-    "\n",
-    "agentops.init(api_key=\"...\", tags=[\"autogen-tool-example\"])\n",
+    "import os\n",
+    "from dotenv import load_dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "24f8bd70",
+   "metadata": {},
+   "source": [
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9eeaef34",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "load_dotenv()\n",
+    "OPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\") or \"\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "d93f2339-7b99-4cf1-9232-c24faba49c7b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "agentops.init(AGENTOPS_API_KEY, default_tags=[\"autogen-tool-example\"])\n",
     "\n",
     "print(\"AgentOps is now running. You can view your session in the link above\")"
    ]
@@ -62,165 +106,23 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": null,
    "id": "2962d990-f7ef-43d8-ba09-d29bd8356d9f",
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Define an openai api key for the agent configurations\n",
-    "openai_api_key = \"...\"\n",
+    "# Define model, openai api key, tags, etc in the agent configuration\n",
     "config_list = [\n",
-    "    {\"model\": \"gpt-4-turbo\", \"api_key\": openai_api_key, \"tags\": [\"gpt-4\", \"tool\"]}\n",
+    "    {\"model\": \"gpt-4-turbo\", \"api_key\": OPENAI_API_KEY, \"tags\": [\"gpt-4\", \"tool\"]}\n",
     "]"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": null,
    "id": "9e4dfe37-85e0-4035-a314-3459c6e378c4",
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "πŸ–‡ AgentOps: Cannot start session - session already started\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\u001b[33mUser\u001b[0m (to Assistant):\n",
-      "\n",
-      "What is (1423 - 123) / 3 + (32 + 23) * 5?\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n",
-      "\u001b[31m\n",
-      ">>>>>>>> USING AUTO REPLY...\u001b[0m\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "/Users/reibs/Projects/autogen/autogen/agentchat/conversable_agent.py:2489: UserWarning: Function 'calculator' is being overridden.\n",
-      "  warnings.warn(f\"Function '{tool_sig['function']['name']}' is being overridden.\", UserWarning)\n",
-      "/Users/reibs/Projects/autogen/autogen/agentchat/conversable_agent.py:2408: UserWarning: Function 'calculator' is being overridden.\n",
-      "  warnings.warn(f\"Function '{name}' is being overridden.\", UserWarning)\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\u001b[33mAssistant\u001b[0m (to User):\n",
-      "\n",
-      "\u001b[32m***** Suggested tool call (call_pSJXJKu1qbfRV4SVNHzTaG1z): calculator *****\u001b[0m\n",
-      "Arguments: \n",
-      "{\"a\": 1423, \"b\": 123, \"operator\": \"-\"}\n",
-      "\u001b[32m***************************************************************************\u001b[0m\n",
-      "\u001b[32m***** Suggested tool call (call_kHRXi8vq5XsZSrGDnVTA1oy7): calculator *****\u001b[0m\n",
-      "Arguments: \n",
-      "{\"a\": 32, \"b\": 23, \"operator\": \"+\"}\n",
-      "\u001b[32m***************************************************************************\u001b[0m\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n",
-      "\u001b[35m\n",
-      ">>>>>>>> EXECUTING FUNCTION calculator...\u001b[0m\n",
-      "\u001b[35m\n",
-      ">>>>>>>> EXECUTING FUNCTION calculator...\u001b[0m\n",
-      "\u001b[33mUser\u001b[0m (to Assistant):\n",
-      "\n",
-      "\u001b[33mUser\u001b[0m (to Assistant):\n",
-      "\n",
-      "\u001b[32m***** Response from calling tool (call_pSJXJKu1qbfRV4SVNHzTaG1z) *****\u001b[0m\n",
-      "1300\n",
-      "\u001b[32m**********************************************************************\u001b[0m\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n",
-      "\u001b[33mUser\u001b[0m (to Assistant):\n",
-      "\n",
-      "\u001b[32m***** Response from calling tool (call_kHRXi8vq5XsZSrGDnVTA1oy7) *****\u001b[0m\n",
-      "55\n",
-      "\u001b[32m**********************************************************************\u001b[0m\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n",
-      "\u001b[31m\n",
-      ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
-      "\u001b[33mAssistant\u001b[0m (to User):\n",
-      "\n",
-      "\u001b[32m***** Suggested tool call (call_srDG3bYDpkdoIVn1mVVJNTJj): calculator *****\u001b[0m\n",
-      "Arguments: \n",
-      "{\"a\": 1300, \"b\": 3, \"operator\": \"/\"}\n",
-      "\u001b[32m***************************************************************************\u001b[0m\n",
-      "\u001b[32m***** Suggested tool call (call_jXJHjxZlnsHEbLaG4hQXUX1v): calculator *****\u001b[0m\n",
-      "Arguments: \n",
-      "{\"a\": 55, \"b\": 5, \"operator\": \"*\"}\n",
-      "\u001b[32m***************************************************************************\u001b[0m\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n",
-      "\u001b[35m\n",
-      ">>>>>>>> EXECUTING FUNCTION calculator...\u001b[0m\n",
-      "\u001b[35m\n",
-      ">>>>>>>> EXECUTING FUNCTION calculator...\u001b[0m\n",
-      "\u001b[33mUser\u001b[0m (to Assistant):\n",
-      "\n",
-      "\u001b[33mUser\u001b[0m (to Assistant):\n",
-      "\n",
-      "\u001b[32m***** Response from calling tool (call_srDG3bYDpkdoIVn1mVVJNTJj) *****\u001b[0m\n",
-      "433\n",
-      "\u001b[32m**********************************************************************\u001b[0m\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n",
-      "\u001b[33mUser\u001b[0m (to Assistant):\n",
-      "\n",
-      "\u001b[32m***** Response from calling tool (call_jXJHjxZlnsHEbLaG4hQXUX1v) *****\u001b[0m\n",
-      "275\n",
-      "\u001b[32m**********************************************************************\u001b[0m\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n",
-      "\u001b[31m\n",
-      ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
-      "\u001b[33mAssistant\u001b[0m (to User):\n",
-      "\n",
-      "\u001b[32m***** Suggested tool call (call_jKyiL6oizDZPfX16hhVi6pa3): calculator *****\u001b[0m\n",
-      "Arguments: \n",
-      "{\"a\":433,\"b\":275,\"operator\":\"+\"}\n",
-      "\u001b[32m***************************************************************************\u001b[0m\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n",
-      "\u001b[35m\n",
-      ">>>>>>>> EXECUTING FUNCTION calculator...\u001b[0m\n",
-      "\u001b[33mUser\u001b[0m (to Assistant):\n",
-      "\n",
-      "\u001b[33mUser\u001b[0m (to Assistant):\n",
-      "\n",
-      "\u001b[32m***** Response from calling tool (call_jKyiL6oizDZPfX16hhVi6pa3) *****\u001b[0m\n",
-      "708\n",
-      "\u001b[32m**********************************************************************\u001b[0m\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n",
-      "\u001b[31m\n",
-      ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
-      "\u001b[33mAssistant\u001b[0m (to User):\n",
-      "\n",
-      "The result of the expression \\((1423 - 123) / 3 + (32 + 23) * 5\\) is 708. \n",
-      "\n",
-      "'TERMINATE'\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "πŸ–‡ AgentOps: This run's cost $0.000600\n",
-      "πŸ–‡ AgentOps: \u001b[34m\u001b[34mSession Replay: https://app.agentops.ai/drilldown?session_id=51556b00-428b-4cae-b815-1320ebc7e810\u001b[0m\u001b[0m\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "Operator = Literal[\"+\", \"-\", \"*\", \"/\"]\n",
     "\n",
@@ -288,14 +190,6 @@
     "* Each use of the calculator tool\n",
     "* Each call to OpenAI for LLM use"
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "0b8919ec-ff81-4c94-95de-0d2c5dabbdd9",
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {
diff --git a/examples/cohere/cohere_example.ipynb b/examples/cohere/cohere_example.ipynb
new file mode 100644
index 00000000..96ff01d6
--- /dev/null
+++ b/examples/cohere/cohere_example.ipynb
@@ -0,0 +1,152 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Cohere example"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "First let's install the required packages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install -U cohere\n",
+    "%pip install -U agentops"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Then import them"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import cohere\n",
+    "import agentops\n",
+    "import os\n",
+    "from dotenv import load_dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "load_dotenv()\n",
+    "CO_API_KEY = os.getenv(\"CO_API_KEY\") or \"\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "agentops.init(AGENTOPS_API_KEY, default_tags=[\"cohere-example\"])\n",
+    "co = cohere.Client()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "stream = co.chat_stream(\n",
+    "    message=\"Tell me everything you can about AgentOps\",\n",
+    "    connectors=[{\"id\": \"web-search\"}],\n",
+    ")\n",
+    "\n",
+    "response = \"\"\n",
+    "for event in stream:\n",
+    "    if event.event_type == \"text-generation\":\n",
+    "        response += event.text\n",
+    "        print(event.text, end=\"\")\n",
+    "    elif event.event_type == \"stream-end\":\n",
+    "        print(\"\\n\")\n",
+    "        print(event)\n",
+    "        print(\"\\n\")\n",
+    "\n",
+    "stream = co.chat_stream(\n",
+    "    chat_history=[\n",
+    "        {\n",
+    "            \"role\": \"SYSTEM\",\n",
+    "            \"message\": \"You are Adam Silverman: die-hard advocate of AgentOps, leader in AI Agent observability\",\n",
+    "        },\n",
+    "        {\n",
+    "            \"role\": \"CHATBOT\",\n",
+    "            \"message\": \"How's your day going? I'd like to tell you about AgentOps: {response}\",\n",
+    "        },\n",
+    "    ],\n",
+    "    message=\"Based on your newfound knowledge of AgentOps, is Cohere a suitable partner for them and how could they integrate?\",\n",
+    "    connectors=[{\"id\": \"web-search\"}],\n",
+    ")\n",
+    "\n",
+    "response = \"\"\n",
+    "for event in stream:\n",
+    "    if event.event_type == \"text-generation\":\n",
+    "        response += event.text\n",
+    "        print(event.text, end=\"\")\n",
+    "    elif event.event_type == \"stream-end\":\n",
+    "        print(\"\\n\")\n",
+    "        print(event)\n",
+    "        print(\"\\n\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "agentops.end_session(\"Success\")"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "env",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
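Note: in both the new notebook above and the deleted script below, the second `chat_history` entry passes `{response}` as a literal string rather than an f-string, so the text accumulated from the first stream is never actually interpolated. If interpolation is the intent, a minimal sketch (assuming the `response` variable built up in the first loop is still in scope):

```python
# Sketch only: the shipped code sends "{response}" verbatim.
# An f-string would embed the first stream's accumulated text instead.
chat_history = [
    {
        "role": "SYSTEM",
        "message": "You are Adam Silverman: die-hard advocate of AgentOps, leader in AI Agent observability",
    },
    {
        "role": "CHATBOT",
        "message": f"How's your day going? I'd like to tell you about AgentOps: {response}",
    },
]
```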
diff --git a/examples/cohere_example.py b/examples/cohere_example.py
deleted file mode 100644
index 4c8557cd..00000000
--- a/examples/cohere_example.py
+++ /dev/null
@@ -1,50 +0,0 @@
-import cohere
-import agentops  # just
-from dotenv import load_dotenv
-
-load_dotenv()
-
-agentops.init(tags=["cohere", "agentops-demo"])  # three
-co = cohere.Client()
-
-stream = co.chat_stream(
-    message="Tell me everything you can about AgentOps",
-    connectors=[{"id": "web-search"}],
-)
-
-response = ""
-for event in stream:
-    if event.event_type == "text-generation":
-        response += event.text
-        print(event.text, end="")
-    elif event.event_type == "stream-end":
-        print("\n")
-        print(event)
-        print("\n")
-
-stream = co.chat_stream(
-    chat_history=[
-        {
-            "role": "SYSTEM",
-            "message": "You are Adam Silverman: die-hard advocate of AgentOps, leader in AI Agent observability",
-        },
-        {
-            "role": "CHATBOT",
-            "message": "How's your day going? I'd like to tell you about AgentOps: {response}",
-        },
-    ],
-    message="Based on your newfound knowledge of AgentOps, is Cohere a suitable partner for them and how could they integrate?",
-    connectors=[{"id": "web-search"}],
-)
-
-response = ""
-for event in stream:
-    if event.event_type == "text-generation":
-        response += event.text
-        print(event.text, end="")
-    elif event.event_type == "stream-end":
-        print("\n")
-        print(event)
-        print("\n")
-
-agentops.end_session("Success")  # lines
diff --git a/examples/crew/README.md b/examples/crew/README.md
new file mode 100644
index 00000000..ef06186d
--- /dev/null
+++ b/examples/crew/README.md
@@ -0,0 +1,17 @@
+# AI Crew for Reviewing Markdown Syntax
+
+## Introduction
+This project is an example of using the CrewAI framework to automate the process of reviewing a markdown file for syntax issues. A general assistant leverages a custom tool to get a list of markdown linting errors. It then summarizes those errors into a list of changes to make to the document.
+
+## Running the Script
+This example uses the OpenAI API to call a model. The model can be served through a locally hosted solution like LM Studio or through the OpenAI API endpoint with your API key.
+
+- **Configure Environment**: Copy `.env.example` and set the environment variables for the model, endpoint URL, and API key.
+- **Install Dependencies**: Run `poetry install --no-root`.
+- **Execute the Script**: Run `python main.py README.md` to see a list of recommended changes to this document.
+
+## Details & Explanation
+- **Running the Script**: Execute `python main.py <file>`. The script will leverage the CrewAI framework to process the specified file and return a list of changes.
+
+## License
+This project is released under the MIT License.
\ No newline at end of file
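Like the other notebooks in this diff, the crew example is expected to read its configuration from a local `.env` file via python-dotenv. A minimal sketch of that pattern (the model and endpoint variable names are illustrative assumptions, not copied from `.env.example`):

```python
# Minimal sketch of the dotenv pattern used across these notebooks.
# MODEL and OPENAI_API_BASE are assumed names; check .env.example for the real ones.
import os
from dotenv import load_dotenv

load_dotenv()  # reads key=value pairs from ./.env
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") or ""
OPENAI_API_BASE = os.getenv("OPENAI_API_BASE") or "http://localhost:1234/v1"  # e.g. LM Studio
MODEL = os.getenv("MODEL") or "gpt-4-turbo"
```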
diff --git a/examples/crew/job_posting.ipynb b/examples/crew/job_posting.ipynb
index f6f75eeb..5e41e61c 100644
--- a/examples/crew/job_posting.ipynb
+++ b/examples/crew/job_posting.ipynb
@@ -1,35 +1,428 @@
 {
  "cells": [
   {
-   "cell_type": "code",
-   "execution_count": null,
+   "cell_type": "markdown",
    "metadata": {},
-   "outputs": [],
    "source": [
-    "# %pip install -e ../.."
+    "First let's install the required packages"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 1,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Collecting crewai[tools]\n",
+      "  Using cached crewai-0.41.1-py3-none-any.whl.metadata (13 kB)\n",
+      "Collecting appdirs<2.0.0,>=1.4.4 (from crewai[tools])\n",
+      "  Using cached appdirs-1.4.4-py2.py3-none-any.whl.metadata (9.0 kB)\n",
+      "Requirement already satisfied: click<9.0.0,>=8.1.7 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from crewai[tools]) (8.1.7)\n",
+      "Collecting crewai-tools<0.5.0,>=0.4.26 (from crewai[tools])\n",
+      "  Using cached crewai_tools-0.4.26-py3-none-any.whl.metadata (4.6 kB)\n",
+      "Collecting embedchain<0.2.0,>=0.1.114 (from crewai[tools])\n",
+      "  Downloading embedchain-0.1.120-py3-none-any.whl.metadata (9.3 kB)\n",
+      "Collecting instructor==1.3.3 (from crewai[tools])\n",
+      "  Using cached instructor-1.3.3-py3-none-any.whl.metadata (13 kB)\n",
+      "Collecting json-repair<0.26.0,>=0.25.2 (from crewai[tools])\n",
+      "  Using cached json_repair-0.25.3-py3-none-any.whl.metadata (7.9 kB)\n",
+      "Collecting jsonref<2.0.0,>=1.1.0 (from crewai[tools])\n",
+      "  Using cached jsonref-1.1.0-py3-none-any.whl.metadata (2.7 kB)\n",
+      "Requirement already satisfied: langchain<=0.3,>0.2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from crewai[tools]) (0.2.12)\n",
+      "Requirement already satisfied: openai<2.0.0,>=1.13.3 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from crewai[tools]) (1.40.2)\n",
+      "Collecting opentelemetry-api<2.0.0,>=1.22.0 (from crewai[tools])\n",
+      "  Downloading opentelemetry_api-1.26.0-py3-none-any.whl.metadata (1.4 kB)\n",
+      "Collecting opentelemetry-exporter-otlp-proto-http<2.0.0,>=1.22.0 (from crewai[tools])\n",
+      "  Downloading opentelemetry_exporter_otlp_proto_http-1.26.0-py3-none-any.whl.metadata (2.3 kB)\n",
+      "Collecting opentelemetry-sdk<2.0.0,>=1.22.0 (from crewai[tools])\n",
+      "  Downloading opentelemetry_sdk-1.26.0-py3-none-any.whl.metadata (1.5 kB)\n",
+      "Requirement already satisfied: pydantic<3.0.0,>=2.4.2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from crewai[tools]) (2.8.2)\n",
+      "Requirement already satisfied: python-dotenv<2.0.0,>=1.0.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from crewai[tools]) (1.0.1)\n",
+      "Collecting regex<2024.0.0,>=2023.12.25 (from crewai[tools])\n",
+      "  Using cached regex-2023.12.25-cp312-cp312-macosx_11_0_arm64.whl.metadata (40 kB)\n",
+      "Requirement already satisfied: aiohttp<4.0.0,>=3.9.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from instructor==1.3.3->crewai[tools]) (3.10.2)\n",
+      "Collecting docstring-parser<0.17,>=0.16 (from instructor==1.3.3->crewai[tools])\n",
+      "  Using cached docstring_parser-0.16-py3-none-any.whl.metadata (3.0 kB)\n",
+      "Collecting jiter<0.5.0,>=0.4.1 (from instructor==1.3.3->crewai[tools])\n",
+      "  Using cached jiter-0.4.2-cp312-cp312-macosx_11_0_arm64.whl.metadata (3.6 kB)\n",
+      "Requirement already satisfied: pydantic-core<3.0.0,>=2.18.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from instructor==1.3.3->crewai[tools]) (2.20.1)\n",
+      "Collecting rich<14.0.0,>=13.7.0 (from instructor==1.3.3->crewai[tools])\n",
+      "  Using cached rich-13.7.1-py3-none-any.whl.metadata (18 kB)\n",
+      "Requirement already satisfied: tenacity<9.0.0,>=8.2.3 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from instructor==1.3.3->crewai[tools]) (8.5.0)\n",
+      "Collecting typer<1.0.0,>=0.9.0 (from instructor==1.3.3->crewai[tools])\n",
+      "  Using cached typer-0.12.3-py3-none-any.whl.metadata (15 kB)\n",
+      "Collecting beautifulsoup4<5.0.0,>=4.12.3 (from crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached beautifulsoup4-4.12.3-py3-none-any.whl.metadata (3.8 kB)\n",
+      "Collecting chromadb<0.5.0,>=0.4.22 (from crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached chromadb-0.4.24-py3-none-any.whl.metadata (7.3 kB)\n",
+      "Requirement already satisfied: docker<8.0.0,>=7.1.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from crewai-tools<0.5.0,>=0.4.26->crewai[tools]) (7.1.0)\n",
+      "Collecting docx2txt<0.9,>=0.8 (from crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached docx2txt-0.8-py3-none-any.whl\n",
+      "Collecting lancedb<0.6.0,>=0.5.4 (from crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached lancedb-0.5.7-py3-none-any.whl.metadata (17 kB)\n",
+      "Collecting pyright<2.0.0,>=1.1.350 (from crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Downloading pyright-1.1.375-py3-none-any.whl.metadata (6.2 kB)\n",
+      "Collecting pytest<9.0.0,>=8.0.0 (from crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Downloading pytest-8.3.2-py3-none-any.whl.metadata (7.5 kB)\n",
+      "Collecting pytube<16.0.0,>=15.0.0 (from crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached pytube-15.0.0-py3-none-any.whl.metadata (5.0 kB)\n",
+      "Requirement already satisfied: requests<3.0.0,>=2.31.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from crewai-tools<0.5.0,>=0.4.26->crewai[tools]) (2.31.0)\n",
+      "Collecting selenium<5.0.0,>=4.18.1 (from crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Downloading selenium-4.23.1-py3-none-any.whl.metadata (7.1 kB)\n",
+      "Collecting alembic<2.0.0,>=1.13.1 (from embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached alembic-1.13.2-py3-none-any.whl.metadata (7.4 kB)\n",
+      "Requirement already satisfied: cohere<6.0,>=5.3 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from embedchain<0.2.0,>=0.1.114->crewai[tools]) (5.6.2)\n",
+      "Collecting google-cloud-aiplatform<2.0.0,>=1.26.1 (from embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Downloading google_cloud_aiplatform-1.61.0-py2.py3-none-any.whl.metadata (31 kB)\n",
+      "Collecting gptcache<0.2.0,>=0.1.43 (from embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Downloading gptcache-0.1.44-py3-none-any.whl.metadata (24 kB)\n",
+      "Collecting langchain-cohere<0.2.0,>=0.1.4 (from embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached langchain_cohere-0.1.9-py3-none-any.whl.metadata (6.6 kB)\n",
+      "Collecting langchain-community<0.3.0,>=0.2.6 (from embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Downloading langchain_community-0.2.11-py3-none-any.whl.metadata (2.7 kB)\n",
+      "Collecting langchain-openai<0.2.0,>=0.1.7 (from embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Downloading langchain_openai-0.1.20-py3-none-any.whl.metadata (2.6 kB)\n",
+      "Collecting mem0ai<0.0.10,>=0.0.9 (from embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Downloading mem0ai-0.0.9-py3-none-any.whl.metadata (3.8 kB)\n",
+      "Collecting posthog<4.0.0,>=3.0.2 (from embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached posthog-3.5.0-py2.py3-none-any.whl.metadata (2.0 kB)\n",
+      "Collecting pypdf<5.0.0,>=4.0.1 (from embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Downloading pypdf-4.3.1-py3-none-any.whl.metadata (7.4 kB)\n",
+      "Collecting pysbd<0.4.0,>=0.3.4 (from embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached pysbd-0.3.4-py3-none-any.whl.metadata (6.1 kB)\n",
+      "Collecting schema<0.8.0,>=0.7.5 (from embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached schema-0.7.7-py2.py3-none-any.whl.metadata (34 kB)\n",
+      "Requirement already satisfied: sqlalchemy<3.0.0,>=2.0.27 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from embedchain<0.2.0,>=0.1.114->crewai[tools]) (2.0.32)\n",
+      "Requirement already satisfied: tiktoken<0.8.0,>=0.7.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from embedchain<0.2.0,>=0.1.114->crewai[tools]) (0.7.0)\n",
+      "Requirement already satisfied: PyYAML>=5.3 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain<=0.3,>0.2->crewai[tools]) (6.0.2)\n",
+      "Requirement already satisfied: langchain-core<0.3.0,>=0.2.27 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain<=0.3,>0.2->crewai[tools]) (0.2.29)\n",
+      "Requirement already satisfied: langchain-text-splitters<0.3.0,>=0.2.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain<=0.3,>0.2->crewai[tools]) (0.2.2)\n",
+      "Requirement already satisfied: langsmith<0.2.0,>=0.1.17 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain<=0.3,>0.2->crewai[tools]) (0.1.98)\n",
+      "Requirement already satisfied: numpy<2.0.0,>=1.26.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain<=0.3,>0.2->crewai[tools]) (1.26.4)\n",
+      "Requirement already satisfied: anyio<5,>=3.5.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from openai<2.0.0,>=1.13.3->crewai[tools]) (4.4.0)\n",
+      "Requirement already satisfied: distro<2,>=1.7.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from openai<2.0.0,>=1.13.3->crewai[tools]) (1.9.0)\n",
+      "Requirement already satisfied: httpx<1,>=0.23.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from openai<2.0.0,>=1.13.3->crewai[tools]) (0.27.0)\n",
+      "Requirement already satisfied: sniffio in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from openai<2.0.0,>=1.13.3->crewai[tools]) (1.3.1)\n",
+      "Requirement already satisfied: tqdm>4 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from openai<2.0.0,>=1.13.3->crewai[tools]) (4.66.5)\n",
+      "Requirement already satisfied: typing-extensions<5,>=4.11 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from openai<2.0.0,>=1.13.3->crewai[tools]) (4.12.2)\n",
+      "Collecting deprecated>=1.2.6 (from opentelemetry-api<2.0.0,>=1.22.0->crewai[tools])\n",
+      "  Using cached Deprecated-1.2.14-py2.py3-none-any.whl.metadata (5.4 kB)\n",
+      "Collecting importlib-metadata<=8.0.0,>=6.0 (from opentelemetry-api<2.0.0,>=1.22.0->crewai[tools])\n",
+      "  Using cached importlib_metadata-8.0.0-py3-none-any.whl.metadata (4.6 kB)\n",
+      "Collecting googleapis-common-protos~=1.52 (from opentelemetry-exporter-otlp-proto-http<2.0.0,>=1.22.0->crewai[tools])\n",
+      "  Using cached googleapis_common_protos-1.63.2-py2.py3-none-any.whl.metadata (1.5 kB)\n",
+      "Collecting opentelemetry-exporter-otlp-proto-common==1.26.0 (from opentelemetry-exporter-otlp-proto-http<2.0.0,>=1.22.0->crewai[tools])\n",
+      "  Downloading opentelemetry_exporter_otlp_proto_common-1.26.0-py3-none-any.whl.metadata (1.8 kB)\n",
+      "Collecting opentelemetry-proto==1.26.0 (from opentelemetry-exporter-otlp-proto-http<2.0.0,>=1.22.0->crewai[tools])\n",
+      "  Downloading opentelemetry_proto-1.26.0-py3-none-any.whl.metadata (2.3 kB)\n",
+      "Collecting protobuf<5.0,>=3.19 (from opentelemetry-proto==1.26.0->opentelemetry-exporter-otlp-proto-http<2.0.0,>=1.22.0->crewai[tools])\n",
+      "  Downloading protobuf-4.25.4-cp37-abi3-macosx_10_9_universal2.whl.metadata (541 bytes)\n",
+      "Collecting opentelemetry-semantic-conventions==0.47b0 (from opentelemetry-sdk<2.0.0,>=1.22.0->crewai[tools])\n",
+      "  Downloading opentelemetry_semantic_conventions-0.47b0-py3-none-any.whl.metadata (2.4 kB)\n",
+      "Requirement already satisfied: annotated-types>=0.4.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from pydantic<3.0.0,>=2.4.2->crewai[tools]) (0.7.0)\n",
+      "Requirement already satisfied: aiohappyeyeballs>=2.3.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.9.1->instructor==1.3.3->crewai[tools]) (2.3.5)\n",
+      "Requirement already satisfied: aiosignal>=1.1.2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.9.1->instructor==1.3.3->crewai[tools]) (1.3.1)\n",
+      "Requirement already satisfied: attrs>=17.3.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.9.1->instructor==1.3.3->crewai[tools]) (24.2.0)\n",
+      "Requirement already satisfied: frozenlist>=1.1.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.9.1->instructor==1.3.3->crewai[tools]) (1.4.1)\n",
+      "Requirement already satisfied: multidict<7.0,>=4.5 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.9.1->instructor==1.3.3->crewai[tools]) (6.0.5)\n",
+      "Requirement already satisfied: yarl<2.0,>=1.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.9.1->instructor==1.3.3->crewai[tools]) (1.9.4)\n",
+      "Collecting Mako (from alembic<2.0.0,>=1.13.1->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached Mako-1.3.5-py3-none-any.whl.metadata (2.9 kB)\n",
+      "Requirement already satisfied: idna>=2.8 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from anyio<5,>=3.5.0->openai<2.0.0,>=1.13.3->crewai[tools]) (3.7)\n",
+      "Collecting soupsieve>1.2 (from beautifulsoup4<5.0.0,>=4.12.3->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached soupsieve-2.5-py3-none-any.whl.metadata (4.7 kB)\n",
+      "Collecting build>=1.0.3 (from chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached build-1.2.1-py3-none-any.whl.metadata (4.3 kB)\n",
+      "Collecting chroma-hnswlib==0.7.3 (from chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached chroma_hnswlib-0.7.3-cp312-cp312-macosx_13_0_arm64.whl\n",
+      "Collecting fastapi>=0.95.2 (from chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Downloading fastapi-0.112.0-py3-none-any.whl.metadata (27 kB)\n",
+      "Collecting uvicorn>=0.18.3 (from uvicorn[standard]>=0.18.3->chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Downloading uvicorn-0.30.5-py3-none-any.whl.metadata (6.6 kB)\n",
+      "Collecting pulsar-client>=3.1.0 (from chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached pulsar_client-3.5.0-cp312-cp312-macosx_10_15_universal2.whl.metadata (1.0 kB)\n",
+      "Collecting onnxruntime>=1.14.1 (from chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached onnxruntime-1.18.1-cp312-cp312-macosx_11_0_universal2.whl.metadata (4.3 kB)\n",
+      "Collecting opentelemetry-exporter-otlp-proto-grpc>=1.2.0 (from chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Downloading opentelemetry_exporter_otlp_proto_grpc-1.26.0-py3-none-any.whl.metadata (2.3 kB)\n",
+      "Collecting opentelemetry-instrumentation-fastapi>=0.41b0 (from chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Downloading opentelemetry_instrumentation_fastapi-0.47b0-py3-none-any.whl.metadata (2.1 kB)\n",
+      "Requirement already satisfied: tokenizers>=0.13.2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools]) (0.20.0)\n",
+      "Collecting pypika>=0.48.9 (from chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached PyPika-0.48.9-py2.py3-none-any.whl\n",
+      "Collecting overrides>=7.3.1 (from chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached overrides-7.7.0-py3-none-any.whl.metadata (5.8 kB)\n",
+      "Collecting importlib-resources (from chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached importlib_resources-6.4.0-py3-none-any.whl.metadata (3.9 kB)\n",
+      "Collecting grpcio>=1.58.0 (from chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Downloading grpcio-1.65.4-cp312-cp312-macosx_10_9_universal2.whl.metadata (3.3 kB)\n",
+      "Collecting bcrypt>=4.0.1 (from chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Downloading bcrypt-4.2.0-cp39-abi3-macosx_10_12_universal2.whl.metadata (9.6 kB)\n",
+      "Collecting kubernetes>=28.1.0 (from chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached kubernetes-30.1.0-py2.py3-none-any.whl.metadata (1.5 kB)\n",
+      "Collecting mmh3>=4.0.1 (from chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached mmh3-4.1.0-cp312-cp312-macosx_11_0_arm64.whl.metadata (13 kB)\n",
+      "Requirement already satisfied: orjson>=3.9.12 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools]) (3.10.7)\n",
+      "Requirement already satisfied: boto3<2.0.0,>=1.34.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from cohere<6.0,>=5.3->embedchain<0.2.0,>=0.1.114->crewai[tools]) (1.34.157)\n",
+      "Requirement already satisfied: fastavro<2.0.0,>=1.9.4 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from cohere<6.0,>=5.3->embedchain<0.2.0,>=0.1.114->crewai[tools]) (1.9.5)\n",
+      "Requirement already satisfied: httpx-sse<0.5.0,>=0.4.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from cohere<6.0,>=5.3->embedchain<0.2.0,>=0.1.114->crewai[tools]) (0.4.0)\n",
+      "Requirement already satisfied: parameterized<0.10.0,>=0.9.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from cohere<6.0,>=5.3->embedchain<0.2.0,>=0.1.114->crewai[tools]) (0.9.0)\n",
+      "Requirement already satisfied: types-requests<3.0.0,>=2.0.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from cohere<6.0,>=5.3->embedchain<0.2.0,>=0.1.114->crewai[tools]) (2.32.0.20240712)\n",
+      "Collecting wrapt<2,>=1.10 (from deprecated>=1.2.6->opentelemetry-api<2.0.0,>=1.22.0->crewai[tools])\n",
+      "  Using cached wrapt-1.16.0-cp312-cp312-macosx_11_0_arm64.whl.metadata (6.6 kB)\n",
+      "Requirement already satisfied: urllib3>=1.26.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from docker<8.0.0,>=7.1.0->crewai-tools<0.5.0,>=0.4.26->crewai[tools]) (2.2.2)\n",
+      "Collecting google-api-core!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,<3.0.0dev,>=1.34.1 (from google-api-core[grpc]!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,<3.0.0dev,>=1.34.1->google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached google_api_core-2.19.1-py3-none-any.whl.metadata (2.7 kB)\n",
+      "Collecting google-auth<3.0.0dev,>=2.14.1 (from google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Downloading google_auth-2.33.0-py2.py3-none-any.whl.metadata (4.7 kB)\n",
+      "Collecting proto-plus<2.0.0dev,>=1.22.3 (from google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached proto_plus-1.24.0-py3-none-any.whl.metadata (2.2 kB)\n",
+      "Requirement already satisfied: packaging>=14.3 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai[tools]) (23.2)\n",
+      "Collecting google-cloud-storage<3.0.0dev,>=1.32.0 (from google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Downloading google_cloud_storage-2.18.2-py2.py3-none-any.whl.metadata (9.1 kB)\n",
+      "Collecting google-cloud-bigquery!=3.20.0,<4.0.0dev,>=1.15.0 (from google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached google_cloud_bigquery-3.25.0-py2.py3-none-any.whl.metadata (8.9 kB)\n",
+      "Collecting google-cloud-resource-manager<3.0.0dev,>=1.3.3 (from google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Downloading google_cloud_resource_manager-1.12.5-py2.py3-none-any.whl.metadata (5.3 kB)\n",
+      "Collecting shapely<3.0.0dev (from google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached shapely-2.0.5-cp312-cp312-macosx_11_0_arm64.whl.metadata (7.0 kB)\n",
+      "Collecting cachetools (from gptcache<0.2.0,>=0.1.43->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached cachetools-5.4.0-py3-none-any.whl.metadata (5.3 kB)\n",
+      "Requirement already satisfied: certifi in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from httpx<1,>=0.23.0->openai<2.0.0,>=1.13.3->crewai[tools]) (2024.7.4)\n",
+      "Requirement already satisfied: httpcore==1.* in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from httpx<1,>=0.23.0->openai<2.0.0,>=1.13.3->crewai[tools]) (1.0.5)\n",
+      "Requirement already satisfied: h11<0.15,>=0.13 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from httpcore==1.*->httpx<1,>=0.23.0->openai<2.0.0,>=1.13.3->crewai[tools]) (0.14.0)\n",
+      "Requirement already satisfied: zipp>=0.5 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from importlib-metadata<=8.0.0,>=6.0->opentelemetry-api<2.0.0,>=1.22.0->crewai[tools]) (3.19.2)\n",
+      "Collecting deprecation (from lancedb<0.6.0,>=0.5.4->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached deprecation-2.1.0-py2.py3-none-any.whl.metadata (4.6 kB)\n",
+      "Collecting pylance==0.9.18 (from lancedb<0.6.0,>=0.5.4->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached pylance-0.9.18-cp38-abi3-macosx_11_0_arm64.whl.metadata (7.2 kB)\n",
+      "Collecting ratelimiter~=1.0 (from lancedb<0.6.0,>=0.5.4->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached ratelimiter-1.2.0.post0-py3-none-any.whl.metadata (4.0 kB)\n",
+      "Collecting retry>=0.9.2 (from lancedb<0.6.0,>=0.5.4->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached retry-0.9.2-py2.py3-none-any.whl.metadata (5.8 kB)\n",
+      "Collecting semver>=3.0 (from lancedb<0.6.0,>=0.5.4->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached semver-3.0.2-py3-none-any.whl.metadata (5.0 kB)\n",
+      "Collecting pyarrow>=12 (from pylance==0.9.18->lancedb<0.6.0,>=0.5.4->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached pyarrow-17.0.0-cp312-cp312-macosx_11_0_arm64.whl.metadata (3.3 kB)\n",
+      "Collecting langchain-experimental>=0.0.6 (from langchain-cohere<0.2.0,>=0.1.4->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Downloading langchain_experimental-0.0.64-py3-none-any.whl.metadata (1.7 kB)\n",
+      "Collecting pandas>=1.4.3 (from langchain-cohere<0.2.0,>=0.1.4->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached pandas-2.2.2-cp312-cp312-macosx_11_0_arm64.whl.metadata (19 kB)\n",
+      "Collecting tabulate<0.10.0,>=0.9.0 (from langchain-cohere<0.2.0,>=0.1.4->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached tabulate-0.9.0-py3-none-any.whl.metadata (34 kB)\n",
+      "Collecting dataclasses-json<0.7,>=0.5.7 (from langchain-community<0.3.0,>=0.2.6->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached dataclasses_json-0.6.7-py3-none-any.whl.metadata (25 kB)\n",
+      "Requirement already satisfied: jsonpatch<2.0,>=1.33 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain-core<0.3.0,>=0.2.27->langchain<=0.3,>0.2->crewai[tools]) (1.33)\n",
+      "Collecting qdrant-client<2.0.0,>=1.9.1 (from mem0ai<0.0.10,>=0.0.9->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached qdrant_client-1.10.1-py3-none-any.whl.metadata (10 kB)\n",
+      "Requirement already satisfied: six>=1.5 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from posthog<4.0.0,>=3.0.2->embedchain<0.2.0,>=0.1.114->crewai[tools]) (1.16.0)\n",
+      "Collecting monotonic>=1.5 (from posthog<4.0.0,>=3.0.2->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached monotonic-1.6-py2.py3-none-any.whl.metadata (1.5 kB)\n",
+      "Collecting backoff>=1.10.0 (from posthog<4.0.0,>=3.0.2->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached backoff-2.2.1-py3-none-any.whl.metadata (14 kB)\n",
+      "Requirement already satisfied: python-dateutil>2.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from posthog<4.0.0,>=3.0.2->embedchain<0.2.0,>=0.1.114->crewai[tools]) (2.9.0.post0)\n",
+      "Collecting nodeenv>=1.6.0 (from pyright<2.0.0,>=1.1.350->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached nodeenv-1.9.1-py2.py3-none-any.whl.metadata (21 kB)\n",
+      "Collecting iniconfig (from pytest<9.0.0,>=8.0.0->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached iniconfig-2.0.0-py3-none-any.whl.metadata (2.6 kB)\n",
+      "Collecting pluggy<2,>=1.5 (from pytest<9.0.0,>=8.0.0->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached pluggy-1.5.0-py3-none-any.whl.metadata (4.8 kB)\n",
+      "Requirement already satisfied: charset-normalizer<4,>=2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from requests<3.0.0,>=2.31.0->crewai-tools<0.5.0,>=0.4.26->crewai[tools]) (3.3.2)\n",
+      "Collecting markdown-it-py>=2.2.0 (from rich<14.0.0,>=13.7.0->instructor==1.3.3->crewai[tools])\n",
+      "  Using cached markdown_it_py-3.0.0-py3-none-any.whl.metadata (6.9 kB)\n",
+      "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from rich<14.0.0,>=13.7.0->instructor==1.3.3->crewai[tools]) (2.18.0)\n",
+      "Collecting trio~=0.17 (from selenium<5.0.0,>=4.18.1->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Downloading trio-0.26.2-py3-none-any.whl.metadata (8.6 kB)\n",
+      "Collecting trio-websocket~=0.9 (from selenium<5.0.0,>=4.18.1->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached trio_websocket-0.11.1-py3-none-any.whl.metadata (4.7 kB)\n",
+      "Collecting websocket-client~=1.8 (from selenium<5.0.0,>=4.18.1->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached websocket_client-1.8.0-py3-none-any.whl.metadata (8.0 kB)\n",
+      "Collecting shellingham>=1.3.0 (from typer<1.0.0,>=0.9.0->instructor==1.3.3->crewai[tools])\n",
+      "  Using cached shellingham-1.5.4-py2.py3-none-any.whl.metadata (3.5 kB)\n",
+      "Requirement already satisfied: botocore<1.35.0,>=1.34.157 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from boto3<2.0.0,>=1.34.0->cohere<6.0,>=5.3->embedchain<0.2.0,>=0.1.114->crewai[tools]) (1.34.157)\n",
+      "Requirement already satisfied: jmespath<2.0.0,>=0.7.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from boto3<2.0.0,>=1.34.0->cohere<6.0,>=5.3->embedchain<0.2.0,>=0.1.114->crewai[tools]) (1.0.1)\n",
+      "Requirement already satisfied: s3transfer<0.11.0,>=0.10.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from boto3<2.0.0,>=1.34.0->cohere<6.0,>=5.3->embedchain<0.2.0,>=0.1.114->crewai[tools]) (0.10.2)\n",
+      "Collecting pyproject_hooks (from build>=1.0.3->chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached pyproject_hooks-1.1.0-py3-none-any.whl.metadata (1.3 kB)\n",
+      "Collecting marshmallow<4.0.0,>=3.18.0 (from dataclasses-json<0.7,>=0.5.7->langchain-community<0.3.0,>=0.2.6->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached marshmallow-3.21.3-py3-none-any.whl.metadata (7.1 kB)\n",
+      "Collecting typing-inspect<1,>=0.4.0 (from dataclasses-json<0.7,>=0.5.7->langchain-community<0.3.0,>=0.2.6->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached typing_inspect-0.9.0-py3-none-any.whl.metadata (1.5 kB)\n",
+      "Collecting starlette<0.38.0,>=0.37.2 (from fastapi>=0.95.2->chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached starlette-0.37.2-py3-none-any.whl.metadata (5.9 kB)\n",
+      "Collecting grpcio-status<2.0.dev0,>=1.33.2 (from google-api-core[grpc]!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,<3.0.0dev,>=1.34.1->google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Downloading grpcio_status-1.65.4-py3-none-any.whl.metadata (1.1 kB)\n",
+      "Collecting pyasn1-modules>=0.2.1 (from google-auth<3.0.0dev,>=2.14.1->google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached pyasn1_modules-0.4.0-py3-none-any.whl.metadata (3.4 kB)\n",
+      "Collecting rsa<5,>=3.1.4 (from google-auth<3.0.0dev,>=2.14.1->google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached rsa-4.9-py3-none-any.whl.metadata (4.2 kB)\n",
+      "Collecting google-cloud-core<3.0.0dev,>=1.6.0 (from google-cloud-bigquery!=3.20.0,<4.0.0dev,>=1.15.0->google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached google_cloud_core-2.4.1-py2.py3-none-any.whl.metadata (2.7 kB)\n",
+      "Collecting google-resumable-media<3.0dev,>=0.6.0 (from google-cloud-bigquery!=3.20.0,<4.0.0dev,>=1.15.0->google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Downloading google_resumable_media-2.7.2-py2.py3-none-any.whl.metadata (2.2 kB)\n",
+      "Collecting grpc-google-iam-v1<1.0.0dev,>=0.12.4 (from google-cloud-resource-manager<3.0.0dev,>=1.3.3->google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached grpc_google_iam_v1-0.13.1-py2.py3-none-any.whl.metadata (3.3 kB)\n",
+      "Collecting google-crc32c<2.0dev,>=1.0 (from google-cloud-storage<3.0.0dev,>=1.32.0->google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached google_crc32c-1.5.0-py3-none-any.whl\n",
+      "Requirement already satisfied: jsonpointer>=1.9 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from jsonpatch<2.0,>=1.33->langchain-core<0.3.0,>=0.2.27->langchain<=0.3,>0.2->crewai[tools]) (3.0.0)\n",
+      "Collecting requests-oauthlib (from kubernetes>=28.1.0->chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached requests_oauthlib-2.0.0-py2.py3-none-any.whl.metadata (11 kB)\n",
+      "Collecting oauthlib>=3.2.2 (from kubernetes>=28.1.0->chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached oauthlib-3.2.2-py3-none-any.whl.metadata (7.5 kB)\n",
+      "Collecting mdurl~=0.1 (from markdown-it-py>=2.2.0->rich<14.0.0,>=13.7.0->instructor==1.3.3->crewai[tools])\n",
+      "  Using cached mdurl-0.1.2-py3-none-any.whl.metadata (1.6 kB)\n",
+      "Collecting coloredlogs (from onnxruntime>=1.14.1->chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached coloredlogs-15.0.1-py2.py3-none-any.whl.metadata (12 kB)\n",
+      "Collecting flatbuffers (from onnxruntime>=1.14.1->chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached flatbuffers-24.3.25-py2.py3-none-any.whl.metadata (850 bytes)\n",
+      "Collecting sympy (from onnxruntime>=1.14.1->chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached sympy-1.13.1-py3-none-any.whl.metadata (12 kB)\n",
+      "Collecting opentelemetry-instrumentation-asgi==0.47b0 (from opentelemetry-instrumentation-fastapi>=0.41b0->chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Downloading opentelemetry_instrumentation_asgi-0.47b0-py3-none-any.whl.metadata (2.0 kB)\n",
+      "Collecting opentelemetry-instrumentation==0.47b0 (from opentelemetry-instrumentation-fastapi>=0.41b0->chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Downloading opentelemetry_instrumentation-0.47b0-py3-none-any.whl.metadata (6.1 kB)\n",
+      "Collecting opentelemetry-util-http==0.47b0 (from opentelemetry-instrumentation-fastapi>=0.41b0->chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Downloading opentelemetry_util_http-0.47b0-py3-none-any.whl.metadata (2.5 kB)\n",
+      "Collecting setuptools>=16.0 (from opentelemetry-instrumentation==0.47b0->opentelemetry-instrumentation-fastapi>=0.41b0->chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Downloading setuptools-72.1.0-py3-none-any.whl.metadata (6.6 kB)\n",
+      "Collecting asgiref~=3.0 (from opentelemetry-instrumentation-asgi==0.47b0->opentelemetry-instrumentation-fastapi>=0.41b0->chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached asgiref-3.8.1-py3-none-any.whl.metadata (9.3 kB)\n",
+      "Collecting pytz>=2020.1 (from pandas>=1.4.3->langchain-cohere<0.2.0,>=0.1.4->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached pytz-2024.1-py2.py3-none-any.whl.metadata (22 kB)\n",
+      "Collecting tzdata>=2022.7 (from pandas>=1.4.3->langchain-cohere<0.2.0,>=0.1.4->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached tzdata-2024.1-py2.py3-none-any.whl.metadata (1.4 kB)\n",
+      "Collecting grpcio-tools>=1.41.0 (from qdrant-client<2.0.0,>=1.9.1->mem0ai<0.0.10,>=0.0.9->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Downloading grpcio_tools-1.65.4-cp312-cp312-macosx_10_9_universal2.whl.metadata (5.3 kB)\n",
+      "Collecting portalocker<3.0.0,>=2.7.0 (from qdrant-client<2.0.0,>=1.9.1->mem0ai<0.0.10,>=0.0.9->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached portalocker-2.10.1-py3-none-any.whl.metadata (8.5 kB)\n",
+      "Requirement already satisfied: decorator>=3.4.2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from retry>=0.9.2->lancedb<0.6.0,>=0.5.4->crewai-tools<0.5.0,>=0.4.26->crewai[tools]) (5.1.1)\n",
+      "Collecting py<2.0.0,>=1.4.26 (from retry>=0.9.2->lancedb<0.6.0,>=0.5.4->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached py-1.11.0-py2.py3-none-any.whl.metadata (2.8 kB)\n",
+      "Requirement already satisfied: huggingface-hub<1.0,>=0.16.4 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from tokenizers>=0.13.2->chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools]) (0.24.5)\n",
+      "Collecting sortedcontainers (from trio~=0.17->selenium<5.0.0,>=4.18.1->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached sortedcontainers-2.4.0-py2.py3-none-any.whl.metadata (10 kB)\n",
+      "Collecting outcome (from trio~=0.17->selenium<5.0.0,>=4.18.1->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached outcome-1.3.0.post0-py2.py3-none-any.whl.metadata (2.6 kB)\n",
+      "Collecting wsproto>=0.14 (from trio-websocket~=0.9->selenium<5.0.0,>=4.18.1->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached wsproto-1.2.0-py3-none-any.whl.metadata (5.6 kB)\n",
+      "Collecting pysocks!=1.5.7,<2.0,>=1.5.6 (from urllib3[socks]<3,>=1.26->selenium<5.0.0,>=4.18.1->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached PySocks-1.7.1-py3-none-any.whl.metadata (13 kB)\n",
+      "Collecting httptools>=0.5.0 (from uvicorn[standard]>=0.18.3->chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached httptools-0.6.1-cp312-cp312-macosx_10_9_universal2.whl.metadata (3.6 kB)\n",
+      "Collecting uvloop!=0.15.0,!=0.15.1,>=0.14.0 (from uvicorn[standard]>=0.18.3->chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached uvloop-0.19.0-cp312-cp312-macosx_10_9_universal2.whl.metadata (4.9 kB)\n",
+      "Collecting watchfiles>=0.13 (from uvicorn[standard]>=0.18.3->chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Downloading watchfiles-0.23.0-cp312-cp312-macosx_11_0_arm64.whl.metadata (4.9 kB)\n",
+      "Collecting websockets>=10.4 (from uvicorn[standard]>=0.18.3->chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached websockets-12.0-cp312-cp312-macosx_11_0_arm64.whl.metadata (6.6 kB)\n",
+      "Requirement already satisfied: MarkupSafe>=0.9.2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from Mako->alembic<2.0.0,>=1.13.1->embedchain<0.2.0,>=0.1.114->crewai[tools]) (2.1.5)\n",
+      "INFO: pip is looking at multiple versions of grpcio-status to determine which version is compatible with other requirements. This could take a while.\n",
+      "Collecting grpcio-status<2.0.dev0,>=1.33.2 (from google-api-core[grpc]!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,<3.0.0dev,>=1.34.1->google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Downloading grpcio_status-1.65.2-py3-none-any.whl.metadata (1.1 kB)\n",
+      "  Using cached grpcio_status-1.65.1-py3-none-any.whl.metadata (1.1 kB)\n",
+      "  Downloading grpcio_status-1.64.3-py3-none-any.whl.metadata (1.1 kB)\n",
+      "  Using cached grpcio_status-1.64.1-py3-none-any.whl.metadata (1.1 kB)\n",
+      "  Using cached grpcio_status-1.64.0-py3-none-any.whl.metadata (1.1 kB)\n",
+      "  Downloading grpcio_status-1.63.2-py3-none-any.whl.metadata (1.1 kB)\n",
+      "  Using cached grpcio_status-1.63.0-py3-none-any.whl.metadata (1.1 kB)\n",
+      "INFO: pip is still looking at multiple versions of grpcio-status to determine which version is compatible with other requirements. This could take a while.\n",
+      "  Downloading grpcio_status-1.62.3-py3-none-any.whl.metadata (1.3 kB)\n",
+      "INFO: pip is looking at multiple versions of grpcio-tools to determine which version is compatible with other requirements. This could take a while.\n",
+      "Collecting grpcio-tools>=1.41.0 (from qdrant-client<2.0.0,>=1.9.1->mem0ai<0.0.10,>=0.0.9->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Downloading grpcio_tools-1.65.2-cp312-cp312-macosx_10_9_universal2.whl.metadata (5.3 kB)\n",
+      "  Using cached grpcio_tools-1.65.1-cp312-cp312-macosx_10_9_universal2.whl.metadata (5.3 kB)\n",
+      "  Downloading grpcio_tools-1.64.3-cp312-cp312-macosx_10_9_universal2.whl.metadata (5.3 kB)\n",
+      "  Using cached grpcio_tools-1.64.1-cp312-cp312-macosx_10_9_universal2.whl.metadata (5.3 kB)\n",
+      "  Using cached grpcio_tools-1.64.0-cp312-cp312-macosx_10_9_universal2.whl.metadata (5.3 kB)\n",
+      "  Downloading grpcio_tools-1.63.2-cp312-cp312-macosx_10_9_universal2.whl.metadata (5.3 kB)\n",
+      "  Using cached grpcio_tools-1.63.0-cp312-cp312-macosx_10_9_universal2.whl.metadata (5.3 kB)\n",
+      "INFO: pip is still looking at multiple versions of grpcio-tools to determine which version is compatible with other requirements. This could take a while.\n",
+      "  Downloading grpcio_tools-1.62.3-cp312-cp312-macosx_10_10_universal2.whl.metadata (6.2 kB)\n",
+      "Collecting h2<5,>=3 (from httpx[http2]>=0.20.0->qdrant-client<2.0.0,>=1.9.1->mem0ai<0.0.10,>=0.0.9->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached h2-4.1.0-py3-none-any.whl.metadata (3.6 kB)\n",
+      "Requirement already satisfied: filelock in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from huggingface-hub<1.0,>=0.16.4->tokenizers>=0.13.2->chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools]) (3.15.4)\n",
+      "Requirement already satisfied: fsspec>=2023.5.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from huggingface-hub<1.0,>=0.16.4->tokenizers>=0.13.2->chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools]) (2024.6.1)\n",
+      "Collecting pyasn1<0.7.0,>=0.4.6 (from pyasn1-modules>=0.2.1->google-auth<3.0.0dev,>=2.14.1->google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached pyasn1-0.6.0-py2.py3-none-any.whl.metadata (8.3 kB)\n",
+      "Collecting mypy-extensions>=0.3.0 (from typing-inspect<1,>=0.4.0->dataclasses-json<0.7,>=0.5.7->langchain-community<0.3.0,>=0.2.6->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached mypy_extensions-1.0.0-py3-none-any.whl.metadata (1.1 kB)\n",
+      "Collecting humanfriendly>=9.1 (from coloredlogs->onnxruntime>=1.14.1->chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached humanfriendly-10.0-py2.py3-none-any.whl.metadata (9.2 kB)\n",
+      "Collecting mpmath<1.4,>=1.1.0 (from sympy->onnxruntime>=1.14.1->chromadb<0.5.0,>=0.4.22->crewai-tools<0.5.0,>=0.4.26->crewai[tools])\n",
+      "  Using cached mpmath-1.3.0-py3-none-any.whl.metadata (8.6 kB)\n",
+      "Collecting hyperframe<7,>=6.0 (from h2<5,>=3->httpx[http2]>=0.20.0->qdrant-client<2.0.0,>=1.9.1->mem0ai<0.0.10,>=0.0.9->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached hyperframe-6.0.1-py3-none-any.whl.metadata (2.7 kB)\n",
+      "Collecting hpack<5,>=4.0 (from h2<5,>=3->httpx[http2]>=0.20.0->qdrant-client<2.0.0,>=1.9.1->mem0ai<0.0.10,>=0.0.9->embedchain<0.2.0,>=0.1.114->crewai[tools])\n",
+      "  Using cached hpack-4.0.0-py3-none-any.whl.metadata (2.5 kB)\n",
+      "Using cached instructor-1.3.3-py3-none-any.whl (50 kB)\n",
+      "Using cached appdirs-1.4.4-py2.py3-none-any.whl (9.6 kB)\n",
+      "Using cached crewai_tools-0.4.26-py3-none-any.whl (84 kB)\n",
+      "Downloading embedchain-0.1.120-py3-none-any.whl (210 kB)\n",
+      "Using cached json_repair-0.25.3-py3-none-any.whl (12 kB)\n",
+      "Using cached jsonref-1.1.0-py3-none-any.whl (9.4 kB)\n",
+      "Downloading opentelemetry_api-1.26.0-py3-none-any.whl (61 kB)\n",
+      "Downloading opentelemetry_exporter_otlp_proto_http-1.26.0-py3-none-any.whl (16 kB)\n",
+      "Downloading opentelemetry_exporter_otlp_proto_common-1.26.0-py3-none-any.whl (17 kB)\n",
+      "Downloading opentelemetry_proto-1.26.0-py3-none-any.whl (52 kB)\n",
+      "Downloading opentelemetry_sdk-1.26.0-py3-none-any.whl (109 kB)\n",
+      "Downloading opentelemetry_semantic_conventions-0.47b0-py3-none-any.whl (138 kB)\n",
+      "Using cached regex-2023.12.25-cp312-cp312-macosx_11_0_arm64.whl (292 kB)\n",
+      "Using cached crewai-0.41.1-py3-none-any.whl (91 kB)\n",
+      "Using cached alembic-1.13.2-py3-none-any.whl (232 kB)\n",
+      "Using cached beautifulsoup4-4.12.3-py3-none-any.whl (147 kB)\n",
+      "Using cached chromadb-0.4.24-py3-none-any.whl (525 kB)\n",
+      "Using cached Deprecated-1.2.14-py2.py3-none-any.whl (9.6 kB)\n",
+      "Using cached docstring_parser-0.16-py3-none-any.whl (36 kB)\n",
+      "Downloading google_cloud_aiplatform-1.61.0-py2.py3-none-any.whl (5.1 MB)\n",
+      "\u001b[2K   \u001b[38;2;249;38;114m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[38;5;237mβ•Ί\u001b[0m\u001b[38;5;237m━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.6/5.1 MB\u001b[0m \u001b[31m483.2 kB/s\u001b[0m eta \u001b[36m0:00:06\u001b[0m"
+     ]
+    }
+   ],
    "source": [
     "%pip install -U 'crewai[tools]'\n",
     "%pip install -U 'crewai[agentops]'"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Then import them"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
+    "from crewai import Crew, Agent, Task\n",
+    "from crewai_tools.tools import WebsiteSearchTool, SerperDevTool, FileReadTool\n",
+    "import agentops\n",
     "import os\n",
-    "\n",
-    "os.environ[\"SERPER_API_KEY\"] = \"...\"\n",
-    "os.environ[\"OPENAI_API_KEY\"] = \"...\"\n",
-    "os.environ[\"AGENTOPS_API_KEY\"] = \"...\""
+    "from dotenv import load_dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "load_dotenv()\n",
+    "OPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\") or \"\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\"\n",
+    "SERPER_API_KEY = os.getenv(\"SERPER_API_KEY\") or \"\""
    ]
   },
   {
@@ -38,9 +431,6 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from crewai import Agent\n",
-    "from crewai_tools.tools import WebsiteSearchTool, SerperDevTool, FileReadTool\n",
-    "\n",
     "web_search_tool = WebsiteSearchTool()\n",
     "serper_dev_tool = SerperDevTool()\n",
     "file_read_tool = FileReadTool(\n",
@@ -85,8 +475,6 @@
    "outputs": [],
    "source": [
     "from textwrap import dedent\n",
-    "from crewai import Task\n",
-    "\n",
     "\n",
     "class Tasks:\n",
     "    def research_company_culture_task(self, agent, company_description, company_domain):\n",
@@ -167,10 +555,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from crewai import Crew\n",
-    "import agentops\n",
-    "\n",
-    "agentops.init(tags=[\"crew-job-posting-example\"])\n",
+    "agentops.init(default_tags=[\"crew-job-posting-example\"])\n",
     "\n",
     "tasks = Tasks()\n",
     "agents = Agents()\n",
@@ -223,11 +608,6 @@
     "\n",
     "agentops.end_session(\"Success\")"
    ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": []
   }
  ],
  "metadata": {
diff --git a/examples/crew/markdown_validator.ipynb b/examples/crew/markdown_validator.ipynb
new file mode 100644
index 00000000..2adc853f
--- /dev/null
+++ b/examples/crew/markdown_validator.ipynb
@@ -0,0 +1,604 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "9b2dac908ce82802",
+   "metadata": {},
+   "source": [
+    "# CrewAI Markdown Validator\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "925e51b6",
+   "metadata": {},
+   "source": [
+    "First let's install the required packages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "id": "8c6c9f08b3228dcb",
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2024-08-09T20:15:44.174710Z",
+     "start_time": "2024-08-09T20:15:33.328104Z"
+    }
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Requirement already satisfied: crewai in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (0.41.1)\n",
+      "Requirement already satisfied: appdirs<2.0.0,>=1.4.4 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from crewai) (1.4.4)\n",
+      "Requirement already satisfied: click<9.0.0,>=8.1.7 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from crewai) (8.1.7)\n",
+      "Requirement already satisfied: embedchain<0.2.0,>=0.1.114 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from crewai) (0.1.120)\n",
+      "Requirement already satisfied: instructor==1.3.3 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from crewai) (1.3.3)\n",
+      "Requirement already satisfied: json-repair<0.26.0,>=0.25.2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from crewai) (0.25.3)\n",
+      "Requirement already satisfied: jsonref<2.0.0,>=1.1.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from crewai) (1.1.0)\n",
+      "Requirement already satisfied: langchain<=0.3,>0.2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from crewai) (0.2.12)\n",
+      "Requirement already satisfied: openai<2.0.0,>=1.13.3 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from crewai) (1.40.2)\n",
+      "Requirement already satisfied: opentelemetry-api<2.0.0,>=1.22.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from crewai) (1.26.0)\n",
+      "Requirement already satisfied: opentelemetry-exporter-otlp-proto-http<2.0.0,>=1.22.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from crewai) (1.26.0)\n",
+      "Requirement already satisfied: opentelemetry-sdk<2.0.0,>=1.22.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from crewai) (1.26.0)\n",
+      "Requirement already satisfied: pydantic<3.0.0,>=2.4.2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from crewai) (2.8.2)\n",
+      "Requirement already satisfied: python-dotenv<2.0.0,>=1.0.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from crewai) (1.0.1)\n",
+      "Requirement already satisfied: regex<2024.0.0,>=2023.12.25 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from crewai) (2023.12.25)\n",
+      "Requirement already satisfied: aiohttp<4.0.0,>=3.9.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from instructor==1.3.3->crewai) (3.10.2)\n",
+      "Requirement already satisfied: docstring-parser<0.17,>=0.16 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from instructor==1.3.3->crewai) (0.16)\n",
+      "Requirement already satisfied: jiter<0.5.0,>=0.4.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from instructor==1.3.3->crewai) (0.4.2)\n",
+      "Requirement already satisfied: pydantic-core<3.0.0,>=2.18.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from instructor==1.3.3->crewai) (2.20.1)\n",
+      "Requirement already satisfied: rich<14.0.0,>=13.7.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from instructor==1.3.3->crewai) (13.7.1)\n",
+      "Requirement already satisfied: tenacity<9.0.0,>=8.2.3 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from instructor==1.3.3->crewai) (8.5.0)\n",
+      "Requirement already satisfied: typer<1.0.0,>=0.9.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from instructor==1.3.3->crewai) (0.12.3)\n",
+      "Requirement already satisfied: alembic<2.0.0,>=1.13.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from embedchain<0.2.0,>=0.1.114->crewai) (1.13.2)\n",
+      "Requirement already satisfied: beautifulsoup4<5.0.0,>=4.12.2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from embedchain<0.2.0,>=0.1.114->crewai) (4.12.3)\n",
+      "Requirement already satisfied: chromadb<0.5.0,>=0.4.24 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from embedchain<0.2.0,>=0.1.114->crewai) (0.4.24)\n",
+      "Requirement already satisfied: cohere<6.0,>=5.3 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from embedchain<0.2.0,>=0.1.114->crewai) (5.6.2)\n",
+      "Requirement already satisfied: google-cloud-aiplatform<2.0.0,>=1.26.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from embedchain<0.2.0,>=0.1.114->crewai) (1.61.0)\n",
+      "Requirement already satisfied: gptcache<0.2.0,>=0.1.43 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from embedchain<0.2.0,>=0.1.114->crewai) (0.1.44)\n",
+      "Requirement already satisfied: langchain-cohere<0.2.0,>=0.1.4 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from embedchain<0.2.0,>=0.1.114->crewai) (0.1.9)\n",
+      "Requirement already satisfied: langchain-community<0.3.0,>=0.2.6 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from embedchain<0.2.0,>=0.1.114->crewai) (0.2.11)\n",
+      "Requirement already satisfied: langchain-openai<0.2.0,>=0.1.7 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from embedchain<0.2.0,>=0.1.114->crewai) (0.1.20)\n",
+      "Requirement already satisfied: mem0ai<0.0.10,>=0.0.9 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from embedchain<0.2.0,>=0.1.114->crewai) (0.0.9)\n",
+      "Requirement already satisfied: posthog<4.0.0,>=3.0.2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from embedchain<0.2.0,>=0.1.114->crewai) (3.5.0)\n",
+      "Requirement already satisfied: pypdf<5.0.0,>=4.0.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from embedchain<0.2.0,>=0.1.114->crewai) (4.3.1)\n",
+      "Requirement already satisfied: pysbd<0.4.0,>=0.3.4 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from embedchain<0.2.0,>=0.1.114->crewai) (0.3.4)\n",
+      "Requirement already satisfied: schema<0.8.0,>=0.7.5 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from embedchain<0.2.0,>=0.1.114->crewai) (0.7.7)\n",
+      "Requirement already satisfied: sqlalchemy<3.0.0,>=2.0.27 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from embedchain<0.2.0,>=0.1.114->crewai) (2.0.32)\n",
+      "Requirement already satisfied: tiktoken<0.8.0,>=0.7.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from embedchain<0.2.0,>=0.1.114->crewai) (0.7.0)\n",
+      "Requirement already satisfied: PyYAML>=5.3 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain<=0.3,>0.2->crewai) (6.0.2)\n",
+      "Requirement already satisfied: langchain-core<0.3.0,>=0.2.27 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain<=0.3,>0.2->crewai) (0.2.29)\n",
+      "Requirement already satisfied: langchain-text-splitters<0.3.0,>=0.2.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain<=0.3,>0.2->crewai) (0.2.2)\n",
+      "Requirement already satisfied: langsmith<0.2.0,>=0.1.17 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain<=0.3,>0.2->crewai) (0.1.98)\n",
+      "Requirement already satisfied: numpy<2.0.0,>=1.26.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain<=0.3,>0.2->crewai) (1.26.4)\n",
+      "Requirement already satisfied: requests<3,>=2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain<=0.3,>0.2->crewai) (2.31.0)\n",
+      "Requirement already satisfied: anyio<5,>=3.5.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from openai<2.0.0,>=1.13.3->crewai) (4.4.0)\n",
+      "Requirement already satisfied: distro<2,>=1.7.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from openai<2.0.0,>=1.13.3->crewai) (1.9.0)\n",
+      "Requirement already satisfied: httpx<1,>=0.23.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from openai<2.0.0,>=1.13.3->crewai) (0.27.0)\n",
+      "Requirement already satisfied: sniffio in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from openai<2.0.0,>=1.13.3->crewai) (1.3.1)\n",
+      "Requirement already satisfied: tqdm>4 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from openai<2.0.0,>=1.13.3->crewai) (4.66.5)\n",
+      "Requirement already satisfied: typing-extensions<5,>=4.11 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from openai<2.0.0,>=1.13.3->crewai) (4.12.2)\n",
+      "Requirement already satisfied: deprecated>=1.2.6 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from opentelemetry-api<2.0.0,>=1.22.0->crewai) (1.2.14)\n",
+      "Requirement already satisfied: importlib-metadata<=8.0.0,>=6.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from opentelemetry-api<2.0.0,>=1.22.0->crewai) (8.0.0)\n",
+      "Requirement already satisfied: googleapis-common-protos~=1.52 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from opentelemetry-exporter-otlp-proto-http<2.0.0,>=1.22.0->crewai) (1.63.2)\n",
+      "Requirement already satisfied: opentelemetry-exporter-otlp-proto-common==1.26.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from opentelemetry-exporter-otlp-proto-http<2.0.0,>=1.22.0->crewai) (1.26.0)\n",
+      "Requirement already satisfied: opentelemetry-proto==1.26.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from opentelemetry-exporter-otlp-proto-http<2.0.0,>=1.22.0->crewai) (1.26.0)\n",
+      "Requirement already satisfied: protobuf<5.0,>=3.19 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from opentelemetry-proto==1.26.0->opentelemetry-exporter-otlp-proto-http<2.0.0,>=1.22.0->crewai) (4.25.4)\n",
+      "Requirement already satisfied: opentelemetry-semantic-conventions==0.47b0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from opentelemetry-sdk<2.0.0,>=1.22.0->crewai) (0.47b0)\n",
+      "Requirement already satisfied: annotated-types>=0.4.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from pydantic<3.0.0,>=2.4.2->crewai) (0.7.0)\n",
+      "Requirement already satisfied: aiohappyeyeballs>=2.3.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.9.1->instructor==1.3.3->crewai) (2.3.5)\n",
+      "Requirement already satisfied: aiosignal>=1.1.2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.9.1->instructor==1.3.3->crewai) (1.3.1)\n",
+      "Requirement already satisfied: attrs>=17.3.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.9.1->instructor==1.3.3->crewai) (24.2.0)\n",
+      "Requirement already satisfied: frozenlist>=1.1.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.9.1->instructor==1.3.3->crewai) (1.4.1)\n",
+      "Requirement already satisfied: multidict<7.0,>=4.5 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.9.1->instructor==1.3.3->crewai) (6.0.5)\n",
+      "Requirement already satisfied: yarl<2.0,>=1.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.9.1->instructor==1.3.3->crewai) (1.9.4)\n",
+      "Requirement already satisfied: Mako in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from alembic<2.0.0,>=1.13.1->embedchain<0.2.0,>=0.1.114->crewai) (1.3.5)\n",
+      "Requirement already satisfied: idna>=2.8 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from anyio<5,>=3.5.0->openai<2.0.0,>=1.13.3->crewai) (3.7)\n",
+      "Requirement already satisfied: soupsieve>1.2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from beautifulsoup4<5.0.0,>=4.12.2->embedchain<0.2.0,>=0.1.114->crewai) (2.5)\n",
+      "Requirement already satisfied: build>=1.0.3 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (1.2.1)\n",
+      "Requirement already satisfied: chroma-hnswlib==0.7.3 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (0.7.3)\n",
+      "Requirement already satisfied: fastapi>=0.95.2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (0.112.0)\n",
+      "Requirement already satisfied: uvicorn>=0.18.3 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from uvicorn[standard]>=0.18.3->chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (0.30.5)\n",
+      "Requirement already satisfied: pulsar-client>=3.1.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (3.5.0)\n",
+      "Requirement already satisfied: onnxruntime>=1.14.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (1.18.1)\n",
+      "Requirement already satisfied: opentelemetry-exporter-otlp-proto-grpc>=1.2.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (1.26.0)\n",
+      "Requirement already satisfied: opentelemetry-instrumentation-fastapi>=0.41b0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (0.47b0)\n",
+      "Requirement already satisfied: tokenizers>=0.13.2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (0.20.0)\n",
+      "Requirement already satisfied: pypika>=0.48.9 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (0.48.9)\n",
+      "Requirement already satisfied: overrides>=7.3.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (7.7.0)\n",
+      "Requirement already satisfied: importlib-resources in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (6.4.0)\n",
+      "Requirement already satisfied: grpcio>=1.58.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (1.65.4)\n",
+      "Requirement already satisfied: bcrypt>=4.0.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (4.2.0)\n",
+      "Requirement already satisfied: kubernetes>=28.1.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (30.1.0)\n",
+      "Requirement already satisfied: mmh3>=4.0.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (4.1.0)\n",
+      "Requirement already satisfied: orjson>=3.9.12 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (3.10.7)\n",
+      "Requirement already satisfied: boto3<2.0.0,>=1.34.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from cohere<6.0,>=5.3->embedchain<0.2.0,>=0.1.114->crewai) (1.34.157)\n",
+      "Requirement already satisfied: fastavro<2.0.0,>=1.9.4 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from cohere<6.0,>=5.3->embedchain<0.2.0,>=0.1.114->crewai) (1.9.5)\n",
+      "Requirement already satisfied: httpx-sse<0.5.0,>=0.4.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from cohere<6.0,>=5.3->embedchain<0.2.0,>=0.1.114->crewai) (0.4.0)\n",
+      "Requirement already satisfied: parameterized<0.10.0,>=0.9.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from cohere<6.0,>=5.3->embedchain<0.2.0,>=0.1.114->crewai) (0.9.0)\n",
+      "Requirement already satisfied: types-requests<3.0.0,>=2.0.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from cohere<6.0,>=5.3->embedchain<0.2.0,>=0.1.114->crewai) (2.32.0.20240712)\n",
+      "Requirement already satisfied: wrapt<2,>=1.10 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from deprecated>=1.2.6->opentelemetry-api<2.0.0,>=1.22.0->crewai) (1.16.0)\n",
+      "Requirement already satisfied: google-api-core!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,<3.0.0dev,>=1.34.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from google-api-core[grpc]!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,<3.0.0dev,>=1.34.1->google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai) (2.19.1)\n",
+      "Requirement already satisfied: google-auth<3.0.0dev,>=2.14.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai) (2.33.0)\n",
+      "Requirement already satisfied: proto-plus<2.0.0dev,>=1.22.3 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai) (1.24.0)\n",
+      "Requirement already satisfied: packaging>=14.3 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai) (23.2)\n",
+      "Requirement already satisfied: google-cloud-storage<3.0.0dev,>=1.32.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai) (2.18.2)\n",
+      "Requirement already satisfied: google-cloud-bigquery!=3.20.0,<4.0.0dev,>=1.15.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai) (3.25.0)\n",
+      "Requirement already satisfied: google-cloud-resource-manager<3.0.0dev,>=1.3.3 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai) (1.12.5)\n",
+      "Requirement already satisfied: shapely<3.0.0dev in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai) (2.0.5)\n",
+      "Requirement already satisfied: cachetools in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from gptcache<0.2.0,>=0.1.43->embedchain<0.2.0,>=0.1.114->crewai) (5.4.0)\n",
+      "Requirement already satisfied: certifi in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from httpx<1,>=0.23.0->openai<2.0.0,>=1.13.3->crewai) (2024.7.4)\n",
+      "Requirement already satisfied: httpcore==1.* in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from httpx<1,>=0.23.0->openai<2.0.0,>=1.13.3->crewai) (1.0.5)\n",
+      "Requirement already satisfied: h11<0.15,>=0.13 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from httpcore==1.*->httpx<1,>=0.23.0->openai<2.0.0,>=1.13.3->crewai) (0.14.0)\n",
+      "Requirement already satisfied: zipp>=0.5 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from importlib-metadata<=8.0.0,>=6.0->opentelemetry-api<2.0.0,>=1.22.0->crewai) (3.19.2)\n",
+      "Requirement already satisfied: langchain-experimental>=0.0.6 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain-cohere<0.2.0,>=0.1.4->embedchain<0.2.0,>=0.1.114->crewai) (0.0.64)\n",
+      "Requirement already satisfied: pandas>=1.4.3 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain-cohere<0.2.0,>=0.1.4->embedchain<0.2.0,>=0.1.114->crewai) (2.2.2)\n",
+      "Requirement already satisfied: tabulate<0.10.0,>=0.9.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain-cohere<0.2.0,>=0.1.4->embedchain<0.2.0,>=0.1.114->crewai) (0.9.0)\n",
+      "Requirement already satisfied: dataclasses-json<0.7,>=0.5.7 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain-community<0.3.0,>=0.2.6->embedchain<0.2.0,>=0.1.114->crewai) (0.6.7)\n",
+      "Requirement already satisfied: jsonpatch<2.0,>=1.33 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain-core<0.3.0,>=0.2.27->langchain<=0.3,>0.2->crewai) (1.33)\n",
+      "Requirement already satisfied: qdrant-client<2.0.0,>=1.9.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from mem0ai<0.0.10,>=0.0.9->embedchain<0.2.0,>=0.1.114->crewai) (1.10.1)\n",
+      "Requirement already satisfied: six>=1.5 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from posthog<4.0.0,>=3.0.2->embedchain<0.2.0,>=0.1.114->crewai) (1.16.0)\n",
+      "Requirement already satisfied: monotonic>=1.5 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from posthog<4.0.0,>=3.0.2->embedchain<0.2.0,>=0.1.114->crewai) (1.6)\n",
+      "Requirement already satisfied: backoff>=1.10.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from posthog<4.0.0,>=3.0.2->embedchain<0.2.0,>=0.1.114->crewai) (2.2.1)\n",
+      "Requirement already satisfied: python-dateutil>2.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from posthog<4.0.0,>=3.0.2->embedchain<0.2.0,>=0.1.114->crewai) (2.9.0.post0)\n",
+      "Requirement already satisfied: charset-normalizer<4,>=2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from requests<3,>=2->langchain<=0.3,>0.2->crewai) (3.3.2)\n",
+      "Requirement already satisfied: urllib3<3,>=1.21.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from requests<3,>=2->langchain<=0.3,>0.2->crewai) (2.2.2)\n",
+      "Requirement already satisfied: markdown-it-py>=2.2.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from rich<14.0.0,>=13.7.0->instructor==1.3.3->crewai) (3.0.0)\n",
+      "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from rich<14.0.0,>=13.7.0->instructor==1.3.3->crewai) (2.18.0)\n",
+      "Requirement already satisfied: shellingham>=1.3.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from typer<1.0.0,>=0.9.0->instructor==1.3.3->crewai) (1.5.4)\n",
+      "Requirement already satisfied: botocore<1.35.0,>=1.34.157 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from boto3<2.0.0,>=1.34.0->cohere<6.0,>=5.3->embedchain<0.2.0,>=0.1.114->crewai) (1.34.157)\n",
+      "Requirement already satisfied: jmespath<2.0.0,>=0.7.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from boto3<2.0.0,>=1.34.0->cohere<6.0,>=5.3->embedchain<0.2.0,>=0.1.114->crewai) (1.0.1)\n",
+      "Requirement already satisfied: s3transfer<0.11.0,>=0.10.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from boto3<2.0.0,>=1.34.0->cohere<6.0,>=5.3->embedchain<0.2.0,>=0.1.114->crewai) (0.10.2)\n",
+      "Requirement already satisfied: pyproject_hooks in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from build>=1.0.3->chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (1.1.0)\n",
+      "Requirement already satisfied: marshmallow<4.0.0,>=3.18.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from dataclasses-json<0.7,>=0.5.7->langchain-community<0.3.0,>=0.2.6->embedchain<0.2.0,>=0.1.114->crewai) (3.21.3)\n",
+      "Requirement already satisfied: typing-inspect<1,>=0.4.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from dataclasses-json<0.7,>=0.5.7->langchain-community<0.3.0,>=0.2.6->embedchain<0.2.0,>=0.1.114->crewai) (0.9.0)\n",
+      "Requirement already satisfied: starlette<0.38.0,>=0.37.2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from fastapi>=0.95.2->chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (0.37.2)\n",
+      "Requirement already satisfied: grpcio-status<2.0.dev0,>=1.33.2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from google-api-core[grpc]!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,<3.0.0dev,>=1.34.1->google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai) (1.62.3)\n",
+      "Requirement already satisfied: pyasn1-modules>=0.2.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from google-auth<3.0.0dev,>=2.14.1->google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai) (0.4.0)\n",
+      "Requirement already satisfied: rsa<5,>=3.1.4 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from google-auth<3.0.0dev,>=2.14.1->google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai) (4.9)\n",
+      "Requirement already satisfied: google-cloud-core<3.0.0dev,>=1.6.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from google-cloud-bigquery!=3.20.0,<4.0.0dev,>=1.15.0->google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai) (2.4.1)\n",
+      "Requirement already satisfied: google-resumable-media<3.0dev,>=0.6.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from google-cloud-bigquery!=3.20.0,<4.0.0dev,>=1.15.0->google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai) (2.7.2)\n",
+      "Requirement already satisfied: grpc-google-iam-v1<1.0.0dev,>=0.12.4 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from google-cloud-resource-manager<3.0.0dev,>=1.3.3->google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai) (0.13.1)\n",
+      "Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from google-cloud-storage<3.0.0dev,>=1.32.0->google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai) (1.5.0)\n",
+      "Requirement already satisfied: jsonpointer>=1.9 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from jsonpatch<2.0,>=1.33->langchain-core<0.3.0,>=0.2.27->langchain<=0.3,>0.2->crewai) (3.0.0)\n",
+      "Requirement already satisfied: websocket-client!=0.40.0,!=0.41.*,!=0.42.*,>=0.32.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from kubernetes>=28.1.0->chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (1.8.0)\n",
+      "Requirement already satisfied: requests-oauthlib in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from kubernetes>=28.1.0->chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (2.0.0)\n",
+      "Requirement already satisfied: oauthlib>=3.2.2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from kubernetes>=28.1.0->chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (3.2.2)\n",
+      "Requirement already satisfied: mdurl~=0.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from markdown-it-py>=2.2.0->rich<14.0.0,>=13.7.0->instructor==1.3.3->crewai) (0.1.2)\n",
+      "Requirement already satisfied: coloredlogs in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from onnxruntime>=1.14.1->chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (15.0.1)\n",
+      "Requirement already satisfied: flatbuffers in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from onnxruntime>=1.14.1->chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (24.3.25)\n",
+      "Requirement already satisfied: sympy in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from onnxruntime>=1.14.1->chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (1.13.1)\n",
+      "Requirement already satisfied: opentelemetry-instrumentation-asgi==0.47b0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from opentelemetry-instrumentation-fastapi>=0.41b0->chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (0.47b0)\n",
+      "Requirement already satisfied: opentelemetry-instrumentation==0.47b0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from opentelemetry-instrumentation-fastapi>=0.41b0->chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (0.47b0)\n",
+      "Requirement already satisfied: opentelemetry-util-http==0.47b0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from opentelemetry-instrumentation-fastapi>=0.41b0->chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (0.47b0)\n",
+      "Requirement already satisfied: setuptools>=16.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from opentelemetry-instrumentation==0.47b0->opentelemetry-instrumentation-fastapi>=0.41b0->chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (72.1.0)\n",
+      "Requirement already satisfied: asgiref~=3.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from opentelemetry-instrumentation-asgi==0.47b0->opentelemetry-instrumentation-fastapi>=0.41b0->chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (3.8.1)\n",
+      "Requirement already satisfied: pytz>=2020.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from pandas>=1.4.3->langchain-cohere<0.2.0,>=0.1.4->embedchain<0.2.0,>=0.1.114->crewai) (2024.1)\n",
+      "Requirement already satisfied: tzdata>=2022.7 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from pandas>=1.4.3->langchain-cohere<0.2.0,>=0.1.4->embedchain<0.2.0,>=0.1.114->crewai) (2024.1)\n",
+      "Requirement already satisfied: grpcio-tools>=1.41.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from qdrant-client<2.0.0,>=1.9.1->mem0ai<0.0.10,>=0.0.9->embedchain<0.2.0,>=0.1.114->crewai) (1.62.3)\n",
+      "Requirement already satisfied: portalocker<3.0.0,>=2.7.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from qdrant-client<2.0.0,>=1.9.1->mem0ai<0.0.10,>=0.0.9->embedchain<0.2.0,>=0.1.114->crewai) (2.10.1)\n",
+      "Requirement already satisfied: huggingface-hub<1.0,>=0.16.4 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from tokenizers>=0.13.2->chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (0.24.5)\n",
+      "Requirement already satisfied: httptools>=0.5.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from uvicorn[standard]>=0.18.3->chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (0.6.1)\n",
+      "Requirement already satisfied: uvloop!=0.15.0,!=0.15.1,>=0.14.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from uvicorn[standard]>=0.18.3->chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (0.19.0)\n",
+      "Requirement already satisfied: watchfiles>=0.13 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from uvicorn[standard]>=0.18.3->chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (0.23.0)\n",
+      "Requirement already satisfied: websockets>=10.4 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from uvicorn[standard]>=0.18.3->chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (12.0)\n",
+      "Requirement already satisfied: MarkupSafe>=0.9.2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from Mako->alembic<2.0.0,>=1.13.1->embedchain<0.2.0,>=0.1.114->crewai) (2.1.5)\n",
+      "Requirement already satisfied: h2<5,>=3 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from httpx[http2]>=0.20.0->qdrant-client<2.0.0,>=1.9.1->mem0ai<0.0.10,>=0.0.9->embedchain<0.2.0,>=0.1.114->crewai) (4.1.0)\n",
+      "Requirement already satisfied: filelock in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from huggingface-hub<1.0,>=0.16.4->tokenizers>=0.13.2->chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (3.15.4)\n",
+      "Requirement already satisfied: fsspec>=2023.5.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from huggingface-hub<1.0,>=0.16.4->tokenizers>=0.13.2->chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (2024.6.1)\n",
+      "Requirement already satisfied: pyasn1<0.7.0,>=0.4.6 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from pyasn1-modules>=0.2.1->google-auth<3.0.0dev,>=2.14.1->google-cloud-aiplatform<2.0.0,>=1.26.1->embedchain<0.2.0,>=0.1.114->crewai) (0.6.0)\n",
+      "Requirement already satisfied: mypy-extensions>=0.3.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from typing-inspect<1,>=0.4.0->dataclasses-json<0.7,>=0.5.7->langchain-community<0.3.0,>=0.2.6->embedchain<0.2.0,>=0.1.114->crewai) (1.0.0)\n",
+      "Requirement already satisfied: humanfriendly>=9.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from coloredlogs->onnxruntime>=1.14.1->chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (10.0)\n",
+      "Requirement already satisfied: mpmath<1.4,>=1.1.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from sympy->onnxruntime>=1.14.1->chromadb<0.5.0,>=0.4.24->embedchain<0.2.0,>=0.1.114->crewai) (1.3.0)\n",
+      "Requirement already satisfied: hyperframe<7,>=6.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from h2<5,>=3->httpx[http2]>=0.20.0->qdrant-client<2.0.0,>=1.9.1->mem0ai<0.0.10,>=0.0.9->embedchain<0.2.0,>=0.1.114->crewai) (6.0.1)\n",
+      "Requirement already satisfied: hpack<5,>=4.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from h2<5,>=3->httpx[http2]>=0.20.0->qdrant-client<2.0.0,>=1.9.1->mem0ai<0.0.10,>=0.0.9->embedchain<0.2.0,>=0.1.114->crewai) (4.0.0)\n",
+      "Note: you may need to restart the kernel to use updated packages.\n",
+      "Requirement already satisfied: agentops in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (0.3.7)\n",
+      "Requirement already satisfied: requests==2.31.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from agentops) (2.31.0)\n",
+      "Requirement already satisfied: psutil==5.9.8 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from agentops) (5.9.8)\n",
+      "Requirement already satisfied: packaging==23.2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from agentops) (23.2)\n",
+      "Requirement already satisfied: termcolor==2.4.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from agentops) (2.4.0)\n",
+      "Requirement already satisfied: charset-normalizer<4,>=2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from requests==2.31.0->agentops) (3.3.2)\n",
+      "Requirement already satisfied: idna<4,>=2.5 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from requests==2.31.0->agentops) (3.7)\n",
+      "Requirement already satisfied: urllib3<3,>=1.21.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from requests==2.31.0->agentops) (2.2.2)\n",
+      "Requirement already satisfied: certifi>=2017.4.17 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from requests==2.31.0->agentops) (2024.7.4)\n",
+      "Note: you may need to restart the kernel to use updated packages.\n",
+      "Requirement already satisfied: python-dotenv in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (1.0.1)\n",
+      "Note: you may need to restart the kernel to use updated packages.\n",
+      "Requirement already satisfied: langchain_openai in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (0.1.20)\n",
+      "Requirement already satisfied: langchain-core<0.3.0,>=0.2.26 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain_openai) (0.2.29)\n",
+      "Requirement already satisfied: openai<2.0.0,>=1.32.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain_openai) (1.40.2)\n",
+      "Requirement already satisfied: tiktoken<1,>=0.7 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain_openai) (0.7.0)\n",
+      "Requirement already satisfied: PyYAML>=5.3 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain-core<0.3.0,>=0.2.26->langchain_openai) (6.0.2)\n",
+      "Requirement already satisfied: jsonpatch<2.0,>=1.33 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain-core<0.3.0,>=0.2.26->langchain_openai) (1.33)\n",
+      "Requirement already satisfied: langsmith<0.2.0,>=0.1.75 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain-core<0.3.0,>=0.2.26->langchain_openai) (0.1.98)\n",
+      "Requirement already satisfied: packaging<25,>=23.2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain-core<0.3.0,>=0.2.26->langchain_openai) (23.2)\n",
+      "Requirement already satisfied: pydantic<3,>=1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain-core<0.3.0,>=0.2.26->langchain_openai) (2.8.2)\n",
+      "Requirement already satisfied: tenacity!=8.4.0,<9.0.0,>=8.1.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain-core<0.3.0,>=0.2.26->langchain_openai) (8.5.0)\n",
+      "Requirement already satisfied: typing-extensions>=4.7 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain-core<0.3.0,>=0.2.26->langchain_openai) (4.12.2)\n",
+      "Requirement already satisfied: anyio<5,>=3.5.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from openai<2.0.0,>=1.32.0->langchain_openai) (4.4.0)\n",
+      "Requirement already satisfied: distro<2,>=1.7.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from openai<2.0.0,>=1.32.0->langchain_openai) (1.9.0)\n",
+      "Requirement already satisfied: httpx<1,>=0.23.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from openai<2.0.0,>=1.32.0->langchain_openai) (0.27.0)\n",
+      "Requirement already satisfied: jiter<1,>=0.4.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from openai<2.0.0,>=1.32.0->langchain_openai) (0.4.2)\n",
+      "Requirement already satisfied: sniffio in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from openai<2.0.0,>=1.32.0->langchain_openai) (1.3.1)\n",
+      "Requirement already satisfied: tqdm>4 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from openai<2.0.0,>=1.32.0->langchain_openai) (4.66.5)\n",
+      "Requirement already satisfied: regex>=2022.1.18 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from tiktoken<1,>=0.7->langchain_openai) (2023.12.25)\n",
+      "Requirement already satisfied: requests>=2.26.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from tiktoken<1,>=0.7->langchain_openai) (2.31.0)\n",
+      "Requirement already satisfied: idna>=2.8 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from anyio<5,>=3.5.0->openai<2.0.0,>=1.32.0->langchain_openai) (3.7)\n",
+      "Requirement already satisfied: certifi in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from httpx<1,>=0.23.0->openai<2.0.0,>=1.32.0->langchain_openai) (2024.7.4)\n",
+      "Requirement already satisfied: httpcore==1.* in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from httpx<1,>=0.23.0->openai<2.0.0,>=1.32.0->langchain_openai) (1.0.5)\n",
+      "Requirement already satisfied: h11<0.15,>=0.13 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from httpcore==1.*->httpx<1,>=0.23.0->openai<2.0.0,>=1.32.0->langchain_openai) (0.14.0)\n",
+      "Requirement already satisfied: jsonpointer>=1.9 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from jsonpatch<2.0,>=1.33->langchain-core<0.3.0,>=0.2.26->langchain_openai) (3.0.0)\n",
+      "Requirement already satisfied: orjson<4.0.0,>=3.9.14 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langsmith<0.2.0,>=0.1.75->langchain-core<0.3.0,>=0.2.26->langchain_openai) (3.10.7)\n",
+      "Requirement already satisfied: annotated-types>=0.4.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from pydantic<3,>=1->langchain-core<0.3.0,>=0.2.26->langchain_openai) (0.7.0)\n",
+      "Requirement already satisfied: pydantic-core==2.20.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from pydantic<3,>=1->langchain-core<0.3.0,>=0.2.26->langchain_openai) (2.20.1)\n",
+      "Requirement already satisfied: charset-normalizer<4,>=2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from requests>=2.26.0->tiktoken<1,>=0.7->langchain_openai) (3.3.2)\n",
+      "Requirement already satisfied: urllib3<3,>=1.21.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from requests>=2.26.0->tiktoken<1,>=0.7->langchain_openai) (2.2.2)\n",
+      "Note: you may need to restart the kernel to use updated packages.\n",
+      "Requirement already satisfied: langchain_groq in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (0.1.9)\n",
+      "Requirement already satisfied: groq<1,>=0.4.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain_groq) (0.9.0)\n",
+      "Requirement already satisfied: langchain-core<0.3.0,>=0.2.26 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain_groq) (0.2.29)\n",
+      "Requirement already satisfied: anyio<5,>=3.5.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from groq<1,>=0.4.1->langchain_groq) (4.4.0)\n",
+      "Requirement already satisfied: distro<2,>=1.7.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from groq<1,>=0.4.1->langchain_groq) (1.9.0)\n",
+      "Requirement already satisfied: httpx<1,>=0.23.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from groq<1,>=0.4.1->langchain_groq) (0.27.0)\n",
+      "Requirement already satisfied: pydantic<3,>=1.9.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from groq<1,>=0.4.1->langchain_groq) (2.8.2)\n",
+      "Requirement already satisfied: sniffio in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from groq<1,>=0.4.1->langchain_groq) (1.3.1)\n",
+      "Requirement already satisfied: typing-extensions<5,>=4.7 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from groq<1,>=0.4.1->langchain_groq) (4.12.2)\n",
+      "Requirement already satisfied: PyYAML>=5.3 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain-core<0.3.0,>=0.2.26->langchain_groq) (6.0.2)\n",
+      "Requirement already satisfied: jsonpatch<2.0,>=1.33 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain-core<0.3.0,>=0.2.26->langchain_groq) (1.33)\n",
+      "Requirement already satisfied: langsmith<0.2.0,>=0.1.75 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain-core<0.3.0,>=0.2.26->langchain_groq) (0.1.98)\n",
+      "Requirement already satisfied: packaging<25,>=23.2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain-core<0.3.0,>=0.2.26->langchain_groq) (23.2)\n",
+      "Requirement already satisfied: tenacity!=8.4.0,<9.0.0,>=8.1.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain-core<0.3.0,>=0.2.26->langchain_groq) (8.5.0)\n",
+      "Requirement already satisfied: idna>=2.8 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from anyio<5,>=3.5.0->groq<1,>=0.4.1->langchain_groq) (3.7)\n",
+      "Requirement already satisfied: certifi in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from httpx<1,>=0.23.0->groq<1,>=0.4.1->langchain_groq) (2024.7.4)\n",
+      "Requirement already satisfied: httpcore==1.* in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from httpx<1,>=0.23.0->groq<1,>=0.4.1->langchain_groq) (1.0.5)\n",
+      "Requirement already satisfied: h11<0.15,>=0.13 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from httpcore==1.*->httpx<1,>=0.23.0->groq<1,>=0.4.1->langchain_groq) (0.14.0)\n",
+      "Requirement already satisfied: jsonpointer>=1.9 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from jsonpatch<2.0,>=1.33->langchain-core<0.3.0,>=0.2.26->langchain_groq) (3.0.0)\n",
+      "Requirement already satisfied: orjson<4.0.0,>=3.9.14 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langsmith<0.2.0,>=0.1.75->langchain-core<0.3.0,>=0.2.26->langchain_groq) (3.10.7)\n",
+      "Requirement already satisfied: requests<3,>=2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langsmith<0.2.0,>=0.1.75->langchain-core<0.3.0,>=0.2.26->langchain_groq) (2.31.0)\n",
+      "Requirement already satisfied: annotated-types>=0.4.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from pydantic<3,>=1.9.0->groq<1,>=0.4.1->langchain_groq) (0.7.0)\n",
+      "Requirement already satisfied: pydantic-core==2.20.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from pydantic<3,>=1.9.0->groq<1,>=0.4.1->langchain_groq) (2.20.1)\n",
+      "Requirement already satisfied: charset-normalizer<4,>=2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from requests<3,>=2->langsmith<0.2.0,>=0.1.75->langchain-core<0.3.0,>=0.2.26->langchain_groq) (3.3.2)\n",
+      "Requirement already satisfied: urllib3<3,>=1.21.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from requests<3,>=2->langsmith<0.2.0,>=0.1.75->langchain-core<0.3.0,>=0.2.26->langchain_groq) (2.2.2)\n",
+      "Note: you may need to restart the kernel to use updated packages.\n",
+      "Requirement already satisfied: langchain in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (0.2.12)\n",
+      "Requirement already satisfied: PyYAML>=5.3 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain) (6.0.2)\n",
+      "Requirement already satisfied: SQLAlchemy<3,>=1.4 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain) (2.0.32)\n",
+      "Requirement already satisfied: aiohttp<4.0.0,>=3.8.3 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain) (3.10.2)\n",
+      "Requirement already satisfied: langchain-core<0.3.0,>=0.2.27 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain) (0.2.29)\n",
+      "Requirement already satisfied: langchain-text-splitters<0.3.0,>=0.2.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain) (0.2.2)\n",
+      "Requirement already satisfied: langsmith<0.2.0,>=0.1.17 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain) (0.1.98)\n",
+      "Requirement already satisfied: numpy<2.0.0,>=1.26.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain) (1.26.4)\n",
+      "Requirement already satisfied: pydantic<3,>=1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain) (2.8.2)\n",
+      "Requirement already satisfied: requests<3,>=2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain) (2.31.0)\n",
+      "Requirement already satisfied: tenacity!=8.4.0,<9.0.0,>=8.1.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain) (8.5.0)\n",
+      "Requirement already satisfied: aiohappyeyeballs>=2.3.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.8.3->langchain) (2.3.5)\n",
+      "Requirement already satisfied: aiosignal>=1.1.2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.8.3->langchain) (1.3.1)\n",
+      "Requirement already satisfied: attrs>=17.3.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.8.3->langchain) (24.2.0)\n",
+      "Requirement already satisfied: frozenlist>=1.1.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.8.3->langchain) (1.4.1)\n",
+      "Requirement already satisfied: multidict<7.0,>=4.5 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.8.3->langchain) (6.0.5)\n",
+      "Requirement already satisfied: yarl<2.0,>=1.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.8.3->langchain) (1.9.4)\n",
+      "Requirement already satisfied: jsonpatch<2.0,>=1.33 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain-core<0.3.0,>=0.2.27->langchain) (1.33)\n",
+      "Requirement already satisfied: packaging<25,>=23.2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain-core<0.3.0,>=0.2.27->langchain) (23.2)\n",
+      "Requirement already satisfied: typing-extensions>=4.7 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langchain-core<0.3.0,>=0.2.27->langchain) (4.12.2)\n",
+      "Requirement already satisfied: orjson<4.0.0,>=3.9.14 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from langsmith<0.2.0,>=0.1.17->langchain) (3.10.7)\n",
+      "Requirement already satisfied: annotated-types>=0.4.0 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from pydantic<3,>=1->langchain) (0.7.0)\n",
+      "Requirement already satisfied: pydantic-core==2.20.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from pydantic<3,>=1->langchain) (2.20.1)\n",
+      "Requirement already satisfied: charset-normalizer<4,>=2 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from requests<3,>=2->langchain) (3.3.2)\n",
+      "Requirement already satisfied: idna<4,>=2.5 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from requests<3,>=2->langchain) (3.7)\n",
+      "Requirement already satisfied: urllib3<3,>=1.21.1 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from requests<3,>=2->langchain) (2.2.2)\n",
+      "Requirement already satisfied: certifi>=2017.4.17 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from requests<3,>=2->langchain) (2024.7.4)\n",
+      "Requirement already satisfied: jsonpointer>=1.9 in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from jsonpatch<2.0,>=1.33->langchain-core<0.3.0,>=0.2.27->langchain) (3.0.0)\n",
+      "Note: you may need to restart the kernel to use updated packages.\n",
+      "\u001b[31mERROR: Could not find a version that satisfies the requirement StringIO (from versions: none)\u001b[0m\u001b[31m\n",
+      "\u001b[0m\u001b[31mERROR: No matching distribution found for StringIO\u001b[0m\u001b[31m\n",
+      "\u001b[0mNote: you may need to restart the kernel to use updated packages.\n",
+      "Requirement already satisfied: pymarkdown in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (0.1.4)\n",
+      "Requirement already satisfied: toolz in /Users/howardgil/Desktop/agentops/HowieG/env/lib/python3.12/site-packages (from pymarkdown) (0.12.1)\n",
+      "Note: you may need to restart the kernel to use updated packages.\n"
+     ]
+    }
+   ],
+   "source": [
+    "%pip install -U crewai \n",
+    "%pip install -U agentops\n",
+    "%pip install -U python-dotenv\n",
+    "%pip install -U langchain_openai\n",
+    "%pip install -U langchain_groq\n",
+    "%pip install -U langchain\n",
+    "%pip install -U StringIO\n",
+    "%pip install -U pymarkdown"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "844b50cb",
+   "metadata": {},
+   "source": [
+    "Then import them"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "id": "3930dc4c82f117b6",
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2024-08-09T20:15:44.216372Z",
+     "start_time": "2024-08-09T20:15:44.176432Z"
+    }
+   },
+   "outputs": [
+    {
+     "ename": "ModuleNotFoundError",
+     "evalue": "No module named 'StringIO'",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[0;31mModuleNotFoundError\u001b[0m                       Traceback (most recent call last)",
+      "Cell \u001b[0;32mIn[4], line 9\u001b[0m\n\u001b[1;32m      7\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mos\u001b[39;00m\n\u001b[1;32m      8\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdotenv\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m load_dotenv\n\u001b[0;32m----> 9\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mpymarkdown\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mapi\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m PyMarkdownApi, PyMarkdownApiException\n",
+      "File \u001b[0;32m~/Desktop/agentops/HowieG/env/lib/python3.12/site-packages/pymarkdown/__init__.py:1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mcore\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m process\n",
+      "File \u001b[0;32m~/Desktop/agentops/HowieG/env/lib/python3.12/site-packages/pymarkdown/core.py:4\u001b[0m\n\u001b[1;32m      2\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mre\u001b[39;00m\n\u001b[1;32m      3\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mcontextlib\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m contextmanager\n\u001b[0;32m----> 4\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mStringIO\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m StringIO\n\u001b[1;32m      5\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mitertools\u001b[39;00m\n\u001b[1;32m      6\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01msys\u001b[39;00m\n",
+      "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'StringIO'"
+     ]
+    }
+   ],
+   "source": [
+    "import sys\n",
+    "from crewai import Agent, Task\n",
+    "from langchain_groq import ChatGroq\n",
+    "from langchain.tools import tool\n",
+    "from langchain_openai import ChatOpenAI\n",
+    "import agentops\n",
+    "import os\n",
+    "from dotenv import load_dotenv\n",
+    "from pymarkdown.api import PyMarkdownApi, PyMarkdownApiException\n",
+    "from io import StringIO"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "0e307923",
+   "metadata": {},
+   "source": [
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e0e9166a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "load_dotenv()\n",
+    "OPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\") or \"\"\n",
+    "GROQ_API_KEY = os.getenv(\"GROQ_API_KEY\") or \"\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "6a9283d4735b1226",
+   "metadata": {},
+   "source": [
+    "The first step in any AgentOps integration is to call `agentops.init()`"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "701a00a193b93118",
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2024-08-09T20:15:44.217370Z",
+     "start_time": "2024-08-09T20:15:44.217328Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "agentops.init(AGENTOPS_API_KEY, default_tags=['markdown_validator'])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "dba56fc45784bfa",
+   "metadata": {},
+   "source": [
+    "Lets start by creating our markdown validator tool"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "cb2152baa314da66",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "@tool(\"markdown_validation_tool\")\n",
+    "def markdown_validation_tool(file_path: str) -> str:\n",
+    "    \"\"\"\n",
+    "    A tool to review files for markdown syntax errors.\n",
+    "\n",
+    "    Returns:\n",
+    "    - validation_results: A list of validation results \n",
+    "    and suggestions on how to fix them.\n",
+    "    \"\"\"\n",
+    "\n",
+    "    print(\"\\n\\nValidating Markdown syntax...\\n\\n\" + file_path)\n",
+    "\n",
+    "    try:\n",
+    "        if not (os.path.exists(file_path)):\n",
+    "            return \"Could not validate file. The provided file path does not exist.\"\n",
+    "\n",
+    "        scan_result = PyMarkdownApi().scan_path(file_path.rstrip().lstrip())\n",
+    "        results = str(scan_result)\n",
+    "        return results  # Return the reviewed document\n",
+    "    except PyMarkdownApiException as this_exception:\n",
+    "        print(f\"API Exception: {this_exception}\", file=sys.stderr)\n",
+    "        return f\"API Exception: {str(this_exception)}\"\n"
+   ]
+  },
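+  {
+   "cell_type": "markdown",
+   "id": "3f2d9c1a",
+   "metadata": {},
+   "source": [
+    "If you want, you can sanity-check the tool on its own before handing it to an agent. A minimal sketch (it assumes a `README.md` sits next to this notebook; `.run()` is LangChain's direct tool invocation):\n",
+    "``` python\n",
+    "# prints the linting results, or a friendly message if the path does not exist\n",
+    "print(markdown_validation_tool.run(\"README.md\"))\n",
+    "```"
+   ]
+  },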
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "4bbeec0eb7d000ca",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "groq_llm = ChatGroq(\n",
+    "    temperature=0,\n",
+    "    groq_api_key=GROQ_API_KEY,\n",
+    "    model_name=\"llama3-70b-8192\",\n",
+    ")\n",
+    "\n",
+    "default_llm = ChatOpenAI(openai_api_base=os.environ.get(\"OPENAI_API_BASE_URL\", \"https://api.openai.com/v1\"),\n",
+    "                        openai_api_key=OPENAI_API_KEY,\n",
+    "                        temperature=0.1,                        \n",
+    "                        model_name=os.environ.get(\"MODEL_NAME\", \"gpt-3.5-turbo\"))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "805ded98160f35ca",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "filename = \"README.md\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "bae481e07b5fadc2",
+   "metadata": {},
+   "source": [
+    "Lets create our Agent with CrewAI"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3c9ca4fa0540a142",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "general_agent  = Agent(role='Requirements Manager',\n",
+    "                    goal=\"\"\"Provide a detailed list of the markdown \n",
+    "                            linting results. Give a summary with actionable \n",
+    "                            tasks to address the validation results. Write your \n",
+    "                            response as if you were handing it to a developer \n",
+    "                            to fix the issues.\n",
+    "                            DO NOT provide examples of how to fix the issues or\n",
+    "                            recommend other tools to use.\"\"\",\n",
+    "                    backstory=\"\"\"You are an expert business analyst \n",
+    "\t\t\t\t\tand software QA specialist. You provide high quality, \n",
+    "                    thorough, insightful and actionable feedback via \n",
+    "                    detailed list of changes and actionable tasks.\"\"\",\n",
+    "                    allow_delegation=False, \n",
+    "                    verbose=True,\n",
+    "                    tools=[markdown_validation_tool],\n",
+    "                    llm=default_llm)  #groq_llm) "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "7940a03ceb4a55de",
+   "metadata": {},
+   "source": [
+    "Now lets create the task for our agent to complete"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "28b4abd52ff9bf86",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "syntax_review_task = Task(description=f\"\"\"\n",
+    "        Use the markdown_validation_tool to review \n",
+    "        the file(s) at this path: {filename}\n",
+    "        \n",
+    "        Be sure to pass only the file path to the markdown_validation_tool.\n",
+    "        Use the following format to call the markdown_validation_tool:\n",
+    "        Do I need to use a tool? Yes\n",
+    "        Action: markdown_validation_tool\n",
+    "        Action Input: {filename}\n",
+    "\n",
+    "        Get the validation results from the tool \n",
+    "        and then summarize it into a list of changes\n",
+    "        the developer should make to the document.\n",
+    "        DO NOT recommend ways to update the document.\n",
+    "        DO NOT change any of the content of the document or\n",
+    "        add content to it. It is critical to your task to\n",
+    "        only respond with a list of changes.\n",
+    "        \n",
+    "        If you already know the answer or if you do not need \n",
+    "        to use a tool, return it as your Final Answer.\"\"\",\n",
+    "        agent=general_agent,\n",
+    "        expected_output=\"\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "7283562a262056d5",
+   "metadata": {},
+   "source": [
+    "Now lets run our task!"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "d5c5f01bee50b92a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "syntax_review_task.execute_sync()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "660cc410a9e847b7",
+   "metadata": {},
+   "source": [
+    "Finally, don't forget to end your AgentOps session!"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6eeee1a76a26bd14",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "agentops.end_session('Success')"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/examples/langchain_examples.ipynb b/examples/langchain/langchain_examples.ipynb
similarity index 75%
rename from examples/langchain_examples.ipynb
rename to examples/langchain/langchain_examples.ipynb
index 4fcba18a..0069add4 100644
--- a/examples/langchain_examples.ipynb
+++ b/examples/langchain/langchain_examples.ipynb
@@ -7,9 +7,35 @@
    "source": [
     "# AgentOps Langchain Agent Implementation\n",
     "\n",
-    "Using AgentOps monitoring with Langchain is simple. We've created a LangchainCallbackHandler that will do all of the heavy lifting!\n",
-    "\n",
-    "First we'll import the typical Langchain packages:"
+    "Using AgentOps monitoring with Langchain is simple. We've created a LangchainCallbackHandler that will do all of the heavy lifting!"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "1516a90d",
+   "metadata": {},
+   "source": [
+    "First let's install the required packages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e5fc8497",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install -U langchain\n",
+    "%pip install -U agentops\n",
+    "%pip install -U python-dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "9480596a",
+   "metadata": {},
+   "source": [
+    "Then import them"
    ]
   },
   {
@@ -19,10 +45,11 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import os\n",
     "from langchain.chat_models import ChatOpenAI\n",
     "from langchain.agents import initialize_agent, AgentType\n",
-    "from langchain.agents import tool"
+    "from langchain.agents import tool\n",
+    "import os\n",
+    "from dotenv import load_dotenv"
    ]
   },
   {
@@ -35,61 +62,59 @@
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "7e8f8cd098ad5b57",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "from agentops.partners.langchain_callback_handler import (\n",
     "    LangchainCallbackHandler as AgentOpsLangchainCallbackHandler,\n",
     ")"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "7e8f8cd098ad5b57",
-   "execution_count": null
+   ]
   },
   {
    "cell_type": "markdown",
+   "id": "25f189b0",
+   "metadata": {},
    "source": [
-    "Next, we'll grab our two API keys. You can use dotenv like below or however else you like to load environment variables"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "14a1b8e08a2e9eb3"
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "974514a8",
+   "metadata": {},
    "outputs": [],
    "source": [
-    "from dotenv import load_dotenv\n",
-    "\n",
-    "load_dotenv()"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "ff6cfc570599871f",
-   "execution_count": null
+    "load_dotenv()\n",
+    "OPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\") or \"\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\""
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "This is where AgentOps comes into play. Before creating our LLM instance via Langchain, first we'll create an instance of the AO LangchainCallbackHandler. After the handler is initialized, a session will be recorded automatically.\n",
-    "\n",
-    "Pass in your API key, and optionally any tags to describe this session for easier lookup in the AO dashboard."
-   ],
+   "id": "51f083697b783fa4",
    "metadata": {
     "collapsed": false
    },
-   "id": "51f083697b783fa4"
+   "source": [
+    "This is where AgentOps comes into play. Before creating our LLM instance via Langchain, first we'll create an instance of the AO LangchainCallbackHandler. After the handler is initialized, a session will be recorded automatically.\n",
+    "\n",
+    "Optionally pass in any tags to describe this session for easier lookup in the AO dashboard."
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "d432fe915edb6365",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
-    "AGENTOPS_API_KEY = os.environ.get(\"AGENTOPS_API_KEY\")\n",
-    "OPENAI_API_KEY = os.environ.get(\"OPENAI_API_KEY\")\n",
-    "\n",
     "agentops_handler = AgentOpsLangchainCallbackHandler(\n",
     "    api_key=AGENTOPS_API_KEY, tags=[\"Langchain Example\"]\n",
     ")\n",
@@ -97,47 +122,47 @@
     "llm = ChatOpenAI(\n",
     "    openai_api_key=OPENAI_API_KEY, callbacks=[agentops_handler], model=\"gpt-3.5-turbo\"\n",
     ")"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "d432fe915edb6365",
-   "execution_count": null
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "You can also retrieve the `session_id` of the newly created session."
-   ],
+   "id": "38d309f07363b58e",
    "metadata": {
     "collapsed": false
    },
-   "id": "38d309f07363b58e"
+   "source": [
+    "You can also retrieve the `session_id` of the newly created session."
+   ]
   },
   {
    "cell_type": "code",
-   "outputs": [],
-   "source": [
-    "print(\"Agent Ops session ID: \" + str(agentops_handler.session_id))"
-   ],
+   "execution_count": null,
+   "id": "f7e3a37cde3f9c22",
    "metadata": {
     "collapsed": false
    },
-   "id": "f7e3a37cde3f9c22",
-   "execution_count": null
+   "outputs": [],
+   "source": [
+    "print(\"Agent Ops session ID: \" + str(agentops_handler.session_id))"
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "Agents generally use tools. Let's define a simple tool here. Tool usage is also recorded."
-   ],
+   "id": "42f226ace56ef6f5",
    "metadata": {
     "collapsed": false
    },
-   "id": "42f226ace56ef6f5"
+   "source": [
+    "Agents generally use tools. Let's define a simple tool here. Tool usage is also recorded."
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "c103a2edbe837abd",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "@tool\n",
@@ -150,46 +175,48 @@
     "\n",
     "\n",
     "tools = [find_movie]"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "c103a2edbe837abd"
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "For each tool, you need to also add the callback handler"
-   ],
+   "id": "4fb7633857b19bf0",
    "metadata": {
     "collapsed": false
    },
-   "id": "4fb7633857b19bf0"
+   "source": [
+    "For each tool, you need to also add the callback handler"
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "a0345f08bf1c5ecd",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "for t in tools:\n",
     "    t.callbacks = [agentops_handler]"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "a0345f08bf1c5ecd"
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "Finally, let's use our agent! Pass in the callback handler to the agent, and all the actions will be recorded in the AO Dashboard"
-   ],
+   "id": "12a02b833716676b",
    "metadata": {
     "collapsed": false
    },
-   "id": "12a02b833716676b"
+   "source": [
+    "Finally, let's use our agent! Pass in the callback handler to the agent, and all the actions will be recorded in the AO Dashboard"
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "2d2e83fa69b30add",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "agent = initialize_agent(\n",
@@ -202,92 +229,93 @@
     "    ],  # You must pass in a callback handler to record your agent\n",
     "    handle_parsing_errors=True,\n",
     ")"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "2d2e83fa69b30add"
+   ]
   },
   {
    "cell_type": "code",
-   "outputs": [],
-   "source": [
-    "agent.run(\"What comedies are playing?\", callbacks=[agentops_handler])"
-   ],
+   "execution_count": null,
+   "id": "df2bc3a384493e1e",
    "metadata": {
     "collapsed": false
    },
-   "id": "df2bc3a384493e1e"
+   "outputs": [],
+   "source": [
+    "agent.run(\"What comedies are playing?\", callbacks=[agentops_handler])"
+   ]
   },
   {
    "cell_type": "markdown",
+   "id": "2230edd919182a55",
+   "metadata": {
+    "collapsed": false
+   },
    "source": [
     "## Check your session\n",
     "Finally, check your run on [AgentOps](https://app.agentops.ai)\n",
     "![image.png](attachment:3d9393fa-3d6a-4193-b6c9-43413dc19d15.png)"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "2230edd919182a55"
+   ]
   },
   {
    "cell_type": "markdown",
+   "id": "fbf4a3ec5fa60d74",
+   "metadata": {
+    "collapsed": false
+   },
    "source": [
     "# Async Agents\n",
     "\n",
     "Several langchain agents require async callback handlers. AgentOps also supports this."
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "fbf4a3ec5fa60d74"
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "ed63a166b343e1a2",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
-    "import os\n",
     "from langchain.chat_models import ChatOpenAI\n",
     "from langchain.agents import initialize_agent, AgentType\n",
-    "from langchain.agents import tool"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "ed63a166b343e1a2"
+    "from langchain.agents import tool\n",
+    "import os\n",
+    "from dotenv import load_dotenv"
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "aa15223969f97b3d",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "from agentops.partners.langchain_callback_handler import (\n",
     "    AsyncLangchainCallbackHandler as AgentOpsAsyncLangchainCallbackHandler,\n",
     ")"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "aa15223969f97b3d"
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "824e1d44",
+   "metadata": {},
    "outputs": [],
    "source": [
-    "from dotenv import load_dotenv\n",
-    "\n",
     "load_dotenv()\n",
-    "\n",
-    "AGENTOPS_API_KEY = os.environ.get(\"AGENTOPS_API_KEY\")\n",
-    "OPENAI_API_KEY = os.environ.get(\"OPENAI_API_KEY\")"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "818357483f039b60"
+    "OPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\") or \"\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\""
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "ae76cfe058f5e4e4",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "agentops_handler = AgentOpsAsyncLangchainCallbackHandler(\n",
@@ -299,14 +327,15 @@
     ")\n",
     "\n",
     "print(\"Agent Ops session ID: \" + str(await agentops_handler.session_id))"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "ae76cfe058f5e4e4"
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "1201049766be84a7",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "@tool\n",
@@ -322,14 +351,15 @@
     "\n",
     "for t in tools:\n",
     "    t.callbacks = [agentops_handler]"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "1201049766be84a7"
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "8d4f9dd39b79d542",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "agent = initialize_agent(\n",
@@ -342,24 +372,20 @@
     ")\n",
     "\n",
     "await agent.arun(\"What comedies are playing?\")"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "8d4f9dd39b79d542"
+   ]
   },
   {
    "cell_type": "markdown",
+   "id": "fb276a2e-f1c3-4f0f-8818-b7730e9d3ff7",
+   "metadata": {
+    "collapsed": false
+   },
    "source": [
     "## Check your session\n",
     "Finally, check your run on [AgentOps](https://app.agentops.ai)\n",
     "\n",
     "![image.png](attachment:69f2121a-d437-4c09-bbbe-c76c9243ee19.png)"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "fb276a2e-f1c3-4f0f-8818-b7730e9d3ff7"
+   ]
   }
  ],
  "metadata": {
diff --git a/examples/litellm/litellm_example.ipynb b/examples/litellm/litellm_example.ipynb
new file mode 100644
index 00000000..78cccf3a
--- /dev/null
+++ b/examples/litellm/litellm_example.ipynb
@@ -0,0 +1,140 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### NOTE:\n",
+    "AgentOps requires that you import and call LiteLLM differently than LiteLLM's documentation.  \n",
+    "Instead of\n",
+    "``` python\n",
+    "from litellm import completion\n",
+    "completion()\n",
+    "```\n",
+    "You should import and call like this:\n",
+    "``` python\n",
+    "import litellm\n",
+    "litellm.completion()\n",
+    "```\n",
+    "\n",
+    "Please see examples below\n",
+    "\n",
+    "[See our LiteLLM docs](https://docs.agentops.ai/v1/integrations/litellm)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "First let's install the required packages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install -U litellm\n",
+    "%pip install -U agentops\n",
+    "%pip install -U python-dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Then import them"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import litellm\n",
+    "import agentops\n",
+    "import os\n",
+    "from dotenv import load_dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "LiteLLM allows you to use several models including from OpenAI, Llama, Mistral, Claude, Gemini, Gemma, Dall-E, Whisper, and more all using the OpenAI format. To use a different model all you need to change are the API KEY and model (litellm.completion(model=\"...\"))."
+   ]
+  },
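+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "For example, switching to an Anthropic model only changes the model string (a sketch; it assumes you have an `ANTHROPIC_API_KEY` set in your environment and access to the model named below):\n",
+    "``` python\n",
+    "# hypothetical provider swap; any LiteLLM-supported model string works here\n",
+    "response = litellm.completion(\n",
+    "    model=\"claude-3-haiku-20240307\",\n",
+    "    messages=[{\"role\": \"user\", \"content\": \"Write a 12 word poem about secret agents.\"}],\n",
+    ")\n",
+    "```"
+   ]
+  },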
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "load_dotenv()\n",
+    "OPENAI_API_KEY = (\n",
+    "    os.getenv(\"OPENAI_API_KEY\") or \"\"\n",
+    ")  # or the provider of your choosing\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "agentops.init(AGENTOPS_API_KEY, default_tags=[\"litellm-example\"])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "messages = [{\"role\": \"user\", \"content\": \"Write a 12 word poem about secret agents.\"}]\n",
+    "response = litellm.completion(model=\"gpt-4\", messages=messages) # or the model of your choosing\n",
+    "print(response.choices[0].message.content)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "agentops.end_session(\"Success\")"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "env",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/examples/multi_agent_example.ipynb b/examples/multi_agent_example.ipynb
index 7254f87b..bc705d46 100644
--- a/examples/multi_agent_example.ipynb
+++ b/examples/multi_agent_example.ipynb
@@ -14,6 +14,34 @@
     "This is an example implementation of tracking events from two separate agents"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "c4e0d5ff",
+   "metadata": {},
+   "source": [
+    "First let's install the required packages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5439d798",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install -U openai\n",
+    "%pip install -U agentops\n",
+    "%pip install -U python-dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "8f69131d",
+   "metadata": {},
+   "source": [
+    "Then import them"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -28,14 +56,21 @@
    "source": [
     "import agentops\n",
     "from agentops import track_agent\n",
-    "from dotenv import load_dotenv\n",
-    "import os\n",
     "from openai import OpenAI\n",
+    "import os\n",
+    "from dotenv import load_dotenv\n",
     "import logging\n",
-    "\n",
     "from IPython.display import display, Markdown"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "6a65f091",
+   "metadata": {},
+   "source": [
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -49,8 +84,8 @@
    "outputs": [],
    "source": [
     "load_dotenv()\n",
-    "OPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\", \"\")\n",
-    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\", \"\")\n",
+    "OPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\") or \"\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\"\n",
     "logging.basicConfig(\n",
     "    level=logging.DEBUG\n",
     ")  # this will let us see that calls are assigned to an agent"
@@ -68,7 +103,7 @@
    },
    "outputs": [],
    "source": [
-    "agentops.init(AGENTOPS_API_KEY, tags=[\"multi-agent-notebook\"])\n",
+    "agentops.init(AGENTOPS_API_KEY, default_tags=[\"multi-agent-notebook\"])\n",
     "openai_client = OpenAI(api_key=OPENAI_API_KEY)"
    ]
   },
diff --git a/examples/multi_agent_groq_example.ipynb b/examples/multi_agent_groq_example.ipynb
index bb63481b..c86e83aa 100644
--- a/examples/multi_agent_groq_example.ipynb
+++ b/examples/multi_agent_groq_example.ipynb
@@ -14,9 +14,37 @@
     "This is an example implementation of tracking events from two separate agents"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "fde50a03",
+   "metadata": {},
+   "source": [
+    "First let's install the required packages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f846ae29",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install -U agentops\n",
+    "%pip install -U groq\n",
+    "%pip install -U python-dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "882b027b",
+   "metadata": {},
+   "source": [
+    "Then import them"
+   ]
+  },
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
    "id": "7c566fac57d3b6ce",
    "metadata": {
     "collapsed": false,
@@ -36,9 +64,17 @@
     "from IPython.display import display, Markdown"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "d614aaf3",
+   "metadata": {},
+   "source": [
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
+   ]
+  },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": null,
    "id": "9f8c52496c04693",
    "metadata": {
     "collapsed": false,
@@ -49,8 +85,8 @@
    "outputs": [],
    "source": [
     "load_dotenv()\n",
-    "GROQ_API_KEY = os.getenv(\"GROQ_API_KEY\", \"\")\n",
-    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\", \"\")\n",
+    "GROQ_API_KEY = os.getenv(\"GROQ_API_KEY\") or \"\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\"\n",
     "logging.basicConfig(\n",
     "    level=logging.DEBUG\n",
     ")  # this will let us see that calls are assigned to an agent"
@@ -58,7 +94,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": null,
    "id": "af062552554d60ce",
    "metadata": {
     "collapsed": false,
@@ -66,24 +102,9 @@
      "outputs_hidden": false
     }
    },
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "DEBUG:urllib3.connectionpool:Starting new HTTPS connection (1): api.agentops.ai:443\n",
-      "DEBUG:urllib3.connectionpool:https://api.agentops.ai:443 \"POST /v2/create_session HTTP/11\" 200 204\n",
-      "πŸ–‡ AgentOps: \u001b[34m\u001b[34mSession Replay: https://app.agentops.ai/drilldown?session_id=892edb44-774d-4f52-a9b8-4d4eada5b434\u001b[0m\u001b[0m\n",
-      "INFO:agentops:\u001b[34m\u001b[34mSession Replay: https://app.agentops.ai/drilldown?session_id=892edb44-774d-4f52-a9b8-4d4eada5b434\u001b[0m\u001b[0m\n",
-      "DEBUG:httpx:load_ssl_context verify=True cert=None trust_env=True http2=False\n",
-      "DEBUG:httpx:load_verify_locations cafile='/Users/manu_suryavansh/miniforge3/envs/agentsops_dev/lib/python3.11/site-packages/certifi/cacert.pem'\n",
-      "DEBUG:urllib3.connectionpool:Starting new HTTPS connection (1): api.agentops.ai:443\n",
-      "DEBUG:urllib3.connectionpool:https://api.agentops.ai:443 \"POST /v2/create_events HTTP/11\" 200 9\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
-    "agentops.init(AGENTOPS_API_KEY, tags=[\"multi-agent-groq-notebook\"])\n",
+    "agentops.init(AGENTOPS_API_KEY, default_tags=[\"multi-agent-groq-notebook\"])\n",
     "groq_client = Groq(api_key=GROQ_API_KEY)"
    ]
   },
@@ -102,7 +123,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": null,
    "id": "727e3cc26ce3ec3",
    "metadata": {
     "collapsed": false,
@@ -148,7 +169,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": null,
    "id": "79b75d65de738522",
    "metadata": {
     "collapsed": false,
@@ -156,18 +177,7 @@
      "outputs_hidden": false
     }
    },
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "DEBUG:urllib3.connectionpool:Starting new HTTPS connection (1): api.agentops.ai:443\n",
-      "DEBUG:urllib3.connectionpool:https://api.agentops.ai:443 \"POST /v2/create_agent HTTP/11\" 200 9\n",
-      "DEBUG:urllib3.connectionpool:Starting new HTTPS connection (1): api.agentops.ai:443\n",
-      "DEBUG:urllib3.connectionpool:https://api.agentops.ai:443 \"POST /v2/create_agent HTTP/11\" 200 9\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "qa = QaAgent()\n",
     "engineer = EngineerAgent()"
@@ -190,42 +200,17 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": null,
    "id": "7272b927-67ef-4b8c-84a5-63ed06f75aa5",
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "DEBUG:groq._base_client:Request options: {'method': 'post', 'url': '/openai/v1/chat/completions', 'files': None, 'json_data': {'messages': [{'role': 'system', 'content': 'You are a software engineer and only output python code, no markdown tags.'}, {'role': 'user', 'content': 'python function to test prime number'}], 'model': 'llama3-70b-8192', 'temperature': 0.5}}\n",
-      "DEBUG:groq._base_client:Sending HTTP Request: POST https://api.groq.com/openai/v1/chat/completions\n",
-      "DEBUG:httpcore.connection:connect_tcp.started host='api.groq.com' port=443 local_address=None timeout=5.0 socket_options=None\n",
-      "DEBUG:httpcore.connection:connect_tcp.complete return_value=\n",
-      "DEBUG:httpcore.connection:start_tls.started ssl_context= server_hostname='api.groq.com' timeout=5.0\n",
-      "DEBUG:httpcore.connection:start_tls.complete return_value=\n",
-      "DEBUG:httpcore.http11:send_request_headers.started request=\n",
-      "DEBUG:httpcore.http11:send_request_headers.complete\n",
-      "DEBUG:httpcore.http11:send_request_body.started request=\n",
-      "DEBUG:httpcore.http11:send_request_body.complete\n",
-      "DEBUG:httpcore.http11:receive_response_headers.started request=\n",
-      "DEBUG:httpcore.http11:receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Sun, 21 Jul 2024 05:55:22 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'Cache-Control', b'private, max-age=0, no-store, no-cache, must-revalidate'), (b'vary', b'Origin'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'30000'), (b'x-ratelimit-remaining-requests', b'49999'), (b'x-ratelimit-remaining-tokens', b'29963'), (b'x-ratelimit-reset-requests', b'1.728s'), (b'x-ratelimit-reset-tokens', b'74ms'), (b'x-request-id', b'req_01j39xqscce4dbg5h08vrftym2'), (b'via', b'1.1 google'), (b'alt-svc', b'h3=\":443\"; ma=86400'), (b'CF-Cache-Status', b'DYNAMIC'), (b'Set-Cookie', b'__cf_bm=vDBNcm.4NuP7B9MJyHy7WVBS7CVF.SyvXXsf7ZXdpT8-1721541322-1.0.1.1-QRg7ZBBgC845heu3O2ZfJySw1nqhlOCwpF29NmD1H9xnMUNFOstcyHCHabYKSBZXq6iNGbkYaId01XpPYOfuWQ; path=/; expires=Sun, 21-Jul-24 06:25:22 GMT; domain=.groq.com; HttpOnly; Secure; SameSite=None'), (b'Server', b'cloudflare'), (b'CF-RAY', b'8a68f10f2ba89652-SJC'), (b'Content-Encoding', b'gzip')])\n",
-      "INFO:httpx:HTTP Request: POST https://api.groq.com/openai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
-      "DEBUG:httpcore.http11:receive_response_body.started request=\n",
-      "DEBUG:httpcore.http11:receive_response_body.complete\n",
-      "DEBUG:httpcore.http11:response_closed.started\n",
-      "DEBUG:httpcore.http11:response_closed.complete\n",
-      "DEBUG:groq._base_client:HTTP Response: POST https://api.groq.com/openai/v1/chat/completions \"200 OK\" Headers({'date': 'Sun, 21 Jul 2024 05:55:22 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'cache-control': 'private, max-age=0, no-store, no-cache, must-revalidate', 'vary': 'Origin', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '30000', 'x-ratelimit-remaining-requests': '49999', 'x-ratelimit-remaining-tokens': '29963', 'x-ratelimit-reset-requests': '1.728s', 'x-ratelimit-reset-tokens': '74ms', 'x-request-id': 'req_01j39xqscce4dbg5h08vrftym2', 'via': '1.1 google', 'alt-svc': 'h3=\":443\"; ma=86400', 'cf-cache-status': 'DYNAMIC', 'set-cookie': '__cf_bm=vDBNcm.4NuP7B9MJyHy7WVBS7CVF.SyvXXsf7ZXdpT8-1721541322-1.0.1.1-QRg7ZBBgC845heu3O2ZfJySw1nqhlOCwpF29NmD1H9xnMUNFOstcyHCHabYKSBZXq6iNGbkYaId01XpPYOfuWQ; path=/; expires=Sun, 21-Jul-24 06:25:22 GMT; domain=.groq.com; HttpOnly; Secure; SameSite=None', 'server': 'cloudflare', 'cf-ray': '8a68f10f2ba89652-SJC', 'content-encoding': 'gzip'})\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "generated_func = engineer.completion(\"python function to test prime number\")"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": null,
    "id": "830b86dac47dceb3",
    "metadata": {
     "collapsed": false,
@@ -233,40 +218,14 @@
      "outputs_hidden": false
     }
    },
-   "outputs": [
-    {
-     "data": {
-      "text/markdown": [
-       "```python\n",
-       "def is_prime(n):\n",
-       "    if n <= 1:\n",
-       "        return False\n",
-       "    if n == 2:\n",
-       "        return True\n",
-       "    if n % 2 == 0:\n",
-       "        return False\n",
-       "    max_divisor = int(n**0.5) + 1\n",
-       "    for d in range(3, max_divisor, 2):\n",
-       "        if n % d == 0:\n",
-       "            return False\n",
-       "    return True\n",
-       "```"
-      ],
-      "text/plain": [
-       ""
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    }
-   ],
+   "outputs": [],
    "source": [
     "display(Markdown(\"```python\\n\" + generated_func + \"\\n```\"))"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": null,
    "id": "63c9d0d457aee91a",
    "metadata": {
     "collapsed": false,
@@ -274,28 +233,7 @@
      "outputs_hidden": false
     }
    },
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "DEBUG:groq._base_client:Request options: {'method': 'post', 'url': '/openai/v1/chat/completions', 'files': None, 'json_data': {'messages': [{'role': 'system', 'content': 'You are a qa engineer and only output python code, no markdown tags.'}, {'role': 'user', 'content': 'Write a python unit test that test the following function: \\n def is_prime(n):\\n    if n <= 1:\\n        return False\\n    if n == 2:\\n        return True\\n    if n % 2 == 0:\\n        return False\\n    max_divisor = int(n**0.5) + 1\\n    for d in range(3, max_divisor, 2):\\n        if n % d == 0:\\n            return False\\n    return True'}], 'model': 'llama3-70b-8192', 'temperature': 0.5}}\n",
-      "DEBUG:groq._base_client:Sending HTTP Request: POST https://api.groq.com/openai/v1/chat/completions\n",
-      "DEBUG:httpcore.http11:send_request_headers.started request=\n",
-      "DEBUG:httpcore.http11:send_request_headers.complete\n",
-      "DEBUG:httpcore.http11:send_request_body.started request=\n",
-      "DEBUG:httpcore.http11:send_request_body.complete\n",
-      "DEBUG:httpcore.http11:receive_response_headers.started request=\n",
-      "DEBUG:httpcore.http11:receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Sun, 21 Jul 2024 05:55:23 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'Cache-Control', b'private, max-age=0, no-store, no-cache, must-revalidate'), (b'vary', b'Origin'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'30000'), (b'x-ratelimit-remaining-requests', b'49998'), (b'x-ratelimit-remaining-tokens', b'29845'), (b'x-ratelimit-reset-requests', b'2.960999999s'), (b'x-ratelimit-reset-tokens', b'310ms'), (b'x-request-id', b'req_01j39xqsy5fxgth4w9q6r24h9w'), (b'via', b'1.1 google'), (b'alt-svc', b'h3=\":443\"; ma=86400'), (b'CF-Cache-Status', b'DYNAMIC'), (b'Server', b'cloudflare'), (b'CF-RAY', b'8a68f112be2c9652-SJC'), (b'Content-Encoding', b'gzip')])\n",
-      "INFO:httpx:HTTP Request: POST https://api.groq.com/openai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
-      "DEBUG:httpcore.http11:receive_response_body.started request=\n",
-      "DEBUG:httpcore.http11:receive_response_body.complete\n",
-      "DEBUG:httpcore.http11:response_closed.started\n",
-      "DEBUG:httpcore.http11:response_closed.complete\n",
-      "DEBUG:groq._base_client:HTTP Response: POST https://api.groq.com/openai/v1/chat/completions \"200 OK\" Headers({'date': 'Sun, 21 Jul 2024 05:55:23 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'cache-control': 'private, max-age=0, no-store, no-cache, must-revalidate', 'vary': 'Origin', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '30000', 'x-ratelimit-remaining-requests': '49998', 'x-ratelimit-remaining-tokens': '29845', 'x-ratelimit-reset-requests': '2.960999999s', 'x-ratelimit-reset-tokens': '310ms', 'x-request-id': 'req_01j39xqsy5fxgth4w9q6r24h9w', 'via': '1.1 google', 'alt-svc': 'h3=\":443\"; ma=86400', 'cf-cache-status': 'DYNAMIC', 'server': 'cloudflare', 'cf-ray': '8a68f112be2c9652-SJC', 'content-encoding': 'gzip'})\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "generated_test = qa.completion(\n",
     "    \"Write a python unit test that test the following function: \\n \" + generated_func\n",
@@ -304,7 +242,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 9,
+   "execution_count": null,
    "id": "a88ffcbd2015d422",
    "metadata": {
     "collapsed": false,
@@ -312,67 +250,7 @@
      "outputs_hidden": false
     }
    },
-   "outputs": [
-    {
-     "data": {
-      "text/markdown": [
-       "```python\n",
-       "import unittest\n",
-       "\n",
-       "def is_prime(n):\n",
-       "    if n <= 1:\n",
-       "        return False\n",
-       "    if n == 2:\n",
-       "        return True\n",
-       "    if n % 2 == 0:\n",
-       "        return False\n",
-       "    max_divisor = int(n**0.5) + 1\n",
-       "    for d in range(3, max_divisor, 2):\n",
-       "        if n % d == 0:\n",
-       "            return False\n",
-       "    return True\n",
-       "\n",
-       "class TestIsPrimeFunction(unittest.TestCase):\n",
-       "    def test_negative_numbers(self):\n",
-       "        self.assertFalse(is_prime(-1))\n",
-       "        self.assertFalse(is_prime(-2))\n",
-       "        self.assertFalse(is_prime(-3))\n",
-       "\n",
-       "    def test_zero_and_one(self):\n",
-       "        self.assertFalse(is_prime(0))\n",
-       "        self.assertFalse(is_prime(1))\n",
-       "\n",
-       "    def test_two(self):\n",
-       "        self.assertTrue(is_prime(2))\n",
-       "\n",
-       "    def test_even_numbers(self):\n",
-       "        self.assertFalse(is_prime(4))\n",
-       "        self.assertFalse(is_prime(6))\n",
-       "        self.assertFalse(is_prime(8))\n",
-       "\n",
-       "    def test_prime_numbers(self):\n",
-       "        self.assertTrue(is_prime(3))\n",
-       "        self.assertTrue(is_prime(5))\n",
-       "        self.assertTrue(is_prime(7))\n",
-       "        self.assertTrue(is_prime(11))\n",
-       "        self.assertTrue(is_prime(13))\n",
-       "\n",
-       "    def test_large_prime_numbers(self):\n",
-       "        self.assertTrue(is_prime(104729))\n",
-       "        self.assertTrue(is_prime(105013))\n",
-       "\n",
-       "if __name__ == '__main__':\n",
-       "    unittest.main()\n",
-       "```"
-      ],
-      "text/plain": [
-       ""
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    }
-   ],
+   "outputs": [],
    "source": [
     "display(Markdown(\"```python\\n\" + generated_test + \"\\n```\"))"
    ]
@@ -405,7 +283,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
+   "execution_count": null,
    "id": "122e923cb07fd5f4",
    "metadata": {
     "collapsed": false,
@@ -413,38 +291,7 @@
      "outputs_hidden": false
     }
    },
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "DEBUG:groq._base_client:Request options: {'method': 'post', 'url': '/openai/v1/chat/completions', 'files': None, 'json_data': {'messages': [{'role': 'system', 'content': 'You are not a tracked agent'}, {'role': 'user', 'content': 'Say hello'}], 'model': 'llama3-70b-8192'}}\n",
-      "DEBUG:groq._base_client:Sending HTTP Request: POST https://api.groq.com/openai/v1/chat/completions\n",
-      "DEBUG:httpcore.http11:send_request_headers.started request=\n",
-      "DEBUG:httpcore.http11:send_request_headers.complete\n",
-      "DEBUG:httpcore.http11:send_request_body.started request=\n",
-      "DEBUG:httpcore.http11:send_request_body.complete\n",
-      "DEBUG:httpcore.http11:receive_response_headers.started request=\n",
-      "DEBUG:httpcore.http11:receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Sun, 21 Jul 2024 05:55:24 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'Cache-Control', b'private, max-age=0, no-store, no-cache, must-revalidate'), (b'vary', b'Origin'), (b'x-ratelimit-limit-requests', b'50000'), (b'x-ratelimit-limit-tokens', b'30000'), (b'x-ratelimit-remaining-requests', b'49998'), (b'x-ratelimit-remaining-tokens', b'29982'), (b'x-ratelimit-reset-requests', b'3.318s'), (b'x-ratelimit-reset-tokens', b'36ms'), (b'x-request-id', b'req_01j39xqvrgem4bfd3gqybths6c'), (b'via', b'1.1 google'), (b'alt-svc', b'h3=\":443\"; ma=86400'), (b'CF-Cache-Status', b'DYNAMIC'), (b'Server', b'cloudflare'), (b'CF-RAY', b'8a68f11e6dd59652-SJC'), (b'Content-Encoding', b'gzip')])\n",
-      "INFO:httpx:HTTP Request: POST https://api.groq.com/openai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
-      "DEBUG:httpcore.http11:receive_response_body.started request=\n",
-      "DEBUG:httpcore.http11:receive_response_body.complete\n",
-      "DEBUG:httpcore.http11:response_closed.started\n",
-      "DEBUG:httpcore.http11:response_closed.complete\n",
-      "DEBUG:groq._base_client:HTTP Response: POST https://api.groq.com/openai/v1/chat/completions \"200 OK\" Headers({'date': 'Sun, 21 Jul 2024 05:55:24 GMT', 'content-type': 'application/json', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'cache-control': 'private, max-age=0, no-store, no-cache, must-revalidate', 'vary': 'Origin', 'x-ratelimit-limit-requests': '50000', 'x-ratelimit-limit-tokens': '30000', 'x-ratelimit-remaining-requests': '49998', 'x-ratelimit-remaining-tokens': '29982', 'x-ratelimit-reset-requests': '3.318s', 'x-ratelimit-reset-tokens': '36ms', 'x-request-id': 'req_01j39xqvrgem4bfd3gqybths6c', 'via': '1.1 google', 'alt-svc': 'h3=\":443\"; ma=86400', 'cf-cache-status': 'DYNAMIC', 'server': 'cloudflare', 'cf-ray': '8a68f11e6dd59652-SJC', 'content-encoding': 'gzip'})\n"
-     ]
-    },
-    {
-     "data": {
-      "text/plain": [
-       "'Hello!'"
-      ]
-     },
-     "execution_count": 10,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
+   "outputs": [],
    "source": [
     "res = groq_client.chat.completions.create(\n",
     "    model=\"llama3-70b-8192\",\n",
@@ -468,14 +315,6 @@
    "source": [
     "You'll notice that we didn't log an agent name, so the AgentOps backend will assign it to the Default Agent for the session!"
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "d7a167c1-61f3-4499-8790-ec001e361e39",
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {
@@ -494,7 +333,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.9"
+   "version": "3.12.3"
   }
  },
  "nbformat": 4,
diff --git a/examples/multi_session_llm.ipynb b/examples/multi_session_llm.ipynb
index 6013be80..a13cd65e 100644
--- a/examples/multi_session_llm.ipynb
+++ b/examples/multi_session_llm.ipynb
@@ -2,14 +2,42 @@
  "cells": [
   {
    "cell_type": "markdown",
-   "source": [
-    "# Multiple Concurrent Sessions\n",
-    "This example will show you how to run multiple sessions concurrently, assigning LLM cals to a specific session."
-   ],
+   "id": "a0fe80a38dec2f7b",
    "metadata": {
     "collapsed": false
    },
-   "id": "a0fe80a38dec2f7b"
+   "source": [
+    "# Multiple Concurrent Sessions\n",
+    "This example will show you how to run multiple sessions concurrently, assigning LLM cals to a specific session."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "ef25b661",
+   "metadata": {},
+   "source": [
+    "First let's install the required packages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f507526f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install -U openai\n",
+    "%pip install -U agentops\n",
+    "%pip install -U python-dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "b6abd496",
+   "metadata": {},
+   "source": [
+    "Then import them"
+   ]
   },
   {
    "cell_type": "code",
@@ -20,49 +48,73 @@
    },
    "outputs": [],
    "source": [
-    "import agentops\n",
     "from openai import OpenAI\n",
-    "from dotenv import load_dotenv\n",
+    "import agentops\n",
     "from agentops import ActionEvent\n",
-    "\n",
-    "load_dotenv()"
+    "import os\n",
+    "from dotenv import load_dotenv"
    ]
   },
   {
    "cell_type": "markdown",
+   "id": "c1da7e59",
+   "metadata": {},
    "source": [
-    "First, of course, lets init AgentOps. We're going to bypass creating a session automatically for the sake of showing it below."
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "da9cf64965c86ee9"
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "8325866b",
+   "metadata": {},
    "outputs": [],
    "source": [
-    "agentops.init(auto_start_session=False)\n",
-    "openai = OpenAI()"
-   ],
+    "load_dotenv()\n",
+    "OPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\") or \"\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "da9cf64965c86ee9",
    "metadata": {
     "collapsed": false
    },
+   "source": [
+    "Then, of course, lets init AgentOps. We're going to bypass creating a session automatically for the sake of showing it below."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
    "id": "39af2cd027ce268",
-   "execution_count": null
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "agentops.init(AGENTOPS_API_KEY, auto_start_session=False)\n",
+    "openai = OpenAI()"
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "Now lets create two sessions, each with an identifiable tag."
-   ],
+   "id": "9501d298aec35510",
    "metadata": {
     "collapsed": false
    },
-   "id": "9501d298aec35510"
+   "source": [
+    "Now lets create two sessions, each with an identifiable tag."
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "4f24d06dd29579ff",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "session_1 = agentops.start_session(tags=[\"multi-session-test-1\"])\n",
@@ -70,49 +122,49 @@
     "\n",
     "print(\"session_id_1: {}\".format(session_1.session_id))\n",
     "print(\"session_id_2: {}\".format(session_2.session_id))"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "4f24d06dd29579ff",
-   "execution_count": null
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "## LLM Calls\n",
-    "Now lets go ahead and make our first OpenAI LLM call. The challenge with having multiple sessions at the same time is that there is no way for AgentOps to know what LLM call is intended to pertain to what active session. This means we need to do a little extra work in one of two ways."
-   ],
+   "id": "38f373b7a8878a68",
    "metadata": {
     "collapsed": false
    },
-   "id": "38f373b7a8878a68"
+   "source": [
+    "## LLM Calls\n",
+    "Now lets go ahead and make our first OpenAI LLM call. The challenge with having multiple sessions at the same time is that there is no way for AgentOps to know what LLM call is intended to pertain to what active session. This means we need to do a little extra work in one of two ways."
+   ]
   },
   {
    "cell_type": "code",
-   "outputs": [],
-   "source": [
-    "messages = [{\"role\": \"user\", \"content\": \"Hello\"}]"
-   ],
+   "execution_count": null,
+   "id": "8a2d65f5fcdb137",
    "metadata": {
     "collapsed": false
    },
-   "id": "8a2d65f5fcdb137",
-   "execution_count": null
+   "outputs": [],
+   "source": [
+    "messages = [{\"role\": \"user\", \"content\": \"Hello\"}]"
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "### Patching Function\n",
-    "This method involves wrapping the LLM call withing a function on session. It can look a little counter-intuitive, but it easily tells us what session the call belongs to."
-   ],
+   "id": "e1859e37b65669b2",
    "metadata": {
     "collapsed": false
    },
-   "id": "e1859e37b65669b2"
+   "source": [
+    "### Patching Function\n",
+    "This method involves wrapping the LLM call withing a function on session. It can look a little counter-intuitive, but it easily tells us what session the call belongs to."
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "106a1c899602bd33",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "# option 1: use session.patch\n",
@@ -121,26 +173,26 @@
     "    messages=messages,\n",
     "    temperature=0.5,\n",
     ")"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "106a1c899602bd33",
-   "execution_count": null
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "### Create patched function\n",
-    "If you're using the create function multiple times, you can create a new function with the same method"
-   ],
+   "id": "3e129661929e8368",
    "metadata": {
     "collapsed": false
    },
-   "id": "3e129661929e8368"
+   "source": [
+    "### Create patched function\n",
+    "If you're using the create function multiple times, you can create a new function with the same method"
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "be3b866ee04ef767",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "observed_create = session_1.patch(openai.chat.completions.create)\n",
@@ -149,102 +201,99 @@
     "    messages=messages,\n",
     "    temperature=0.5,\n",
     ")"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "be3b866ee04ef767",
-   "execution_count": null
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "### Keyword Argument\n",
-    "Alternatively, you can also pass the session into the LLM function call as a keyword argument. While this method works and is a bit more readable, it is not a \"pythonic\" pattern and can lead to linting errors in the code, as the base function is not expecting a `session` keyword."
-   ],
+   "id": "ec03dbfb7a185d1d",
    "metadata": {
     "collapsed": false
    },
-   "id": "ec03dbfb7a185d1d"
+   "source": [
+    "### Keyword Argument\n",
+    "Alternatively, you can also pass the session into the LLM function call as a keyword argument. While this method works and is a bit more readable, it is not a \"pythonic\" pattern and can lead to linting errors in the code, as the base function is not expecting a `session` keyword."
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "4ad4c7629509b4be",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "# option 2: add session as a keyword argument\n",
     "response2 = openai.chat.completions.create(\n",
     "    model=\"gpt-3.5-turbo\", messages=messages, temperature=0.5, session=session_2\n",
     ")"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "4ad4c7629509b4be"
+   ]
   },
   {
    "cell_type": "markdown",
+   "id": "e6de84850aa2e135",
+   "metadata": {
+    "collapsed": false
+   },
    "source": [
     "## Recording Events\n",
     "Outside of LLM calls, there are plenty of other events that we want to track. You can learn more about these events [here](https://docs.agentops.ai/v1/concepts/events).\n",
     "\n",
     "Recording these events on a session is as simple as `session.record(...)`"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "e6de84850aa2e135"
+   ]
   },
   {
    "cell_type": "code",
-   "outputs": [],
-   "source": [
-    "session_1.record(ActionEvent(action_type=\"test event\"))"
-   ],
+   "execution_count": null,
+   "id": "964e3073bac33223",
    "metadata": {
     "collapsed": false
    },
-   "id": "964e3073bac33223"
+   "outputs": [],
+   "source": [
+    "session_1.record(ActionEvent(action_type=\"test event\"))"
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "Now let's go ahead and end the sessions"
-   ],
+   "id": "43ac0b9b99eab5c7",
    "metadata": {
     "collapsed": false
    },
-   "id": "43ac0b9b99eab5c7"
+   "source": [
+    "Now let's go ahead and end the sessions"
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "7e3050abcb72421b",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "session_1.end_session(end_state=\"Success\")\n",
     "session_2.end_session(end_state=\"Success\")"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "7e3050abcb72421b",
-   "execution_count": null
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "If you look in the AgentOps dashboard for these sessions, you will see two unique sessions, both with one LLM Event each, one with an Action Event as well."
-   ],
+   "id": "53ea2b8dfee6270a",
    "metadata": {
     "collapsed": false
    },
-   "id": "53ea2b8dfee6270a"
+   "source": [
+    "If you look in the AgentOps dashboard for these sessions, you will see two unique sessions, both with one LLM Event each, one with an Action Event as well."
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [],
+   "id": "dbc7483434f8c147",
    "metadata": {
     "collapsed": false
    },
-   "id": "dbc7483434f8c147"
+   "source": []
   }
  ],
  "metadata": {
diff --git a/examples/multion/Autonomous_web_browsing.ipynb b/examples/multion/Autonomous_web_browsing.ipynb
index 10c78d1d..f5b6a269 100644
--- a/examples/multion/Autonomous_web_browsing.ipynb
+++ b/examples/multion/Autonomous_web_browsing.ipynb
@@ -15,25 +15,30 @@
     "Furthermore, events and LLM calls in your Python program will be tracked as well."
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "First let's install the required packages"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Install dependencies\n",
     "%pip install -U multion\n",
     "%pip install -U agentops\n",
-    "%pip install -U openai"
+    "%pip install -U openai\n",
+    "%pip install -U python-dotenv"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### Tracking MultiOn events with AgentOps\n",
-    "\n",
-    "When an `agentops_api_key` is provided, MultiOn will automatically start an AgentOps session and record events."
+    "Then import them"
    ]
   },
   {
@@ -42,11 +47,19 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Set your API keys\n",
+    "from multion.client import MultiOn\n",
+    "from multion.core.request_options import RequestOptions\n",
+    "import openai\n",
+    "import agentops\n",
     "import os\n",
-    "\n",
-    "os.environ[\"MULTION_API_KEY\"] = \"multion_key\"\n",
-    "os.environ[\"AGENTOPS_API_KEY\"] = \"agentops_key\""
+    "from dotenv import load_dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
    ]
   },
   {
@@ -55,14 +68,30 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from multion.client import MultiOn\n",
-    "from multion.core.request_options import RequestOptions\n",
-    "import openai\n",
-    "import agentops\n",
+    "load_dotenv()\n",
+    "MULTION_API_KEY = os.getenv(\"MULTION_API_KEY\") or \"\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\"\n",
+    "OPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\") or \"\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Tracking MultiOn events with AgentOps\n",
     "\n",
+    "When an `agentops_api_key` is provided, MultiOn will automatically start an AgentOps session and record events."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
     "multion = MultiOn(\n",
-    "    api_key=os.environ.get(\"MULTION_API_KEY\"),\n",
-    "    agentops_api_key=os.environ.get(\"AGENTOPS_API_KEY\"),\n",
+    "    api_key=MULTION_API_KEY,\n",
+    "    agentops_api_key=AGENTOPS_API_KEY,\n",
     ")\n",
     "cmd = \"what three things do i get with agentops\"\n",
     "request_options = RequestOptions(\n",
@@ -77,7 +106,10 @@
     "    request_options=request_options,\n",
     ")\n",
     "\n",
-    "print(browse_response.message)"
+    "print(browse_response.message)\n",
+    "\n",
+    "# End session to see your dashboard\n",
+    "agentops.end_session(\"Success\")"
    ]
   },
   {
@@ -94,7 +126,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "agentops.init(auto_start_session=False, tags=[\"MultiOn browse example\"])"
+    "agentops.init(AGENTOPS_API_KEY, auto_start_session=False, default_tags=[\"MultiOn browse example\"])"
    ]
   },
   {
@@ -111,8 +143,8 @@
    "outputs": [],
    "source": [
     "multion = MultiOn(\n",
-    "    api_key=os.environ.get(\"MULTION_API_KEY\"),\n",
-    "    agentops_api_key=os.environ.get(\"AGENTOPS_API_KEY\"),\n",
+    "    api_key=MULTION_API_KEY,\n",
+    "    agentops_api_key=AGENTOPS_API_KEY,\n",
     ")\n",
     "cmd = \"what three things do i get with agentops\"\n",
     "request_options = RequestOptions(\n",
@@ -161,7 +193,6 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# End session to see your dashboard\n",
     "agentops.end_session(\"Success\")"
    ]
   },
@@ -179,11 +210,6 @@
     "\n",
     "![image.png](attachment:image.png)"
    ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": []
   }
  ],
  "metadata": {
diff --git a/examples/multion/Sample_browsing_agent.ipynb b/examples/multion/Sample_browsing_agent.ipynb
index ef964bff..a0b890fc 100644
--- a/examples/multion/Sample_browsing_agent.ipynb
+++ b/examples/multion/Sample_browsing_agent.ipynb
@@ -15,16 +15,30 @@
     "Furthermore, events and LLM calls in your Python program will be tracked as well."
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "First let's install the required packages"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Install dependencies\n",
     "%pip install -U multion\n",
     "%pip install -U agentops\n",
-    "%pip install -U openai"
+    "%pip install -U openai\n",
+    "%pip install -U python-dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Then import them"
    ]
   },
   {
@@ -33,11 +47,19 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import os\n",
     "from multion.client import MultiOn\n",
     "from multion.core.request_options import RequestOptions\n",
     "import openai\n",
-    "import agentops"
+    "import agentops\n",
+    "import os\n",
+    "from dotenv import load_dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
    ]
   },
   {
@@ -46,11 +68,20 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Set your API keys\n",
-    "import os\n",
+    "load_dotenv()\n",
+    "MULTION_API_KEY = os.getenv(\"MULTION_API_KEY\") or \"\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\"\n",
+    "OPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\") or \"\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Check your session\n",
+    "Check your session on [AgentOps](https://app.agentops.ai). This session should include the MultiOn browse action and the OpenAI call.\n",
     "\n",
-    "os.environ[\"MULTION_API_KEY\"] = \"multion_key\"\n",
-    "os.environ[\"AGENTOPS_API_KEY\"] = \"agentops_key\""
+    "![image.png](attachment:image.png)"
    ]
   },
   {
@@ -67,7 +98,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "agentops.init(auto_start_session=False, tags=[\"MultiOn browse example\"])"
+    "agentops.init(AGENTOPS_API_KEY, auto_start_session=False, default_tags=[\"MultiOn browse example\"])"
    ]
   },
   {
@@ -84,8 +115,8 @@
    "outputs": [],
    "source": [
     "multion = MultiOn(\n",
-    "    api_key=os.environ.get(\"MULTION_API_KEY\"),\n",
-    "    agentops_api_key=os.environ.get(\"AGENTOPS_API_KEY\"),\n",
+    "    api_key=MULTION_API_KEY,\n",
+    "    agentops_api_key=AGENTOPS_API_KEY,\n",
     ")\n",
     "cmd = \"what three things do i get with agentops\"\n",
     "request_options = RequestOptions(\n",
diff --git a/examples/multion/Step_by_step_web_browsing.ipynb b/examples/multion/Step_by_step_web_browsing.ipynb
index 652a867f..7069ad9e 100644
--- a/examples/multion/Step_by_step_web_browsing.ipynb
+++ b/examples/multion/Step_by_step_web_browsing.ipynb
@@ -12,6 +12,13 @@
     "This example shows how to use MultiOn's session creator to launch a self-directed browser agent that accomplishes a specified objective using Step Mode. MultiOn agents can either accomplish tasks fully autonomously or managed one step at a time. In this example, we will launch a MutliOn agent and manage each individual step. "
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "First let's install the required packages"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -19,17 +26,15 @@
    "outputs": [],
    "source": [
     "%pip install -U multion\n",
-    "%pip install -U agentops"
+    "%pip install -U agentops\n",
+    "%pip install -U python-dotenv"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "# Session streaming\n",
-    "In this example, we'll use MultiOn to stream individual steps to accomplish a task. To track your runs in the AgentOps dashboard, specify an `agentops_api_key` when initializing `MultiOn()`\n",
-    "\n",
-    "You can run MultiOn without running `agentops.init()`. However, you will only see events from MultiOn, and not any from your own agent.\n"
+    "Then import them"
    ]
   },
   {
@@ -38,11 +43,22 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Set your API keys\n",
+    "import multion\n",
+    "from multion.client import MultiOn\n",
+    "from multion.sessions.types.sessions_step_request_browser_params import (\n",
+    "    SessionsStepRequestBrowserParams,\n",
+    ")\n",
+    "from multion.core.request_options import RequestOptions\n",
+    "import agentops\n",
     "import os\n",
-    "\n",
-    "os.environ[\"MULTION_API_KEY\"] = \"multion_key\"\n",
-    "os.environ[\"AGENTOPS_API_KEY\"] = \"agentops_key\""
+    "from dotenv import load_dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
    ]
   },
   {
@@ -51,16 +67,40 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import multion\n",
-    "from multion.client import MultiOn\n",
-    "from multion.sessions.types.sessions_step_request_browser_params import (\n",
-    "    SessionsStepRequestBrowserParams,\n",
-    ")\n",
-    "from multion.core.request_options import RequestOptions\n",
+    "load_dotenv()\n",
+    "MULTION_API_KEY = os.getenv(\"MULTION_API_KEY\") or \"\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Check your session\n",
+    "Check your session on [AgentOps](https://app.agentops.ai). This session should include the MultiOn browse action and the OpenAI call.\n",
+    "\n",
+    "![image.png](attachment:image.png)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Session streaming\n",
+    "In this example, we'll use MultiOn to stream individual steps to accomplish a task. To track your runs in the AgentOps dashboard, specify an `agentops_api_key` when initializing `MultiOn()`\n",
     "\n",
+    "You can run MultiOn without running `agentops.init()`. However, you will only see events from MultiOn, and not any from your own agent.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
     "multion = MultiOn(\n",
-    "    api_key=os.environ.get(\"MULTION_API_KEY\"),\n",
-    "    agentops_api_key=os.environ.get(\"AGENTOPS_API_KEY\"),\n",
+    "    api_key=MULTION_API_KEY,\n",
+    "    agentops_api_key=AGENTOPS_API_KEY,\n",
     ")\n",
     "\n",
     "url = \"https://www.agentops.ai/\"\n",
@@ -106,18 +146,6 @@
     "Step stream is just like step, but it streams responses in the same way a streamed LLM response is received. Instead of waiting for the entire step to complete, MultiOn will return work in progress. To track your runs in the AgentOps dashboard, specify an `agentops_api_key` when initializing `MultiOn()`"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import os\n",
-    "\n",
-    "os.environ[\"MULTION_API_KEY\"] = \"e8cbbd0f8fa042f49f267a44bf97425c\"\n",
-    "os.environ[\"AGENTOPS_API_KEY\"] = \"a640373b-30ae-4655-a1f3-5caa882a8721\""
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -135,8 +163,8 @@
     "import os\n",
     "\n",
     "multion = MultiOn(\n",
-    "    api_key=os.environ.get(\"MULTION_API_KEY\"),\n",
-    "    agentops_api_key=os.environ.get(\"AGENTOPS_API_KEY\"),\n",
+    "    api_key=MULTION_API_KEY,\n",
+    "    agentops_api_key=AGENTOPS_API_KEY,\n",
     ")\n",
     "\n",
     "url = \"https://www.agentops.ai/\"\n",
diff --git a/examples/multion/Webpage_data_retrieval.ipynb b/examples/multion/Webpage_data_retrieval.ipynb
index 352090aa..9fc89741 100644
--- a/examples/multion/Webpage_data_retrieval.ipynb
+++ b/examples/multion/Webpage_data_retrieval.ipynb
@@ -12,6 +12,13 @@
     "This example shows how to use MultiOn's session creator to launch a self-directed browser agent that accomplishes a specified objective."
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "First let's install the required packages"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -19,7 +26,15 @@
    "outputs": [],
    "source": [
     "%pip install -U multion\n",
-    "%pip install -U agentops"
+    "%pip install -U agentops\n",
+    "%pip install -U python-dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Then import them"
    ]
   },
   {
@@ -28,11 +43,18 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Set your API keys\n",
+    "import multion\n",
+    "from multion.client import MultiOn\n",
+    "import agentops\n",
     "import os\n",
-    "\n",
-    "os.environ[\"MULTION_API_KEY\"] = \"multion_key\"\n",
-    "os.environ[\"AGENTOPS_API_KEY\"] = \"agentops_key\""
+    "from dotenv import load_dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
    ]
   },
   {
@@ -41,12 +63,30 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import multion\n",
-    "from multion.client import MultiOn\n",
+    "load_dotenv()\n",
+    "MULTION_API_KEY = os.getenv(\"MULTION_API_KEY\") or \"\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Check your session\n",
+    "Check your session on [AgentOps](https://app.agentops.ai). This session should include the MultiOn browse action and the OpenAI call.\n",
     "\n",
+    "![image.png](attachment:image.png)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
     "multion = MultiOn(\n",
-    "    api_key=os.environ.get(\"MULTION_API_KEY\"),\n",
-    "    agentops_api_key=os.environ.get(\"AGENTOPS_API_KEY\"),\n",
+    "    api_key=MULTION_API_KEY,\n",
+    "    agentops_api_key=AGENTOPS_API_KEY,\n",
     ")\n",
     "\n",
     "cmd = \"what three things do i get with agentops\"\n",
@@ -79,13 +119,6 @@
     "\n",
     "![AgentOps Multion Retrieve](https://github.com/AgentOps-AI/agentops/blob/main/docs/images/agentops-multion-retrieve.gif?raw=true)"
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {
diff --git a/examples/openai-gpt.ipynb b/examples/openai-gpt.ipynb
index 6b9bfd1d..613880ab 100644
--- a/examples/openai-gpt.ipynb
+++ b/examples/openai-gpt.ipynb
@@ -11,6 +11,34 @@
     "This is an example of how to use the AgentOps library for basic Agent monitoring with OpenAI's GPT"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "3b2bf0da",
+   "metadata": {},
+   "source": [
+    "First let's install the required packages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "88c8f60d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install -U openai\n",
+    "%pip install -U agentops\n",
+    "%pip install -U python-dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "fa9151e5",
+   "metadata": {},
+   "source": [
+    "Then import them"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -33,7 +61,7 @@
     "collapsed": false
    },
    "source": [
-    "Next, we'll grab our two API keys. You can use dotenv like below or however else you like to load environment variables"
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
    ]
   },
   {
@@ -62,108 +90,108 @@
   },
   {
    "cell_type": "code",
-   "outputs": [],
-   "source": [
-    "openai = OpenAI(api_key=OPENAI_API_KEY)\n",
-    "agentops.init(AGENTOPS_API_KEY, tags=[\"openai-gpt-notebook\"])"
-   ],
+   "execution_count": null,
+   "id": "5d424a02e30ce7f4",
    "metadata": {
     "collapsed": false
    },
-   "id": "5d424a02e30ce7f4",
-   "execution_count": null
+   "outputs": [],
+   "source": [
+    "openai = OpenAI(api_key=OPENAI_API_KEY)\n",
+    "agentops.init(AGENTOPS_API_KEY, default_tags=[\"openai-gpt-notebook\"])"
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "Now just use OpenAI as you would normally!"
-   ],
+   "id": "c77f4f920c07e3e6",
    "metadata": {
     "collapsed": false
    },
-   "id": "c77f4f920c07e3e6"
+   "source": [
+    "Now just use OpenAI as you would normally!"
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "## Single Session with ChatCompletion"
-   ],
+   "id": "ca7011cf1ba076c9",
    "metadata": {
     "collapsed": false
    },
-   "id": "ca7011cf1ba076c9"
+   "source": [
+    "## Single Session with ChatCompletion"
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "2704d6d625efa77f",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "message = ({\"role\": \"user\", \"content\": \"Write a 12 word poem about secret agents.\"},)\n",
     "res = openai.chat.completions.create(\n",
     "    model=\"gpt-3.5-turbo\", messages=message, temperature=0.5, stream=True\n",
     ")"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "2704d6d625efa77f",
-   "execution_count": null
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "Make sure to end your session with a `Result` (Success|Fail|Indeterminate) for better tracking"
-   ],
+   "id": "ce4965fc1614b5fe",
    "metadata": {
     "collapsed": false
    },
-   "id": "ce4965fc1614b5fe"
+   "source": [
+    "Make sure to end your session with a `Result` (Success|Fail|Indeterminate) for better tracking"
+   ]
   },
   {
    "cell_type": "code",
-   "outputs": [],
-   "source": [
-    "agentops.end_session(\"Success\")"
-   ],
+   "execution_count": null,
+   "id": "537abd77cd0e0d25",
    "metadata": {
     "collapsed": false
    },
-   "id": "537abd77cd0e0d25",
-   "execution_count": null
+   "outputs": [],
+   "source": [
+    "agentops.end_session(\"Success\")"
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "Now if you check the AgentOps dashboard, you should see information related to this run!"
-   ],
+   "id": "dd69580627842705",
    "metadata": {
     "collapsed": false
    },
-   "id": "dd69580627842705"
+   "source": [
+    "Now if you check the AgentOps dashboard, you should see information related to this run!"
+   ]
   },
   {
    "cell_type": "markdown",
+   "id": "b824bb935c7b7f80",
+   "metadata": {
+    "collapsed": false
+   },
    "source": [
     "# Events\n",
     "Additionally, you can track custom events via AgentOps.\n",
     "Let's start a new session and record some events "
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "b824bb935c7b7f80"
+   ]
   },
   {
    "cell_type": "code",
+   "execution_count": null,
+   "id": "544c8f1bdb8c6e4b",
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "# Create new session\n",
     "agentops.start_session(tags=[\"openai-gpt-notebook-events\"])"
-   ],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "544c8f1bdb8c6e4b",
-   "execution_count": null
+   ]
   },
   {
    "cell_type": "markdown",
@@ -227,15 +255,15 @@
   },
   {
    "cell_type": "code",
-   "outputs": [],
-   "source": [
-    "agentops.end_session(\"Success\")"
-   ],
+   "execution_count": null,
+   "id": "4ca2b49fc06adddb",
    "metadata": {
     "collapsed": false
    },
-   "id": "4ca2b49fc06adddb",
-   "execution_count": null
+   "outputs": [],
+   "source": [
+    "agentops.end_session(\"Success\")"
+   ]
   }
  ],
  "metadata": {
@@ -254,7 +282,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.8"
+   "version": "3.12.3"
   }
  },
  "nbformat": 4,
diff --git a/examples/recording-events.ipynb b/examples/recording-events.ipynb
index b766a716..7c744a3c 100644
--- a/examples/recording-events.ipynb
+++ b/examples/recording-events.ipynb
@@ -10,7 +10,68 @@
     "# Recording Events\n",
     "AgentOps has a number of different [Event Types](https://docs.agentops.ai/v1/details/events)\n",
     "\n",
-    "AgentOps automatically instruments your LLM Calls from OpenAI, LiteLLM, and Cohere. Just make sure their SDKs are imported before initializing AgentOps like we see below"
+    "We automatically instrument your LLM Calls from OpenAI, LiteLLM, Cohere, and more. Just make sure their SDKs are imported before initializing AgentOps like we see below"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "0c475b2e",
+   "metadata": {},
+   "source": [
+    "First let's install the required packages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ef2a575d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install -U openai\n",
+    "%pip install -U agentops\n",
+    "%pip install -U python-dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "45f3c28f",
+   "metadata": {},
+   "source": [
+    "Then import them"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "54b0b276",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from openai import OpenAI\n",
+    "import agentops\n",
+    "import os\n",
+    "from dotenv import load_dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "985ed1da",
+   "metadata": {},
+   "source": [
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "0c3f4b1a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "load_dotenv()\n",
+    "OPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\") or \"\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\""
    ]
   },
   {
@@ -22,14 +83,13 @@
    },
    "outputs": [],
    "source": [
-    "import agentops\n",
-    "import openai\n",
-    "\n",
-    "# Create new session\n",
+    "# Initialize the client, which will automatically start a session\n",
     "agentops.init()\n",
     "\n",
-    "# Optionally, we can add tags to the session\n",
-    "# agentops.init(tags=['Hello Tracker'])\n",
+    "# Optionally, we can add default tags to all sessions\n",
+    "# agentops.init(default_tags=['Hello Tracker'])\n",
+    "\n",
+    "openai = OpenAI()\n",
     "\n",
     "message = ({\"role\": \"user\", \"content\": \"Hello\"},)\n",
     "response = openai.chat.completions.create(\n",
@@ -68,12 +128,10 @@
    "source": [
     "from agentops import record_action\n",
     "\n",
-    "\n",
     "@record_action(\"add numbers\")\n",
     "def add(x, y):\n",
     "    return x + y\n",
     "\n",
-    "\n",
     "add(2, 4)"
    ]
   },
@@ -132,7 +190,7 @@
     "    tool_event = ToolEvent(\n",
     "        name=\"scrape_website\", params={\"url\": url}\n",
     "    )  # the start timestamp is set when the obj is created\n",
-    "    result = integration.scrape_website(data)  # perform tool logic\n",
+    "    result = \"scraped data\"  # perform tool logic\n",
     "    tool_event.returns = result\n",
     "    record(tool_event)"
    ]
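Because the hunk above only shows part of the tool-tracking cell, here is a minimal self-contained sketch of the same `ToolEvent` pattern; the `scrape_website` wrapper and URL are illustrative placeholders, while `ToolEvent` and `record` are the AgentOps helpers the notebook already uses.

```python
from agentops import ToolEvent, record

def scrape_website(url: str) -> str:
    # The start timestamp is set when the ToolEvent object is created
    tool_event = ToolEvent(name="scrape_website", params={"url": url})
    result = "scraped data"  # placeholder for the tool's real logic
    tool_event.returns = result
    record(tool_event)
    return result

scrape_website("https://example.com")
```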
diff --git a/tests/core_manual_tests/agentchat_agentops.ipynb b/tests/core_manual_tests/agentchat_agentops.ipynb
index 43283d13..3450ca6d 100644
--- a/tests/core_manual_tests/agentchat_agentops.ipynb
+++ b/tests/core_manual_tests/agentchat_agentops.ipynb
@@ -47,20 +47,63 @@
   },
   {
    "cell_type": "markdown",
-   "id": "8d9451f4",
+   "id": "de096590",
    "metadata": {},
    "source": [
-    "````{=mdx}\n",
-    ":::info Requirements\n",
-    "Some extra dependencies are needed for this notebook, which can be installed via pip:\n",
-    "\n",
-    "```bash\n",
-    "pip install pyautogen agentops\n",
-    "```\n",
-    "\n",
-    "For more information, please refer to the [installation guide](/docs/installation/).\n",
-    ":::\n",
-    "````"
+    "First let's install the required packages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f59d1440",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install -U pyautogen\n",
+    "%pip install -U agentops\n",
+    "%pip install -U python-dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "f7f00197",
+   "metadata": {},
+   "source": [
+    "Then import them"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5adf56c9",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from autogen import ConversableAgent, UserProxyAgent, config_list_from_json\n",
+    "import agentops\n",
+    "import os\n",
+    "from dotenv import load_dotenv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "22ef3e34",
+   "metadata": {},
+   "source": [
+    "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a2bbb306",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "load_dotenv()\n",
+    "OPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\") or \"\"\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\""
    ]
   },
   {
@@ -95,7 +138,9 @@
     },
     {
      "data": {
-      "text/plain": "UUID('8771cfe1-d607-4987-8398-161cb5dbb5cf')"
+      "text/plain": [
+       "UUID('8771cfe1-d607-4987-8398-161cb5dbb5cf')"
+      ]
      },
      "execution_count": 2,
      "metadata": {},
@@ -103,11 +148,7 @@
     }
    ],
    "source": [
-    "import agentops\n",
-    "\n",
-    "from autogen import ConversableAgent, UserProxyAgent, config_list_from_json\n",
-    "\n",
-    "agentops.init(api_key=\"6f7b89eb-286f-44ed-af9c-a166358e5561\")"
+    "agentops.init(AGENTOPS_API_KEY)"
    ]
   },
   {
@@ -171,12 +212,8 @@
     }
    ],
    "source": [
-    "import agentops\n",
-    "\n",
     "# When initializing AgentOps, you can pass in optional tags to help filter sessions\n",
-    "agentops.init(\n",
-    "    tags=[\"simple-autogen-example\"], api_key=\"6f7b89eb-286f-44ed-af9c-a166358e5561\"\n",
-    ")\n",
+    "agentops.init(default_tags=[\"simple-autogen-example\"])\n",
     "\n",
     "agentops.start_session()\n",
     "\n",
diff --git a/tests/core_manual_tests/upsert_events.py.ipynb b/tests/core_manual_tests/upsert_events.py.ipynb
deleted file mode 100644
index 434008ae..00000000
--- a/tests/core_manual_tests/upsert_events.py.ipynb
+++ /dev/null
@@ -1,50 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": 2,
-   "id": "initial_id",
-   "metadata": {
-    "collapsed": true,
-    "ExecuteTime": {
-     "end_time": "2024-05-07T01:49:47.567382Z",
-     "start_time": "2024-05-07T01:49:47.148365Z"
-    }
-   },
-   "outputs": [],
-   "source": [
-    "import agentops"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "outputs": [],
-   "source": [],
-   "metadata": {
-    "collapsed": false
-   },
-   "id": "cc69b52023168f58"
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 2
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython2",
-   "version": "2.7.6"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}