diff --git a/docs/images/external/ollama/ollama-icon.png b/docs/images/external/ollama/ollama-icon.png
new file mode 100644
index 00000000..46060de8
Binary files /dev/null and b/docs/images/external/ollama/ollama-icon.png differ
diff --git a/docs/mint.json b/docs/mint.json
index d2043620..45e61b45 100644
--- a/docs/mint.json
+++ b/docs/mint.json
@@ -92,6 +92,7 @@
"v1/integrations/langchain",
"v1/integrations/cohere",
"v1/integrations/anthropic",
+ "v1/integrations/ollama",
"v1/integrations/litellm",
"v1/integrations/multion",
"v1/integrations/rest"
diff --git a/docs/v1/examples/examples.mdx b/docs/v1/examples/examples.mdx
index 57765789..24ac2e33 100644
--- a/docs/v1/examples/examples.mdx
+++ b/docs/v1/examples/examples.mdx
@@ -31,6 +31,9 @@ mode: "wide"
Create an autonomous browser agent capable of navigating the web and extracting information
+  <Card title="Ollama" icon={<img src="/images/external/ollama/ollama-icon.png" alt="Ollama" />} iconType="image" href="/v1/examples/ollama">
+    Simple Ollama integration with AgentOps
+  </Card>
## Video Guides
diff --git a/docs/v1/examples/ollama.mdx b/docs/v1/examples/ollama.mdx
new file mode 100644
index 00000000..a297f192
--- /dev/null
+++ b/docs/v1/examples/ollama.mdx
@@ -0,0 +1,123 @@
+---
+title: 'Ollama Example'
+description: 'Using Ollama with AgentOps'
+mode: "wide"
+---
+
+{/* SOURCE_FILE: examples/ollama_examples/ollama_examples.ipynb */}
+
+# AgentOps Ollama Integration
+
+This example demonstrates how to use AgentOps to monitor your Ollama LLM calls.
+
+First, let's install the required packages.
+
+> ⚠️ **Important**: Make sure you have Ollama installed and running locally before running this notebook. You can install it from [ollama.com](https://ollama.com).
+
+
+```python
+%pip install -U ollama
+%pip install -U agentops
+%pip install -U python-dotenv
+```
+
+Then import them
+
+
+```python
+import ollama
+import agentops
+import os
+from dotenv import load_dotenv
+
+```
+
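+Since everything below talks to a local Ollama server, it's worth confirming the server is reachable first. A minimal sanity check, assuming a default install (`ollama.list()` simply asks the local server for its downloaded models):
+
+```python
+# Quick sanity check: this raises a connection error if the
+# local Ollama server isn't running (start it with `ollama serve`)
+print(ollama.list())
+```
+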
+Next, we'll set our API keys. For Ollama, we'll need to make sure Ollama is running locally.
+[Get an AgentOps API key](https://agentops.ai/settings/projects)
+
+1. Create an environment variable in a .env file or other method. By default, the AgentOps `init()` function will look for an environment variable named `AGENTOPS_API_KEY`. Or...
+2. Replace `<your_agentops_key>` below and pass in the optional `api_key` parameter to the AgentOps `init(api_key=...)` function. Remember not to commit your API key to a public repo!
+
+
+```python
+# Let's load our environment variables
+load_dotenv()
+
+AGENTOPS_API_KEY = os.getenv("AGENTOPS_API_KEY") or "<your_agentops_key>"
+```
+
+
+```python
+# Initialize AgentOps with some default tags
+agentops.init(AGENTOPS_API_KEY, default_tags=["ollama-example"])
+```
+
+Now let's make some basic calls to Ollama. Make sure you have pulled the model first; use the following, or replace it with whichever model you want to use.
+
+
+```python
+ollama.pull("mistral")
+```
+
+
+```python
+# Basic completion
+response = ollama.chat(
+    model='mistral',
+    messages=[{
+        'role': 'user',
+        'content': 'What are the benefits of using AgentOps for monitoring LLMs?',
+    }]
+)
+print(response['message']['content'])
+```
+
+Let's try streaming responses as well
+
+
+```python
+# Streaming Example
+stream = ollama.chat(
+ model='mistral',
+ messages=[{
+ 'role': 'user',
+ 'content': 'Write a haiku about monitoring AI agents',
+ }],
+ stream=True
+)
+
+for chunk in stream:
+ print(chunk['message']['content'], end='')
+
+```
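+
+If you also want the complete response text after streaming (to log it or reuse it), you can accumulate the chunks while printing them. A small variation on the loop above:
+
+```python
+# Alternative loop: collect the streamed pieces into one string as they print
+full_reply = ""
+for chunk in stream:
+    piece = chunk['message']['content']
+    print(piece, end='')
+    full_reply += piece
+```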
+
+
+```python
+# Conversation Example
+messages = [
+ {
+ 'role': 'user',
+ 'content': 'What is AgentOps?'
+ },
+ {
+ 'role': 'assistant',
+ 'content': 'AgentOps is a monitoring and observability platform for LLM applications.'
+ },
+ {
+ 'role': 'user',
+ 'content': 'Can you give me 3 key features?'
+ }
+]
+
+response = ollama.chat(
+ model='mistral',
+ messages=messages
+)
+print(response['message']['content'])
+```
+
+> 💡 **Note**: In production environments, you should add proper error handling around the Ollama calls and use `agentops.end_session("Error")` when exceptions occur.
+
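+For instance, a hedged sketch of what that error handling could look like (illustrative only, not part of the original notebook flow):
+
+```python
+# Illustrative only: end the AgentOps session with an "Error" state
+# if the Ollama call raises, then re-raise the exception
+try:
+    response = ollama.chat(
+        model='mistral',
+        messages=[{'role': 'user', 'content': 'Ping'}]
+    )
+    print(response['message']['content'])
+except Exception:
+    agentops.end_session("Error")
+    raise
+```
+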
+Finally, let's end our AgentOps session
+
+
+```python
+agentops.end_session("Success")
+```
diff --git a/docs/v1/integrations/ollama.mdx b/docs/v1/integrations/ollama.mdx
new file mode 100644
index 00000000..b88d78c7
--- /dev/null
+++ b/docs/v1/integrations/ollama.mdx
@@ -0,0 +1,150 @@
+---
+title: Ollama
+description: "AgentOps provides first class support for Ollama"
+---
+
+import CodeTooltip from '/snippets/add-code-tooltip.mdx'
+import EnvTooltip from '/snippets/add-env-tooltip.mdx'
+
+
+<Note>
+This is a living integration. Should you need any added functionality, message us on [Discord](https://discord.gg/UgJyyxx7uc)!
+</Note>
+
+<Card title="Ollama" icon={<img src="/images/external/ollama/ollama-icon.png" alt="Ollama" />} iconType="image" href="https://ollama.com">
+  First class support for Ollama
+</Card>
+
+<Steps>
+  <Step title="Install the AgentOps SDK">
+    <CodeGroup>
+ ```bash pip
+ pip install agentops ollama
+ ```
+ ```bash poetry
+ poetry add agentops ollama
+ ```
+    </CodeGroup>
+  </Step>
+  <Step title="Add 3 lines of code">
+    <CodeTooltip/>
+    <CodeGroup>
+  ```python python
+  import agentops
+  import ollama
+
+  agentops.init()  # init() starts an AgentOps session automatically
+
+  ollama.pull("mistral")
+
+  response = ollama.chat(
+      model='mistral',
+      messages=[{
+          'role': 'user',
+          'content': 'What are the benefits of using AgentOps for monitoring LLMs?',
+      }]
+  )
+  print(response['message']['content'])
+  ...
+  # End of program (e.g. main.py)
+  agentops.end_session("Success") # Success|Fail|Indeterminate
+  ```
+    </CodeGroup>
+    <EnvTooltip />
+    <CodeGroup>
+ ```python .env
+ # Alternatively, you can set the API key as an environment variable
+  AGENTOPS_API_KEY=<YOUR API KEY>
+ ```
+    </CodeGroup>
+    Read more about environment variables in [Advanced Configuration](/v1/usage/advanced-configuration)
+  </Step>
+  <Step title="Run your Agent">
+ Execute your program and visit [app.agentops.ai/drilldown](https://app.agentops.ai/drilldown) to observe your Agent! 🕵️
+    <Tip>
+      After your run, AgentOps prints a clickable URL in the console linking directly to your session in the Dashboard.
+    </Tip>
+  </Step>
+</Steps>
+
+## Full Examples
+
+<CodeGroup>
+  ```python basic completion
+  import ollama
+  import agentops
+
+  agentops.init()
+
+  ollama.pull("<model_name>")
+  response = ollama.chat(
+      model="<model_name>",
+      messages=[{
+          "role": "user",
+          "content": "Write a haiku about AI and humans working together"
+      }],
+      options={"num_predict": 1024}  # ollama.chat has no max_tokens kwarg; num_predict caps output tokens
+  )
+
+  print(response['message']['content'])
+  agentops.end_session('Success')
+  ```
+
+  ```python streaming
+  import agentops
+  import ollama
+
+  agentops.init()
+  ollama.pull("<model_name>")
+
+  stream = ollama.chat(
+      model="<model_name>",
+      messages=[{
+          'role': 'user',
+          'content': 'Write a haiku about monitoring AI agents',
+      }],
+      stream=True
+  )
+
+  for chunk in stream:
+      print(chunk['message']['content'], end='')
+
+  agentops.end_session('Success')
+  ```
+
+ ```python conversation
+ import ollama
+ import agentops
+
+ agentops.init()
+  ollama.pull("<model_name>")
+
+ messages = [
+ {
+ 'role': 'user',
+ 'content': 'What is AgentOps?'
+ },
+ {
+ 'role': 'assistant',
+ 'content': 'AgentOps is a monitoring and observability platform for LLM applications.'
+ },
+ {
+ 'role': 'user',
+ 'content': 'Can you give me 3 key features?'
+ }
+  ]
+
+ response = ollama.chat(
+      model="<model_name>",
+ messages=messages
+ )
+ print(response['message']['content'])
+ agentops.end_session('Success')
+ ```
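+
+  ```python error handling
+  # A hedged sketch (not from the original docs): end the session with an
+  # "Error" state when an Ollama call fails, so failed runs are
+  # distinguishable from successful ones in the AgentOps dashboard.
+  import ollama
+  import agentops
+
+  agentops.init()
+
+  try:
+      response = ollama.chat(
+          model="<model_name>",
+          messages=[{"role": "user", "content": "What is AgentOps?"}]
+      )
+      print(response['message']['content'])
+      agentops.end_session('Success')
+  except Exception:
+      agentops.end_session('Error')
+      raise
+  ```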
+</CodeGroup>
+
diff --git a/examples/ollama_examples/ollama_examples.ipynb b/examples/ollama_examples/ollama_examples.ipynb
new file mode 100644
index 00000000..c876ef7a
--- /dev/null
+++ b/examples/ollama_examples/ollama_examples.ipynb
@@ -0,0 +1,212 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# AgentOps Ollama Integration\n",
+ "\n",
+ "This example demonstrates how to use AgentOps to monitor your Ollama LLM calls.\n",
+ "\n",
+    "First, let's install the required packages.\n",
+    "\n",
+    "> ⚠️ **Important**: Make sure you have Ollama installed and running locally before running this notebook. You can install it from [ollama.com](https://ollama.com)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%pip install -U ollama\n",
+ "%pip install -U agentops\n",
+ "%pip install -U python-dotenv"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Then import them"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import ollama\n",
+ "import agentops\n",
+ "import os\n",
+ "from dotenv import load_dotenv\n"
+ ]
+ },
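+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Since everything below talks to a local Ollama server, it's worth confirming the server is reachable first. A minimal sanity check, assuming a default install (`ollama.list()` simply asks the local server for its downloaded models):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Quick sanity check: this raises a connection error if the\n",
+    "# local Ollama server isn't running (start it with `ollama serve`)\n",
+    "print(ollama.list())"
+   ]
+  },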
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Next, we'll set our API keys. For Ollama, we'll need to make sure Ollama is running locally.\n",
+ "[Get an AgentOps API key](https://agentops.ai/settings/projects)\n",
+ "\n",
+ "1. Create an environment variable in a .env file or other method. By default, the AgentOps `init()` function will look for an environment variable named `AGENTOPS_API_KEY`. Or...\n",
+    "2. Replace `<your_agentops_key>` below and pass in the optional `api_key` parameter to the AgentOps `init(api_key=...)` function. Remember not to commit your API key to a public repo!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Let's load our environment variables\n",
+ "load_dotenv()\n",
+ "\n",
+    "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"<your_agentops_key>\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Initialize AgentOps with some default tags\n",
+ "agentops.init(AGENTOPS_API_KEY, default_tags=[\"ollama-example\"])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "Now let's make some basic calls to Ollama. Make sure you have pulled the model first; use the following, or replace it with whichever model you want to use."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ollama.pull(\"mistral\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "# Basic completion\n",
+    "response = ollama.chat(\n",
+    "    model='mistral',\n",
+    "    messages=[{\n",
+    "        'role': 'user',\n",
+    "        'content': 'What are the benefits of using AgentOps for monitoring LLMs?',\n",
+    "    }]\n",
+    ")\n",
+ "print(response['message']['content'])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let's try streaming responses as well"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Streaming Example\n",
+ "stream = ollama.chat(\n",
+ " model='mistral',\n",
+ " messages=[{\n",
+ " 'role': 'user',\n",
+ " 'content': 'Write a haiku about monitoring AI agents',\n",
+ " }],\n",
+ " stream=True\n",
+ ")\n",
+ "\n",
+ "for chunk in stream:\n",
+ " print(chunk['message']['content'], end='')\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Conversation Example\n",
+ "messages = [\n",
+ " {\n",
+ " 'role': 'user',\n",
+ " 'content': 'What is AgentOps?'\n",
+ " },\n",
+ " {\n",
+ " 'role': 'assistant',\n",
+ " 'content': 'AgentOps is a monitoring and observability platform for LLM applications.'\n",
+ " },\n",
+ " {\n",
+ " 'role': 'user',\n",
+ " 'content': 'Can you give me 3 key features?'\n",
+ " }\n",
+ "]\n",
+ "\n",
+ "response = ollama.chat(\n",
+ " model='mistral',\n",
+ " messages=messages\n",
+ ")\n",
+ "print(response['message']['content'])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "> 💡 **Note**: In production environments, you should add proper error handling around the Ollama calls and use `agentops.end_session(\"Error\")` when exceptions occur."
+ ]
+ },
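+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "For instance, a hedged sketch of what that error handling could look like (illustrative only, not part of the original flow):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Illustrative only: end the AgentOps session with an \"Error\" state\n",
+    "# if the Ollama call raises, then re-raise the exception\n",
+    "try:\n",
+    "    response = ollama.chat(\n",
+    "        model='mistral',\n",
+    "        messages=[{'role': 'user', 'content': 'Ping'}]\n",
+    "    )\n",
+    "    print(response['message']['content'])\n",
+    "except Exception:\n",
+    "    agentops.end_session(\"Error\")\n",
+    "    raise"
+   ]
+  },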
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Finally, let's end our AgentOps session"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "agentops.end_session(\"Success\")"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "gpt_desk",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.9"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}