diff --git a/README.md b/README.md
index 4d076e39..754fb666 100644
--- a/README.md
+++ b/README.md
@@ -212,6 +212,35 @@ agentops.end_session('Success')
+### LiteLLM
+
+AgentOps provides support for LiteLLM (>=1.3.1), allowing you to call 100+ LLMs using the same input/output format.
+
+- [AgentOps integration example](https://docs.agentops.ai/v1/integrations/litellm)
+- [Official LiteLLM documentation](https://docs.litellm.ai/docs/providers)
+
+
+ Installation
+
+```bash
+pip install litellm
+```
+
+```python python
+# Do not use LiteLLM like this
+# from litellm import completion
+# ...
+# response = completion(model="claude-3", messages=messages)
+
+# Use LiteLLM like this
+import litellm
+...
+response = litellm.completion(model="claude-3", messages=messages)
+# or
+response = await litellm.acompletion(model="claude-3", messages=messages)
+```
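+
+Calling through the module (rather than importing `completion` directly) lets AgentOps' instrumentation pick up the call. A minimal end-to-end sketch, assuming `agentops.init()` is called before any LiteLLM calls and that `AGENTOPS_API_KEY` plus your provider keys are set as environment variables:
+
+```python python
+import agentops
+import litellm
+
+# Initialize AgentOps before making any LiteLLM calls
+agentops.init()
+
+messages = [{"role": "user", "content": "Hello"}]
+response = litellm.completion(model="gpt-3.5-turbo", messages=messages)
+
+# End the session so the run shows up on your AgentOps dashboard
+agentops.end_session("Success")
+```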
+
+
### LlamaIndex 🦙
(Coming Soon)
@@ -261,4 +290,4 @@ AgentOps is designed to make agent observability, testing, and monitoring easy.
Check out our growth in the community:
-
\ No newline at end of file
+
diff --git a/agentops/__init__.py b/agentops/__init__.py
index 17a8ea0d..2847104f 100755
--- a/agentops/__init__.py
+++ b/agentops/__init__.py
@@ -8,7 +8,6 @@
from .client import Client
from .config import ClientConfiguration
from .event import Event, ActionEvent, LLMEvent, ToolEvent, ErrorEvent
-from .enums import Models
from .decorators import record_function
from .agent import track_agent
from .log_config import logger
@@ -196,16 +195,6 @@ def set_tags(tags: List[str]):
Client().set_tags(tags)
-@check_init
-def record_function(event_name: str):
- return decorators.record_function(event_name)
-
-
-@check_init
-def track_agent(name: Union[str, None] = None):
- return agent.track_agent(name)
-
-
def get_api_key() -> str:
return Client().api_key
diff --git a/agentops/client.py b/agentops/client.py
index 68bb0d5e..ec202a19 100644
--- a/agentops/client.py
+++ b/agentops/client.py
@@ -1,8 +1,8 @@
"""
- AgentOps client module that provides a client class with public interfaces and configuration.
+AgentOps client module that provides a client class with public interfaces and configuration.
- Classes:
- Client: Provides methods to interact with the AgentOps service.
+Classes:
+ Client: Provides methods to interact with the AgentOps service.
"""
import os
@@ -82,7 +82,6 @@ def __init__(
inherited_session_id: Optional[str] = None,
skip_auto_end_session: Optional[bool] = False,
):
-
if override is not None:
logger.warning(
"The 'override' parameter is deprecated. Use 'instrument_llm_calls' instead.",
@@ -392,7 +391,7 @@ def end_session(
end_state_reason: Optional[str] = None,
video: Optional[str] = None,
is_auto_end: Optional[bool] = None,
- ):
+ ) -> Decimal:
"""
End the current session with the AgentOps service.
@@ -401,6 +400,9 @@ def end_session(
end_state_reason (str, optional): The reason for ending the session.
video (str, optional): The video screen recording of the session
is_auto_end (bool, optional): is this an automatic use of end_session and should be skipped with skip_auto_end_session
+
+ Returns:
+ Decimal: The token cost of the session. Returns 0 if the cost is unknown.
"""
session = self._safe_get_session()
@@ -428,6 +430,7 @@ def end_session(
if token_cost == "unknown" or token_cost is None:
logger.info("Could not determine cost of run.")
+ token_cost_d = Decimal(0)
else:
token_cost_d = Decimal(token_cost)
logger.info(
@@ -446,6 +449,7 @@ def end_session(
)
self._sessions.remove(session)
+ return token_cost_d
def create_agent(
self,
@@ -515,10 +519,9 @@ def handle_exception(exc_type, exc_value, exc_traceback):
)
for session in self._sessions:
- self.end_session(
+ session.end_session(
end_state="Fail",
end_state_reason=f"{str(exc_value)}: {formatted_traceback}",
- session_id=str(session.session_id),
)
# Then call the default excepthook to exit the program
diff --git a/agentops/enums.py b/agentops/enums.py
index 2c93be15..5a687862 100644
--- a/agentops/enums.py
+++ b/agentops/enums.py
@@ -9,20 +9,6 @@ class EventType(Enum):
ERROR = "errors"
-class Models(Enum):
- GPT_3_5_TURBO = "gpt-3.5-turbo"
- GPT_3_5_TURBO_0301 = "gpt-3.5-turbo-0301"
- GPT_3_5_TURBO_0613 = "gpt-3.5-turbo-0613"
- GPT_3_5_TURBO_16K = "gpt-3.5-turbo-16k"
- GPT_3_5_TURBO_16K_0613 = "gpt-3.5-turbo-16k-0613"
- GPT_4_0314 = "gpt-4-0314"
- GPT_4 = "gpt-4"
- GPT_4_32K = "gpt-4-32k"
- GPT_4_32K_0314 = "gpt-4-32k-0314"
- GPT_4_0613 = "gpt-4-0613"
- TEXT_EMBEDDING_ADA_002 = "text-embedding-ada-002"
-
-
class EndState(Enum):
SUCCESS = "Success"
FAIL = "Fail"
diff --git a/agentops/event.py b/agentops/event.py
index 9bfe3a8b..a08efcd7 100644
--- a/agentops/event.py
+++ b/agentops/event.py
@@ -5,10 +5,10 @@
Event: Represents discrete events to be recorded.
"""
-from dataclasses import asdict, dataclass, field
+from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Sequence, Union
from .helpers import get_ISO_time, check_call_stack_for_agent_id
-from .enums import EventType, Models
+from .enums import EventType
from uuid import UUID, uuid4
import traceback
@@ -72,7 +72,7 @@ class LLMEvent(Event):
prompt_tokens(int, optional): The number of tokens in the prompt message.
completion(str, object, optional): The message or returned by the LLM. Preferably in ChatML format which is more fully supported by AgentOps.
completion_tokens(int, optional): The number of tokens in the completion message.
- model(Models, str, optional): LLM model e.g. "gpt-4". Models defined in enums.Models are more fully supported by AgentOps e.g. extra features in dashboard.
+ model(str, optional): LLM model e.g. "gpt-4", "gpt-3.5-turbo".
"""
@@ -82,7 +82,7 @@ class LLMEvent(Event):
prompt_tokens: Optional[int] = None
completion: Union[str, object] = None
completion_tokens: Optional[int] = None
- model: Optional[Union[Models, str]] = None
+ model: Optional[str] = None
@dataclass
diff --git a/agentops/llm_tracker.py b/agentops/llm_tracker.py
index 25f2b4a1..07a7a916 100644
--- a/agentops/llm_tracker.py
+++ b/agentops/llm_tracker.py
@@ -811,8 +811,9 @@ def undo_override_openai_v1_async_completion(self):
completions.AsyncCompletions.create = original_create_async
def undo_override_ollama(self):
- import ollama
+ if "ollama" in sys.modules:
+ import ollama
- ollama.chat = original_func["ollama.chat"]
- ollama.Client.chat = original_func["ollama.Client.chat"]
- ollama.AsyncClient.chat = original_func["ollama.AsyncClient.chat"]
+ ollama.chat = original_func["ollama.chat"]
+ ollama.Client.chat = original_func["ollama.Client.chat"]
+ ollama.AsyncClient.chat = original_func["ollama.AsyncClient.chat"]
diff --git a/docs/mint.json b/docs/mint.json
index 3f797730..622c6eff 100644
--- a/docs/mint.json
+++ b/docs/mint.json
@@ -84,6 +84,7 @@
"pages": [
"v1/integrations/crewai",
"v1/integrations/autogen",
+ "v1/integrations/langchain",
"v1/integrations/cohere",
"v1/integrations/litellm"
]
diff --git a/docs/v1/integrations/autogen.mdx b/docs/v1/integrations/autogen.mdx
index 04922bfc..23ca4619 100644
--- a/docs/v1/integrations/autogen.mdx
+++ b/docs/v1/integrations/autogen.mdx
@@ -20,7 +20,7 @@ Autogen has comprehensive [documentation](https://microsoft.github.io/autogen/do
```
- [Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub while you're at it (you may be our 1,000th 😊)
+ [Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub while you're at it (you may be our 2,000th 😊)
diff --git a/docs/v1/integrations/cohere.mdx b/docs/v1/integrations/cohere.mdx
index 58e6071b..91b76d15 100644
--- a/docs/v1/integrations/cohere.mdx
+++ b/docs/v1/integrations/cohere.mdx
@@ -22,7 +22,7 @@ This is a living integration. Should you need any added functionality message us
poetry add agentops
```
- [Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub while you're at it (you may be our 1,000th 😊)
+ [Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub while you're at it (you may be our 2,000th 😊)
diff --git a/docs/v1/integrations/crewai.mdx b/docs/v1/integrations/crewai.mdx
index 61f825c0..8b55bad2 100644
--- a/docs/v1/integrations/crewai.mdx
+++ b/docs/v1/integrations/crewai.mdx
@@ -25,7 +25,7 @@ Crew has comprehensive [documentation](https://docs.crewai.com) available as wel
```
- [Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub while you're at it (you may be our 1,000th 😊)
+ [Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub while you're at it (you may be our 2,000th 😊)
diff --git a/docs/v1/integrations/langchain.mdx b/docs/v1/integrations/langchain.mdx
new file mode 100644
index 00000000..6fef2008
--- /dev/null
+++ b/docs/v1/integrations/langchain.mdx
@@ -0,0 +1,110 @@
+---
+title: Langchain
+description: "AgentOps provides first-class support for Langchain applications"
+---
+
+AgentOps works seamlessly with applications built using Langchain.
+
+## Adding AgentOps to Langchain applications
+
+
+
+
+ ```bash pip
+ pip install agentops
+ pip install agentops[langchain]
+ ```
+ ```bash poetry
+ poetry add agentops
+ poetry add agentops[langchain]
+ ```
+
+
+ [Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub while you're at it (you may be our 2,000th 😊)
+
+
+
+ Import the following Langchain and AgentOps dependencies
+
+ ```python python
+ import os
+ from langchain.chat_models import ChatOpenAI
+ from langchain.agents import initialize_agent, AgentType
+ from agentops.langchain_callback_handler import LangchainCallbackHandler
+ ```
+
+
+ For more features see our [Usage](/v1/usage) section.
+
+
+
+ Set up your Langchain agent with the AgentOps callback handler, and AgentOps will automatically record your Langchain sessions.
+
+ ```python python
+ handler = LangchainCallbackHandler(api_key=AGENTOPS_API_KEY, tags=['Langchain Example'])
+
+ llm = ChatOpenAI(openai_api_key=OPENAI_API_KEY,
+ callbacks=[handler],
+ model='gpt-3.5-turbo')
+
+ agent = initialize_agent(tools,
+ llm,
+ agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
+ verbose=True,
+ callbacks=[handler], # You must pass in a callback handler to record your agent
+ handle_parsing_errors=True)
+ ```
+
+
+ Note that you don't need a separate `agentops.init()` call; the Langchain callback handler automatically initializes the AgentOps client for you.
+
+
+
+ Retrieve an API Key from your Settings > [Projects & API Keys](https://app.agentops.ai/settings/projects) page.
+
+
+
+
+ API keys are tied to individual projects.
+ A Default Project has been created for you, so just click Copy API Key.
+
+ Set this API Key in your [environment variables](/v1/usage/environment-variables)
+ ```python .env
+ AGENTOPS_API_KEY=
+ ```
+
+
+ Execute your program and visit [app.agentops.ai/drilldown](https://app.agentops.ai/drilldown) to observe your Langchain Agent! 🕵️
+
+ After your run, AgentOps prints a clickable URL to the console that links directly to your session in the Dashboard.
+
+ {/* Intentionally blank div for newline */}
+
+
+
+
+
+
+## Full Examples
+
+
+ ```python python
+ import os
+ from langchain.chat_models import ChatOpenAI
+ from langchain.agents import initialize_agent, AgentType
+ from agentops.langchain_callback_handler import LangchainCallbackHandler
+
+ handler = LangchainCallbackHandler(api_key=AGENTOPS_API_KEY, tags=['Langchain Example'])
+
+ llm = ChatOpenAI(openai_api_key=OPENAI_API_KEY,
+ callbacks=[handler],
+ model='gpt-3.5-turbo')
+
+ agent = initialize_agent(tools,
+ llm,
+ agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
+ verbose=True,
+ callbacks=[handler], # You must pass in a callback handler to record your agent
+ handle_parsing_errors=True)
+ ```
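+
+ You can then run the agent as usual, for example (this assumes `tools` and the API keys above are defined):
+
+  ```python python
+  agent.run("Say hello to AgentOps")
+  ```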
+
\ No newline at end of file
diff --git a/docs/v1/introduction.mdx b/docs/v1/introduction.mdx
index f043ba88..c2e20bad 100644
--- a/docs/v1/introduction.mdx
+++ b/docs/v1/introduction.mdx
@@ -4,23 +4,32 @@ description: "Build your next agent with evals, observability, and replays"
mode: "wide"
---
-[Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub if you find AgentOps helpful (you may be our 1,000th 😊)
-
-# AgentOps is your new terminal
-
-
- - • Lose track of what your agents did in between executions
- - • Parsing through terminal output searching for LLM completions
- - • Printing "tool called"
-
-
- - • Visual dashboard so you can see what your agents did in human-readable format
- - • LLM calls are magically recorded - prompt, completion, timestamps for each - with one line of code
- - • Agents and their events (including tool calls) are recorded with one more line of code
- - • Errors are magically associated to its causal event
- - • Record any other events to your session with two more lines of code
- - • Tons of other useful data if you're developing with supported agent frameworks: SDK version
-
+[Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub if you find AgentOps helpful (you may be our 2,000th 😊)
+
+# AgentOps solves what Terminal can't
+
+
+
+ - • Track agents across executions
+ - • Parse out LLM completions from output logs
+ - • Give you insight into what your agents did
+
+
+ - • Record LLM prompts, completions, & timestamps
+ - • Log events, calls, and any other agent activity
+ - • Link agent errors back to their causal event
+
+
+
+And we do it all in just two lines of code...
+
+ ```python python
+ import agentops
+ agentops.init()
+ ```
+
+... that logs everything back to your AgentOps Dashboard.
+
## The AgentOps Dashboard
diff --git a/docs/v1/quickstart.mdx b/docs/v1/quickstart.mdx
index 1a1d9bbe..a85f5b71 100644
--- a/docs/v1/quickstart.mdx
+++ b/docs/v1/quickstart.mdx
@@ -14,7 +14,6 @@ import SupportedModels from '/snippets/supported-models.mdx'
poetry add agentops
```
- [Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub while you're at it (you may be our 1,000th 😊)
@@ -61,7 +60,7 @@ import SupportedModels from '/snippets/supported-models.mdx'
-
+[Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub if you found AgentOps helpful (you may be our 2,000th 😊)
## More basic functionality
diff --git a/examples/openai-gpt.ipynb b/examples/openai-gpt.ipynb
index 4acf8bef..20c9deee 100644
--- a/examples/openai-gpt.ipynb
+++ b/examples/openai-gpt.ipynb
@@ -70,173 +70,104 @@
},
{
"cell_type": "code",
- "execution_count": 3,
- "id": "fe8116d5969f1d23",
- "metadata": {
- "collapsed": false,
- "ExecuteTime": {
- "end_time": "2024-06-28T00:30:04.266191Z",
- "start_time": "2024-06-28T00:30:03.833561Z"
- }
- },
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "🖇 AgentOps: \u001B[34m\u001B[34mSession Replay: https://app.agentops.ai/drilldown?session_id=806d7e02-abae-4e17-9c0e-b84c30157615\u001B[0m\u001B[0m\n"
- ]
- },
- {
- "data": {
- "text/plain": ""
- },
- "execution_count": 3,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
+ "outputs": [],
"source": [
"openai = OpenAI(api_key=OPENAI_API_KEY)\n",
"agentops.init(AGENTOPS_API_KEY, tags=['openai-gpt-notebook'])"
- ]
+ ],
+ "metadata": {
+ "collapsed": false
+ },
+ "id": "5d424a02e30ce7f4"
},
{
"cell_type": "markdown",
- "id": "3c20bbfa91b3419c",
+ "source": [
+ "Now just use OpenAI as you would normally!"
+ ],
"metadata": {
"collapsed": false
},
- "source": [
- "Now just use OpenAI as you would normally!"
- ]
+ "id": "c77f4f920c07e3e6"
},
{
"cell_type": "markdown",
- "id": "b42f5685ac4af5c2",
+ "source": [
+ "## Single Session with ChatCompletion"
+ ],
"metadata": {
"collapsed": false
},
- "source": [
- "## Single Session with ChatCompletion"
- ]
+ "id": "ca7011cf1ba076c9"
},
{
"cell_type": "code",
- "execution_count": 4,
- "id": "9cd47d3fa1e252e1",
- "metadata": {
- "collapsed": false,
- "ExecuteTime": {
- "end_time": "2024-06-28T00:30:05.424078Z",
- "start_time": "2024-06-28T00:30:04.263083Z"
- }
- },
"outputs": [],
"source": [
- "message = ({\"role\": \"user\", \"content\": \"Hello\"},)\n",
- "\n",
- "generator = openai.chat.completions.create(\n",
- " model=\"gpt-3.5-turbo\", messages=message, temperature=0.5\n",
+ "message = ({\"role\": \"user\", \"content\": \"Write a 12 word poem about secret agents.\"},)\n",
+ "res = openai.chat.completions.create(\n",
+ " model=\"gpt-3.5-turbo\", messages=message, temperature=0.5, stream=True\n",
")"
- ]
+ ],
+ "metadata": {
+ "collapsed": false
+ },
+ "id": "2704d6d625efa77f"
},
{
"cell_type": "markdown",
- "id": "bf75276ad9fbb3f4",
+ "source": [
+ "Make sure to end your session with a `Result` (Success|Fail|Indeterminate) for better tracking"
+ ],
"metadata": {
"collapsed": false
},
- "source": [
- "Make sure to end your session with a `Result` (Success|Fail|Indeterminate) for better tracking"
- ]
+ "id": "ce4965fc1614b5fe"
},
{
"cell_type": "code",
- "execution_count": 5,
- "id": "f59fe80a7e00e6e8",
- "metadata": {
- "collapsed": false,
- "ExecuteTime": {
- "end_time": "2024-06-28T00:30:06.533075Z",
- "start_time": "2024-06-28T00:30:05.425035Z"
- }
- },
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "🖇 AgentOps: This run's cost $0.000017\n",
- "🖇 AgentOps: \u001B[34m\u001B[34mSession Replay: https://app.agentops.ai/drilldown?session_id=806d7e02-abae-4e17-9c0e-b84c30157615\u001B[0m\u001B[0m\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "[{'event_type': 'llms', 'params': {'model': 'gpt-3.5-turbo', 'messages': ({'role': 'user', 'content': 'Hello'},), 'temperature': 0.5}, 'returns': {'id': 'chatcmpl-9etn3UDr2IpCwEQlCAoOa7LhDWXtq', 'choices': [{'finish_reason': 'stop', 'index': 0, 'logprobs': None, 'message': {'content': 'Hello! How can I assist you today?', 'role': 'assistant', 'function_call': None, 'tool_calls': None}}], 'created': 1719534605, 'model': 'gpt-3.5-turbo-0125', 'object': 'chat.completion', 'system_fingerprint': None, 'usage': {'completion_tokens': 9, 'prompt_tokens': 8, 'total_tokens': 17}}, 'init_timestamp': '2024-06-28T00:30:04.218Z', 'end_timestamp': '2024-06-28T00:30:05.421Z', 'agent_id': None, 'id': UUID('b0a1abb7-9ebd-407e-ba24-f612b70eebcf'), 'thread_id': None, 'prompt': ({'role': 'user', 'content': 'Hello'},), 'prompt_tokens': 8, 'completion': {'content': 'Hello! How can I assist you today?', 'role': 'assistant', 'function_call': None, 'tool_calls': None}, 'completion_tokens': 9, 'model': 'gpt-3.5-turbo-0125'}]\n"
- ]
- }
- ],
+ "outputs": [],
"source": [
"agentops.end_session(\"Success\")"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "318a7186c1be2d59",
+ ],
"metadata": {
"collapsed": false
},
- "source": [
- "Now if you check the AgentOps dashboard, you should see information related to this run!"
- ]
+ "id": "537abd77cd0e0d25"
},
{
"cell_type": "markdown",
- "id": "ccf998561cb9a834",
+ "source": [
+ "Now if you check the AgentOps dashboard, you should see information related to this run!"
+ ],
"metadata": {
"collapsed": false
},
+ "id": "dd69580627842705"
+ },
+ {
+ "cell_type": "markdown",
"source": [
"# Events\n",
"Additionally, you can track custom events via AgentOps.\n",
"Let's start a new session and record some events "
- ]
+ ],
+ "metadata": {
+ "collapsed": false
+ },
+ "id": "b824bb935c7b7f80"
},
{
"cell_type": "code",
- "execution_count": 6,
- "id": "f5a1a63ff4ecf127",
- "metadata": {
- "collapsed": false,
- "ExecuteTime": {
- "end_time": "2024-06-28T00:30:06.877746Z",
- "start_time": "2024-06-28T00:30:06.530608Z"
- }
- },
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "🖇 AgentOps: \u001B[34m\u001B[34mSession Replay: https://app.agentops.ai/drilldown?session_id=128cc6ae-3bd3-405f-865d-6fbc0b34f4b9\u001B[0m\u001B[0m\n"
- ]
- },
- {
- "data": {
- "text/plain": ""
- },
- "execution_count": 6,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
+ "outputs": [],
"source": [
"# Create new session\n",
"agentops.start_session(tags=[\"openai-gpt-notebook-events\"])"
- ]
+ ],
+ "metadata": {
+ "collapsed": false
+ },
+ "id": "544c8f1bdb8c6e4b"
},
{
"cell_type": "markdown",