Skip to content

Commit

Permalink
Merge branch 'main' into multiple-sessions
Browse files Browse the repository at this point in the history
# Conflicts:
#	agentops/client.py
#	examples/openai-gpt.ipynb
#	pyproject.toml
  • Loading branch information
bboynton97 committed Jul 1, 2024
2 parents 5cb7204 + c6223a1 commit 7cadfd4
Show file tree
Hide file tree
Showing 14 changed files with 237 additions and 179 deletions.
31 changes: 30 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -212,6 +212,35 @@ agentops.end_session('Success')
</details>


### LiteLLM

AgentOps provides support for LiteLLM (>=1.3.1), allowing you to call 100+ LLMs using the same Input/Output Format.

- [AgentOps integration example](https://docs.agentops.ai/v1/integrations/litellm)
- [Official LiteLLM documentation](https://docs.litellm.ai/docs/providers)

<details>
<summary>Installation</summary>

```bash
pip install litellm
```

```python python
# Do not use LiteLLM like this
# from litellm import completion
# ...
# response = completion(model="claude-3", messages=messages)

# Use LiteLLM like this
import litellm
...
response = litellm.completion(model="claude-3", messages=messages)
# or
response = await litellm.acompletion(model="claude-3", messages=messages)
```
</details>

### LlamaIndex 🦙

(Coming Soon)
Expand Down Expand Up @@ -261,4 +290,4 @@ AgentOps is designed to make agent observability, testing, and monitoring easy.

Check out our growth in the community:

<img src="https://api.star-history.com/svg?repos=AgentOps-AI/agentops&type=Date" style="max-width: 500px" width="50%" alt="Logo">
<img src="https://api.star-history.com/svg?repos=AgentOps-AI/agentops&type=Date" style="max-width: 500px" width="50%" alt="Logo">
11 changes: 0 additions & 11 deletions agentops/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@
from .client import Client
from .config import ClientConfiguration
from .event import Event, ActionEvent, LLMEvent, ToolEvent, ErrorEvent
from .enums import Models
from .decorators import record_function
from .agent import track_agent
from .log_config import logger
Expand Down Expand Up @@ -196,16 +195,6 @@ def set_tags(tags: List[str]):
Client().set_tags(tags)


@check_init
def record_function(event_name: str):
return decorators.record_function(event_name)


@check_init
def track_agent(name: Union[str, None] = None):
return agent.track_agent(name)


def get_api_key() -> str:
return Client().api_key

Expand Down
17 changes: 10 additions & 7 deletions agentops/client.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
"""
AgentOps client module that provides a client class with public interfaces and configuration.
AgentOps client module that provides a client class with public interfaces and configuration.
Classes:
Client: Provides methods to interact with the AgentOps service.
Classes:
Client: Provides methods to interact with the AgentOps service.
"""

import os
Expand Down Expand Up @@ -82,7 +82,6 @@ def __init__(
inherited_session_id: Optional[str] = None,
skip_auto_end_session: Optional[bool] = False,
):

if override is not None:
logger.warning(
"The 'override' parameter is deprecated. Use 'instrument_llm_calls' instead.",
Expand Down Expand Up @@ -392,7 +391,7 @@ def end_session(
end_state_reason: Optional[str] = None,
video: Optional[str] = None,
is_auto_end: Optional[bool] = None,
):
) -> Decimal:
"""
End the current session with the AgentOps service.
Expand All @@ -401,6 +400,9 @@ def end_session(
end_state_reason (str, optional): The reason for ending the session.
video (str, optional): The video screen recording of the session
is_auto_end (bool, optional): is this an automatic use of end_session and should be skipped with skip_auto_end_session
Returns:
Decimal: The token cost of the session. Returns 0 if the cost is unknown.
"""

session = self._safe_get_session()
Expand Down Expand Up @@ -428,6 +430,7 @@ def end_session(

if token_cost == "unknown" or token_cost is None:
logger.info("Could not determine cost of run.")
token_cost_d = Decimal(0)
else:
token_cost_d = Decimal(token_cost)
logger.info(
Expand All @@ -446,6 +449,7 @@ def end_session(
)

self._sessions.remove(session)
return token_cost_d

def create_agent(
self,
Expand Down Expand Up @@ -515,10 +519,9 @@ def handle_exception(exc_type, exc_value, exc_traceback):
)

for session in self._sessions:
self.end_session(
session.end_session(
end_state="Fail",
end_state_reason=f"{str(exc_value)}: {formatted_traceback}",
session_id=str(session.session_id),
)

# Then call the default excepthook to exit the program
Expand Down
14 changes: 0 additions & 14 deletions agentops/enums.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,20 +9,6 @@ class EventType(Enum):
ERROR = "errors"


class Models(Enum):
GPT_3_5_TURBO = "gpt-3.5-turbo"
GPT_3_5_TURBO_0301 = "gpt-3.5-turbo-0301"
GPT_3_5_TURBO_0613 = "gpt-3.5-turbo-0613"
GPT_3_5_TURBO_16K = "gpt-3.5-turbo-16k"
GPT_3_5_TURBO_16K_0613 = "gpt-3.5-turbo-16k-0613"
GPT_4_0314 = "gpt-4-0314"
GPT_4 = "gpt-4"
GPT_4_32K = "gpt-4-32k"
GPT_4_32K_0314 = "gpt-4-32k-0314"
GPT_4_0613 = "gpt-4-0613"
TEXT_EMBEDDING_ADA_002 = "text-embedding-ada-002"


class EndState(Enum):
SUCCESS = "Success"
FAIL = "Fail"
Expand Down
8 changes: 4 additions & 4 deletions agentops/event.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,10 +5,10 @@
Event: Represents discrete events to be recorded.
"""

from dataclasses import asdict, dataclass, field
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Sequence, Union
from .helpers import get_ISO_time, check_call_stack_for_agent_id
from .enums import EventType, Models
from .enums import EventType
from uuid import UUID, uuid4
import traceback

Expand Down Expand Up @@ -72,7 +72,7 @@ class LLMEvent(Event):
prompt_tokens(int, optional): The number of tokens in the prompt message.
completion(str, object, optional): The message returned by the LLM. Preferably in ChatML format which is more fully supported by AgentOps.
completion_tokens(int, optional): The number of tokens in the completion message.
model(Models, str, optional): LLM model e.g. "gpt-4". Models defined in enums.Models are more fully supported by AgentOps e.g. extra features in dashboard.
model(str, optional): LLM model e.g. "gpt-4", "gpt-3.5-turbo".
"""

Expand All @@ -82,7 +82,7 @@ class LLMEvent(Event):
prompt_tokens: Optional[int] = None
completion: Union[str, object] = None
completion_tokens: Optional[int] = None
model: Optional[Union[Models, str]] = None
model: Optional[str] = None


@dataclass
Expand Down
9 changes: 5 additions & 4 deletions agentops/llm_tracker.py
Original file line number Diff line number Diff line change
Expand Up @@ -811,8 +811,9 @@ def undo_override_openai_v1_async_completion(self):
completions.AsyncCompletions.create = original_create_async

def undo_override_ollama(self):
import ollama
if "ollama" in sys.modules:
import ollama

ollama.chat = original_func["ollama.chat"]
ollama.Client.chat = original_func["ollama.Client.chat"]
ollama.AsyncClient.chat = original_func["ollama.AsyncClient.chat"]
ollama.chat = original_func["ollama.chat"]
ollama.Client.chat = original_func["ollama.Client.chat"]
ollama.AsyncClient.chat = original_func["ollama.AsyncClient.chat"]
1 change: 1 addition & 0 deletions docs/mint.json
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,7 @@
"pages": [
"v1/integrations/crewai",
"v1/integrations/autogen",
"v1/integrations/langchain",
"v1/integrations/cohere",
"v1/integrations/litellm"
]
Expand Down
2 changes: 1 addition & 1 deletion docs/v1/integrations/autogen.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ Autogen has comprehensive [documentation](https://microsoft.github.io/autogen/do
```
</CodeGroup>
<Check>
[Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub while you're at it (you may be our 1,000th 😊)
[Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub while you're at it (you may be our 2,000th 😊)
</Check>
</Step>
<Step title="Install Autogen">
Expand Down
2 changes: 1 addition & 1 deletion docs/v1/integrations/cohere.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ This is a living integration. Should you need any added functionality message us
poetry add agentops
```
</CodeGroup>
<Check>[Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub while you're at it (you may be our 1,000th 😊)</Check>
<Check>[Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub while you're at it (you may be our 2,000th 😊)</Check>
</Step>
<Step title="Add 3 lines of code">
<CodeGroup>
Expand Down
2 changes: 1 addition & 1 deletion docs/v1/integrations/crewai.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ Crew has comprehensive [documentation](https://docs.crewai.com) available as wel
```
</CodeGroup>
<Check>
[Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub while you're at it (you may be our 1,000th 😊)
[Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub while you're at it (you may be our 2,000th 😊)
</Check>
</Step>
<Step title="Install Crew from the AgentOps fork">
Expand Down
110 changes: 110 additions & 0 deletions docs/v1/integrations/langchain.mdx
Original file line number Diff line number Diff line change
@@ -0,0 +1,110 @@
---
title: Langchain
description: "AgentOps provides first class support for Langchain applications"
---

AgentOps works seamlessly with applications built using Langchain.

## Adding AgentOps to Langchain applications

<Steps>
<Step title="Install the AgentOps SDK and the additional Langchain dependency">
<CodeGroup>
```bash pip
pip install agentops
pip install agentops[langchain]
```
```bash poetry
poetry add agentops
poetry add agentops[langchain]
```
</CodeGroup>
<Check>
[Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub while you're at it (you may be our 2,000th 😊)
</Check>
</Step>
<Step title="Set up your import statements">
Import the following Langchain and AgentOps dependencies
<CodeGroup>
```python python
import os
from langchain.chat_models import ChatOpenAI
from langchain.agents import initialize_agent, AgentType
from agentops.langchain_callback_handler import LangchainCallbackHandler
```
</CodeGroup>
<Tip>
For more features see our [Usage](/v1/usage) section.
</Tip>
</Step>
<Step title="Set up your Langchain handler to make the calls">
Set up your Langchain agent with the AgentOps callback handler and AgentOps will automatically record your Langchain sessions.
<CodeGroup>
```python python
handler = LangchainCallbackHandler(api_key=AGENTOPS_API_KEY, tags=['Langchain Example'])

llm = ChatOpenAI(openai_api_key=OPENAI_API_KEY,
callbacks=[handler],
model='gpt-3.5-turbo')

agent = initialize_agent(tools,
llm,
agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
callbacks=[handler], # You must pass in a callback handler to record your agent
handle_parsing_errors=True)
```
</CodeGroup>
<Tip>
Note that you don't need to set up a separate agentops.init() call, as the Langchain callback handler will automatically initialize the AgentOps client for you.
</Tip>
</Step>
<Step title="Set your API key">
Retrieve an API Key from your Settings > [Projects & API Keys](https://app.agentops.ai/settings/projects) page.
<Frame type="glass" caption="Settings > Projects & API Keys">
<img height="200" src="/images/api-keys.png" />
</Frame>
<Info>
API keys are tied to individual projects.<br></br>
A Default Project has been created for you, so just click Copy API Key
</Info>
Set this API Key in your [environment variables](/v1/usage/environment-variables)
```python .env
AGENTOPS_API_KEY=<YOUR API KEY>
```
</Step>
<Step title="Run your agent">
Execute your program and visit [app.agentops.ai/drilldown](https://app.agentops.ai/drilldown) to observe your Langchain Agent! 🕵️
<Tip>
After your run, AgentOps prints a clickable url to console linking directly to your session in the Dashboard
</Tip>
<div/>{/* Intentionally blank div for newline */}
<Frame type="glass" caption="Clickable link to session">
<img height="200" src="https://github.com/AgentOps-AI/agentops/blob/cf67191f13e0e2a09446a61b7393e1810b3eee95/docs/images/link-to-session.gif?raw=true" />
</Frame>
</Step>
</Steps>

## Full Examples

<CodeGroup>
```python python
import os
from langchain.chat_models import ChatOpenAI
from langchain.agents import initialize_agent, AgentType
from agentops.langchain_callback_handler import LangchainCallbackHandler

handler = LangchainCallbackHandler(api_key=AGENTOPS_API_KEY, tags=['Langchain Example'])

llm = ChatOpenAI(openai_api_key=OPENAI_API_KEY,
callbacks=[handler],
model='gpt-3.5-turbo')

agent = initialize_agent(tools,
llm,
agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
callbacks=[handler], # You must pass in a callback handler to record your agent
handle_parsing_errors=True)
```
</CodeGroup>
43 changes: 26 additions & 17 deletions docs/v1/introduction.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -4,23 +4,32 @@ description: "Build your next agent with evals, observability, and replays"
mode: "wide"
---

<Check>[Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub if you find AgentOps helpful (you may be our 1,000th 😊)</Check>

# AgentOps is your new terminal

<Card title="Terminals suck" icon="trash" iconType="solid" color="#d6483e">
- • Lose track of what your agents did in between executions
- • Parsing through terminal output searching for LLM completions
- • Printing "tool called"
</Card>
<Card title="AgentOps doesn't" icon="face-smile-halo" iconType="regular" color="#2bd600">
- • Visual dashboard so you can see what your agents did in human-readable format
- • LLM calls are magically recorded - prompt, completion, timestamps for each - with one line of code
- • Agents and their events (including tool calls) are recorded with one more line of code
- • Errors are magically associated to its causal event
- • Record any other events to your session with two more lines of code
- • Tons of other useful data if you're developing with supported agent frameworks: SDK version
</Card>
<Check>[Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub if you find AgentOps helpful (you may be our 2,000th 😊)</Check>

# AgentOps solves what Terminal can't

<CardGroup cols={2}>
<Card title="Terminal CAN'T..." icon="trash" iconType="solid" color="#d6483e">
- • Track agents across executions
- • Parse out LLM completions from output logs
- • Give you insight into what your agents did
</Card>
<Card title="AgentOps does it all and more..." icon="face-smile-halo" iconType="regular" color="#2bd600">
- • Record LLM prompts, completions, & timestamps
- • Log events, calls, and any other agent activity
- • Link agent errors back to their causal event
</Card>
</CardGroup>

And we do it all in just two lines of code...
<CodeGroup>
```python python
import agentops
agentops.init(<INSERT YOUR API KEY HERE>)
```
</CodeGroup>
... that logs everything back to your AgentOps Dashboard.


## The AgentOps Dashboard

Expand Down
Loading

0 comments on commit 7cadfd4

Please sign in to comment.