Skip to content

Commit

Permalink
Merge branch 'main' into documentation-updates
Browse files Browse the repository at this point in the history
  • Loading branch information
albertkimjunior authored Jun 27, 2024
2 parents 2a685b7 + 0c071ae commit b0690b9
Show file tree
Hide file tree
Showing 13 changed files with 211 additions and 68 deletions.
31 changes: 30 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -212,6 +212,35 @@ agentops.end_session('Success')
</details>


### LiteLLM

AgentOps provides support for LiteLLM (>=1.3.1), allowing you to call 100+ LLMs using the same input/output format.

- [AgentOps integration example](https://docs.agentops.ai/v1/integrations/litellm)
- [Official LiteLLM documentation](https://docs.litellm.ai/docs/providers)

<details>
<summary>Installation</summary>

```bash
pip install litellm
```

```python python
# Do not use LiteLLM like this
# from litellm import completion
# ...
# response = completion(model="claude-3", messages=messages)

# Use LiteLLM like this
import litellm
...
response = litellm.completion(model="claude-3", messages=messages)
# or
response = await litellm.acompletion(model="claude-3", messages=messages)
```
</details>

### LlamaIndex 🦙

(Coming Soon)
Expand Down Expand Up @@ -261,4 +290,4 @@ AgentOps is designed to make agent observability, testing, and monitoring easy.

Check out our growth in the community:

<img src="https://api.star-history.com/svg?repos=AgentOps-AI/agentops&type=Date" style="max-width: 500px" width="50%" alt="Logo">
<img src="https://api.star-history.com/svg?repos=AgentOps-AI/agentops&type=Date" style="max-width: 500px" width="50%" alt="Logo">
11 changes: 0 additions & 11 deletions agentops/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@
from .client import Client
from .config import Configuration
from .event import Event, ActionEvent, LLMEvent, ToolEvent, ErrorEvent
from .enums import Models
from .decorators import record_function
from .agent import track_agent
from .log_config import logger
Expand Down Expand Up @@ -184,16 +183,6 @@ def set_tags(tags: List[str]):
Client().set_tags(tags)


@check_init
def record_function(event_name: str):
    """Module-level convenience wrapper around ``decorators.record_function``.

    Ensures the client has been initialized (via ``@check_init``) before
    building and returning the recording decorator for *event_name*.
    """
    recording_decorator = decorators.record_function(event_name)
    return recording_decorator


@check_init
def track_agent(name: Union[str, None] = None):
    """Module-level convenience wrapper around ``agent.track_agent``.

    Ensures the client has been initialized (via ``@check_init``) before
    building and returning the agent-tracking decorator. *name* optionally
    labels the tracked agent.
    """
    tracking_decorator = agent.track_agent(name)
    return tracking_decorator


def get_api_key() -> str:
return Client().api_key

Expand Down
14 changes: 9 additions & 5 deletions agentops/client.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
"""
AgentOps client module that provides a client class with public interfaces and configuration.
AgentOps client module that provides a client class with public interfaces and configuration.
Classes:
Client: Provides methods to interact with the AgentOps service.
Classes:
Client: Provides methods to interact with the AgentOps service.
"""

import os
Expand Down Expand Up @@ -80,7 +80,6 @@ def __init__(
inherited_session_id: Optional[str] = None,
skip_auto_end_session: Optional[bool] = False,
):

if override is not None:
logger.warning(
"The 'override' parameter is deprecated. Use 'instrument_llm_calls' instead.",
Expand Down Expand Up @@ -389,7 +388,7 @@ def end_session(
end_state_reason: Optional[str] = None,
video: Optional[str] = None,
is_auto_end: Optional[bool] = None,
):
) -> Decimal:
"""
End the current session with the AgentOps service.
Expand All @@ -398,6 +397,9 @@ def end_session(
end_state_reason (str, optional): The reason for ending the session.
video (str, optional): The video screen recording of the session
is_auto_end (bool, optional): is this an automatic use of end_session and should be skipped with skip_auto_end_session
Returns:
Decimal: The token cost of the session. Returns 0 if the cost is unknown.
"""

if is_auto_end and self.config.skip_auto_end_session:
Expand All @@ -420,6 +422,7 @@ def end_session(

if token_cost is None or token_cost == "unknown":
logger.info("Could not determine cost of run.")
token_cost_d = Decimal(0)
else:
token_cost_d = Decimal(token_cost)
logger.info(
Expand All @@ -439,6 +442,7 @@ def end_session(

self._session = None
self._worker = None
return token_cost_d

def create_agent(self, name: str, agent_id: Optional[str] = None):
if agent_id is None:
Expand Down
14 changes: 0 additions & 14 deletions agentops/enums.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,20 +9,6 @@ class EventType(Enum):
ERROR = "errors"


# Catalogue of OpenAI model identifiers supported by the dashboard.
# Built with the Enum functional API; member values are the literal
# model-name strings sent to the OpenAI API.
Models = Enum(
    "Models",
    {
        "GPT_3_5_TURBO": "gpt-3.5-turbo",
        "GPT_3_5_TURBO_0301": "gpt-3.5-turbo-0301",
        "GPT_3_5_TURBO_0613": "gpt-3.5-turbo-0613",
        "GPT_3_5_TURBO_16K": "gpt-3.5-turbo-16k",
        "GPT_3_5_TURBO_16K_0613": "gpt-3.5-turbo-16k-0613",
        "GPT_4_0314": "gpt-4-0314",
        "GPT_4": "gpt-4",
        "GPT_4_32K": "gpt-4-32k",
        "GPT_4_32K_0314": "gpt-4-32k-0314",
        "GPT_4_0613": "gpt-4-0613",
        "TEXT_EMBEDDING_ADA_002": "text-embedding-ada-002",
    },
)


class EndState(Enum):
SUCCESS = "Success"
FAIL = "Fail"
Expand Down
8 changes: 4 additions & 4 deletions agentops/event.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,10 +5,10 @@
Event: Represents discrete events to be recorded.
"""

from dataclasses import asdict, dataclass, field
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Sequence, Union
from .helpers import get_ISO_time, check_call_stack_for_agent_id
from .enums import EventType, Models
from .enums import EventType
from uuid import UUID, uuid4
import traceback

Expand Down Expand Up @@ -72,7 +72,7 @@ class LLMEvent(Event):
prompt_tokens(int, optional): The number of tokens in the prompt message.
completion(str, object, optional): The message or returned by the LLM. Preferably in ChatML format which is more fully supported by AgentOps.
completion_tokens(int, optional): The number of tokens in the completion message.
model(Models, str, optional): LLM model e.g. "gpt-4". Models defined in enums.Models are more fully supported by AgentOps e.g. extra features in dashboard.
model(str, optional): LLM model e.g. "gpt-4", "gpt-3.5-turbo".
"""

Expand All @@ -82,7 +82,7 @@ class LLMEvent(Event):
prompt_tokens: Optional[int] = None
completion: Union[str, object] = None
completion_tokens: Optional[int] = None
model: Optional[Union[Models, str]] = None
model: Optional[str] = None


@dataclass
Expand Down
1 change: 1 addition & 0 deletions docs/mint.json
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,7 @@
"pages": [
"v1/integrations/crewai",
"v1/integrations/autogen",
"v1/integrations/langchain",
"v1/integrations/cohere",
"v1/integrations/litellm"
]
Expand Down
4 changes: 1 addition & 3 deletions docs/v1/integrations/autogen.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -19,9 +19,7 @@ Autogen has comprehensive [documentation](https://microsoft.github.io/autogen/do
poetry add agentops
```
</CodeGroup>
<Check>
[Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub while you're at it (you may be our <span id="stars-text">2,000th</span> 😊)
</Check>
<Check>[Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub while you're at it (you may be our <span id="stars-text">2,000th</span> 😊)</Check>
</Step>
<Step title="Install Autogen">
<CodeGroup>
Expand Down
4 changes: 1 addition & 3 deletions docs/v1/integrations/crewai.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -24,9 +24,7 @@ Crew has comprehensive [documentation](https://docs.crewai.com) available as wel
poetry add agentops
```
</CodeGroup>
<Check>
[Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub while you're at it (you may be our <span id="stars-text">2,000th</span> 😊)
</Check>
<Check>[Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub while you're at it (you may be our <span id="stars-text">2,000th</span> 😊)</Check>
</Step>
<Step title="Install Crew from the AgentOps fork">
<Warning>
Expand Down
110 changes: 110 additions & 0 deletions docs/v1/integrations/langchain.mdx
Original file line number Diff line number Diff line change
@@ -0,0 +1,110 @@
---
title: Langchain
description: "AgentOps provides first-class support for Langchain applications"
---

AgentOps works seamlessly with applications built using Langchain.

## Adding AgentOps to Langchain applications

<Steps>
<Step title="Install the AgentOps SDK and the additional Langchain dependency">
<CodeGroup>
```bash pip
pip install agentops
pip install agentops[langchain]
```
```bash poetry
poetry add agentops
poetry add agentops[langchain]
```
</CodeGroup>
<Check>
[Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub while you're at it (you may be our 2,000th 😊)
</Check>
</Step>
<Step title="Set up your import statements">
Import the following Langchain and AgentOps dependencies
<CodeGroup>
```python python
import os
from langchain.chat_models import ChatOpenAI
from langchain.agents import initialize_agent, AgentType
from agentops.langchain_callback_handler import LangchainCallbackHandler
```
</CodeGroup>
<Tip>
For more features see our [Usage](/v1/usage) section.
</Tip>
</Step>
<Step title="Set up your Langchain handler to make the calls">
Set up your Langchain agent with the AgentOps callback handler and AgentOps will automatically record your Langchain sessions.
<CodeGroup>
```python python
handler = LangchainCallbackHandler(api_key=AGENTOPS_API_KEY, tags=['Langchain Example'])

llm = ChatOpenAI(openai_api_key=OPENAI_API_KEY,
callbacks=[handler],
model='gpt-3.5-turbo')

agent = initialize_agent(tools,
llm,
agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
callbacks=[handler], # You must pass in a callback handler to record your agent
handle_parsing_errors=True)
```
</CodeGroup>
<Tip>
Note that you don't need to set up a separate agentops.init() call, as the Langchain callback handler will automatically initialize the AgentOps client for you.
</Tip>
</Step>
<Step title="Set your API key">
Retrieve an API Key from your Settings > [Projects & API Keys](https://app.agentops.ai/settings/projects) page.
<Frame type="glass" caption="Settings > Projects & API Keys">
<img height="200" src="/images/api-keys.png" />
</Frame>
<Info>
API keys are tied to individual projects.<br></br>
A Default Project has been created for you, so just click Copy API Key
</Info>
Set this API Key in your [environment variables](/v1/usage/environment-variables)
```python .env
AGENTOPS_API_KEY=<YOUR API KEY>
```
</Step>
<Step title="Run your agent">
Execute your program and visit [app.agentops.ai/drilldown](https://app.agentops.ai/drilldown) to observe your Langchain Agent! 🕵️
<Tip>
After your run, AgentOps prints a clickable url to console linking directly to your session in the Dashboard
</Tip>
<div/>{/* Intentionally blank div for newline */}
<Frame type="glass" caption="Clickable link to session">
<img height="200" src="https://github.com/AgentOps-AI/agentops/blob/cf67191f13e0e2a09446a61b7393e1810b3eee95/docs/images/link-to-session.gif?raw=true" />
</Frame>
</Step>
</Steps>

## Full Examples

<CodeGroup>
```python python
import os
from langchain.chat_models import ChatOpenAI
from langchain.agents import initialize_agent, AgentType
from agentops.langchain_callback_handler import LangchainCallbackHandler

handler = LangchainCallbackHandler(api_key=AGENTOPS_API_KEY, tags=['Langchain Example'])

llm = ChatOpenAI(openai_api_key=OPENAI_API_KEY,
callbacks=[handler],
model='gpt-3.5-turbo')

agent = initialize_agent(tools,
llm,
agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
callbacks=[handler], # You must pass in a callback handler to record your agent
handle_parsing_errors=True)
```
</CodeGroup>
38 changes: 23 additions & 15 deletions docs/v1/introduction.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -6,21 +6,29 @@ mode: "wide"

<Check>[Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub if you find AgentOps helpful (you may be our <span id="stars-text">2,000th</span> 😊)</Check>

# AgentOps is your new terminal

<Card title="Terminals suck" icon="trash" iconType="solid" color="#d6483e">
- • Lose track of what your agents did in between executions
- • Parsing through terminal output searching for LLM completions
- • Printing "tool called"
</Card>
<Card title="AgentOps doesn't" icon="face-smile-halo" iconType="regular" color="#2bd600">
- • Visual dashboard so you can see what your agents did in human-readable format
- • LLM calls are magically recorded - prompt, completion, timestamps for each - with one line of code
- • Agents and their events (including tool calls) are recorded with one more line of code
- • Errors are magically associated to its causal event
- • Record any other events to your session with two more lines of code
- • Tons of other useful data if you're developing with supported agent frameworks: SDK version
</Card>
# AgentOps solves what Terminal can't

<CardGroup cols={2}>
<Card title="Terminal CAN'T..." icon="trash" iconType="solid" color="#d6483e">
- • Track agents across executions
- • Parse out LLM completions from output logs
- • Give you insight into what your agents did
</Card>
<Card title="AgentOps does it all and more..." icon="face-smile-halo" iconType="regular" color="#2bd600">
- • Record LLM prompts, completions, & timestamps
- • Log events, calls, and any other agent activity
- • Link agent errors back to their causal event
</Card>
</CardGroup>

And we do it all in just two lines of code...
<CodeGroup>
```python python
import agentops
agentops.init(<INSERT YOUR API KEY HERE>)
```
</CodeGroup>
... that logs everything back to your AgentOps Dashboard.

## The AgentOps Dashboard

Expand Down
3 changes: 0 additions & 3 deletions docs/v1/quickstart.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -60,10 +60,7 @@ import SupportedModels from '/snippets/supported-models.mdx'
</Frame>
</Step>
</Steps>

<Check>[Give us a star](https://github.com/AgentOps-AI/agentops) if you liked AgentOps! (you may be our <span id="stars-text">2,000th</span> 😊)</Check>
<Test />


## More basic functionality

Expand Down
Loading

0 comments on commit b0690b9

Please sign in to comment.