Skip to content

Commit

Permalink
PR fixes
Browse files Browse the repository at this point in the history
  • Loading branch information
HowieG committed May 10, 2024
1 parent 0a2517f commit 7a83865
Show file tree
Hide file tree
Showing 3 changed files with 113 additions and 88 deletions.
2 changes: 1 addition & 1 deletion agentops/llm_tracker.py
Original file line number Diff line number Diff line change
Expand Up @@ -228,7 +228,7 @@ async def async_generator():
return response

def _handle_response_cohere(self, response, kwargs, init_timestamp):
# TODO: """Handle responses for Cohere versions >vx.x.x"""
# TODO: """Handle responses for Cohere versions >v5.4.0"""
from cohere.types.non_streamed_chat_response import NonStreamedChatResponse
from cohere.types.streamed_chat_response import (
StreamedChatResponse,
Expand Down
174 changes: 87 additions & 87 deletions examples/multi_agent_example.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -2,17 +2,22 @@
"cells": [
{
"cell_type": "markdown",
"source": [
"# Multi-Agent Support\n",
"This is an example implementation of tracking events from two separate agents"
],
"id": "a2e266428cefc683",
"metadata": {
"collapsed": false
},
"id": "a2e266428cefc683"
"source": [
"# Multi-Agent Support\n",
"This is an example implementation of tracking events from two separate agents"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7c566fac57d3b6ce",
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"import agentops\n",
Expand All @@ -23,53 +28,53 @@
"import logging\n",
"\n",
"from IPython.display import display, Markdown"
],
"metadata": {
"collapsed": false
},
"id": "7c566fac57d3b6ce",
"execution_count": null
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9f8c52496c04693",
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"load_dotenv()\n",
"OPENAI_API_KEY = os.getenv('OPENAI_API_KEY', \"<your_openai_key>\")\n",
"AGENTOPS_API_KEY = os.getenv('AGENTOPS_API_KEY', \"<your_agentops_key>\")\n",
"logging.basicConfig(level=logging.DEBUG) # this will let us see that calls are assigned to an agent"
],
"metadata": {
"collapsed": false
},
"id": "9f8c52496c04693",
"execution_count": null
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "af062552554d60ce",
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"agentops.init(AGENTOPS_API_KEY)\n",
"openai_client = OpenAI(api_key = OPENAI_API_KEY )"
],
"metadata": {
"collapsed": false
},
"id": "af062552554d60ce",
"execution_count": null
]
},
{
"cell_type": "markdown",
"source": [
"Now lets create a few agents!"
],
"id": "95d212546aaf1f82",
"metadata": {
"collapsed": false
},
"id": "95d212546aaf1f82"
"source": [
"Now lets create a few agents!"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "727e3cc26ce3ec3",
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"@track_agent(name='qa')\n",
Expand All @@ -85,108 +90,108 @@
" res = openai_client.chat.completions.create(model='gpt-3.5-turbo', messages=[{\"role\": \"system\", \"content\": \"You are a software engineer and only output python code, no markdown tags.\"},\n",
" {\"role\": \"user\", \"content\": prompt}], temperature=0.5)\n",
" return res.choices[0].message.content"
],
"metadata": {
"collapsed": false
},
"id": "727e3cc26ce3ec3",
"execution_count": null
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "79b75d65de738522",
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"qa = QaAgent()\n",
"engineer = EngineerAgent()"
],
"metadata": {
"collapsed": false
},
"id": "79b75d65de738522",
"execution_count": null
]
},
{
"cell_type": "markdown",
"id": "69dd3af9206308cc",
"metadata": {
"collapsed": false
},
"source": [
"Now we have our agents and we tagged them with the `@track_agent` decorator. Any LLM calls that go through this class will now be tagged as agent calls in AgentOps.\n",
"\n",
"Lets use these agents!"
],
"metadata": {
"collapsed": false
},
"id": "69dd3af9206308cc"
]
},
{
"cell_type": "code",
"outputs": [],
"source": [
"generated_func = engineer.completion(\"Write a python function that accepts two numbers and multiplies them together, then divides by two. No example.\")"
],
"execution_count": null,
"id": "69e76061a626549",
"metadata": {
"collapsed": false
},
"id": "69e76061a626549",
"execution_count": null
"outputs": [],
"source": [
"generated_func = engineer.completion(\"Write a python function that accepts two numbers and multiplies them together, then divides by two. No example.\")"
]
},
{
"cell_type": "code",
"outputs": [],
"source": [
"display(Markdown('```python\\n' + generated_func + '\\n```'))"
],
"execution_count": null,
"id": "830b86dac47dceb3",
"metadata": {
"collapsed": false
},
"id": "830b86dac47dceb3",
"execution_count": null
"outputs": [],
"source": [
"display(Markdown('```python\\n' + generated_func + '\\n```'))"
]
},
{
"cell_type": "code",
"outputs": [],
"source": [
"generated_test = qa.completion(\"Write a python unit test that test the following function: \\n \" + generated_func)"
],
"execution_count": null,
"id": "63c9d0d457aee91a",
"metadata": {
"collapsed": false
},
"id": "63c9d0d457aee91a",
"execution_count": null
"outputs": [],
"source": [
"generated_test = qa.completion(\"Write a python unit test that test the following function: \\n \" + generated_func)"
]
},
{
"cell_type": "code",
"outputs": [],
"source": [
"display(Markdown('```python\\n' + generated_test + '\\n```'))"
],
"execution_count": null,
"id": "a88ffcbd2015d422",
"metadata": {
"collapsed": false
},
"id": "a88ffcbd2015d422",
"execution_count": null
"outputs": [],
"source": [
"display(Markdown('```python\\n' + generated_test + '\\n```'))"
]
},
{
"cell_type": "markdown",
"source": [
"Perfect! It generated the code as expected, and in the DEBUG logs, you can see that the calls were made by agents named \"engineer\" and \"qa\"!"
],
"id": "1bd312ed049a5511",
"metadata": {
"collapsed": false
},
"id": "1bd312ed049a5511"
"source": [
"Perfect! It generated the code as expected, and in the DEBUG logs, you can see that the calls were made by agents named \"engineer\" and \"qa\"!"
]
},
{
"cell_type": "markdown",
"source": [
"Lets verify one more thing! If we make an LLM call outside of the context of a tracked agent, we want to make sure it gets assigned to the Default Agent."
],
"id": "cbd0817a31756397",
"metadata": {
"collapsed": false
},
"id": "cbd0817a31756397"
"source": [
"Lets verify one more thing! If we make an LLM call outside of the context of a tracked agent, we want to make sure it gets assigned to the Default Agent."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "122e923cb07fd5f4",
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"res = openai_client.chat.completions.create(\n",
Expand All @@ -195,22 +200,17 @@
" {\"role\": \"user\", \"content\": \"Say hello\"}]\n",
")\n",
"res.choices[0].message.content"
],
"metadata": {
"collapsed": false
},
"id": "122e923cb07fd5f4",
"execution_count": null
]
},
{
"cell_type": "markdown",
"source": [
"You'll notice that we didn't log an agent name, so the AgentOps backend will assign it to the Default Agent for the session!"
],
"id": "a30909020c6a1ada",
"metadata": {
"collapsed": false
},
"id": "a30909020c6a1ada"
"source": [
"You'll notice that we didn't log an agent name, so the AgentOps backend will assign it to the Default Agent for the session!"
]
}
],
"metadata": {
Expand All @@ -229,7 +229,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.6"
"version": "3.12.2"
}
},
"nbformat": 4,
Expand Down
25 changes: 25 additions & 0 deletions tests/openai_handlers/_test_single_openaiv1.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
# Manual smoke test: verify that AgentOps instruments the OpenAI v1 SDK
# (`client.chat.completions.create`) when clients are constructed *before*
# `agentops.init()` is called.
from openai import OpenAI, AsyncOpenAI
import openai
import agentops
from dotenv import load_dotenv

# Load API keys (OPENAI_API_KEY / AGENTOPS_API_KEY — presumably) from a local
# .env file into the environment so the clients below can authenticate.
load_dotenv()

# Clients are created BEFORE agentops.init() on purpose: the tracker must be
# able to patch already-instantiated clients.
client = OpenAI()

# NOTE(review): async_client is constructed but never called in this script —
# presumably it exists to exercise the AsyncOpenAI patch path at init time;
# confirm whether an async completion call should be added.
async_client = AsyncOpenAI()

# Assuming that initializing will trigger the LlmTracker to override methods
agentops.init(tags=['mock agent', openai.__version__])

# Now the client.chat.completions.create should be the overridden method
print('Chat completion')
chat_completion = client.chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": "Say this is a test",
        }
    ],
    model="gpt-3.5-turbo",
)

# Close the AgentOps session, marking it as successful so the recorded events
# are flushed to the backend.
agentops.end_session('Success')

0 comments on commit 7a83865

Please sign in to comment.