feat: Add OpenAI integration test with sync/async patterns
- Add integration test demonstrating four OpenAI call patterns:
  - Sync (non-streaming)
  - Sync (streaming)
  - Async (non-streaming)
  - Async (streaming)
- Add python-dotenv to dev and test dependencies
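
For context, a minimal sketch of the setup the new test assumes: load_dotenv() loads a local .env file so the OpenAI and AgentOps clients can find their API keys in the environment. The variable names below (OPENAI_API_KEY, AGENTOPS_API_KEY) are the libraries' conventional defaults rather than anything this commit defines, and the .env location is assumed, not specified here.

    # sketch.py - illustration only, not part of this commit
    import os
    from dotenv import load_dotenv

    load_dotenv()  # loads variables from a nearby .env file into os.environ
    assert os.getenv("OPENAI_API_KEY"), "OpenAI() reads this variable implicitly"
    # agentops.init() is expected to pick up AGENTOPS_API_KEY the same way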

Co-Authored-By: Alex Reibman <[email protected]>
devin-ai-integration[bot] and areibman committed Dec 22, 2024
1 parent f844e0a commit bdc813c
Showing 3 changed files with 75 additions and 1 deletion.
3 changes: 2 additions & 1 deletion pyproject.toml
@@ -39,7 +39,8 @@ dev = [
     "pyfakefs",
     "requests_mock==1.11.0",
     "ruff",
-    "vcrpy>=6.0.0; python_version >= '3.8'"
+    "vcrpy>=6.0.0; python_version >= '3.8'",
+    "python-dotenv"
 ]
 ci = [
     "tach~=0.9",
72 changes: 72 additions & 0 deletions tests/openai_handlers/test_openai_integration.py
@@ -0,0 +1,72 @@
import pytest
import agentops
import asyncio
from agentops import record_action
from openai import OpenAI, AsyncOpenAI
from dotenv import load_dotenv

load_dotenv()

@pytest.mark.integration
def test_openai_integration():
    """Integration test demonstrating all four OpenAI call patterns:
    1. Sync (non-streaming)
    2. Sync (streaming)
    3. Async (non-streaming)
    4. Async (streaming)
    Verifies that AgentOps correctly tracks all LLM calls via analytics.
    """
    # Initialize AgentOps without auto-starting session
    agentops.init(auto_start_session=False)
    session = agentops.start_session()

    @record_action("openai-integration-sync-no-stream")
    def sync_no_stream():
        client = OpenAI()
        client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hello from sync no stream"}],
        )

    @record_action("openai-integration-sync-stream")
    def sync_stream():
        client = OpenAI()
        stream_result = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hello from sync streaming"}],
            stream=True,
        )
        for _ in stream_result:
            pass

    @record_action("openai-integration-async-no-stream")
    async def async_no_stream():
        client = AsyncOpenAI()
        await client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hello from async no stream"}],
        )

    @record_action("openai-integration-async-stream")
    async def async_stream():
        client = AsyncOpenAI()
        async_stream_result = await client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hello from async streaming"}],
            stream=True,
        )
        async for _ in async_stream_result:
            pass

    # Call each function
    sync_no_stream()
    sync_stream()
    asyncio.run(async_no_stream())
    asyncio.run(async_stream())

    session.end_session("Success")
    analytics = session.get_analytics()

    # Verify that all LLM calls were tracked
    assert analytics["LLM calls"] >= 4, f"Expected at least 4 LLM calls, but got {analytics['LLM calls']}"
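
A note on the assertion above: the test relies only on the "LLM calls" key of the dict returned by session.get_analytics(). As a rough, hypothetical sketch of the expected shape (the keys other than "LLM calls" are assumptions, not guaranteed by this commit):

    # illustration only - hypothetical analytics payload
    analytics = {"LLM calls": 4, "Tool calls": 0, "Actions": 4, "Errors": 0}
    assert analytics["LLM calls"] >= 4  # one call per pattern exercised above
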
1 change: 1 addition & 0 deletions tox.ini
@@ -31,6 +31,7 @@ deps =
     langchain-core
     langchain
     termcolor
+    python-dotenv
     -e .
 commands =
     coverage run --source . -m pytest
