Commit: groq test fix

bboynton97 committed Aug 14, 2024
1 parent ad0ce68 commit edc2bea
Showing 3 changed files with 46 additions and 9 deletions.
8 changes: 4 additions & 4 deletions agentops/llms/groq.py
@@ -15,7 +15,7 @@ def __init__(self, client):

     def override(self):
         self._override_chat()
-        self._override_chat_stream()
+        self._override_async_chat()

     def undo_override(self):
         pass
@@ -159,15 +159,15 @@ def patched_function(*args, **kwargs):
         # Override the original method with the patched one
         completions.Completions.create = patched_function

-    def _override_chat_stream(self):
+    def _override_async_chat(self):
         from groq.resources.chat import completions

         original_create = completions.AsyncCompletions.create

-        def patched_function(*args, **kwargs):
+        async def patched_function(*args, **kwargs):
             # Call the original function with its original arguments
             init_timestamp = get_ISO_time()
-            result = original_create(*args, **kwargs)
+            result = await original_create(*args, **kwargs)
             return self.handle_response(result, kwargs, init_timestamp)

         # Override the original method with the patched one
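The second hunk is the heart of the fix: instead of a separate stream override, the provider now patches the Groq SDK's async path, replacing completions.AsyncCompletions.create with an async wrapper that timestamps the call, awaits the original, and hands the result to handle_response. Below is a minimal, self-contained sketch of that monkey-patch pattern; the stub class and names are illustrative, not AgentOps code.

import asyncio
from datetime import datetime, timezone


class AsyncCompletions:
    # Stub standing in for groq.resources.chat.completions.AsyncCompletions.
    async def create(self, **kwargs):
        return {"echo": kwargs}


# Keep a handle on the unpatched method so it can be called through (and restored).
original_create = AsyncCompletions.create


async def patched_function(*args, **kwargs):
    # Capture the start time before calling through, as the provider does.
    init_timestamp = datetime.now(timezone.utc).isoformat()  # stands in for get_ISO_time()
    result = await original_create(*args, **kwargs)
    # The real provider passes result/kwargs/init_timestamp to handle_response here.
    print("LLM call started at", init_timestamp)
    return result


AsyncCompletions.create = patched_function
print(asyncio.run(AsyncCompletions().create(model="demo")))

# A non-trivial undo_override() could simply restore the saved original:
AsyncCompletions.create = original_create

Note that undo_override() in this commit is still a no-op; restoring the saved original, as in the last line above, is one way it could be implemented.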
30 changes: 28 additions & 2 deletions tests/core_manual_tests/providers/groq_canary.py
@@ -1,23 +1,49 @@
+import asyncio
+
 import agentops
 from dotenv import load_dotenv
 import os
-from groq import Groq
+from groq import Groq, AsyncGroq

 load_dotenv()
 agentops.init(default_tags=["groq-provider-test"])
 groq_client = Groq(api_key=os.getenv("GROQ_API_KEY"))
+async_groq_client = AsyncGroq(api_key=os.getenv("GROQ_API_KEY"))

 messages = [{"role": "user", "content": "Hello"}]

 # option 1: use session.patch
 res = groq_client.chat.completions.create(
     model="llama3-70b-8192",
     messages=[
         {"role": "system", "content": "You are not a tracked agent"},
         {"role": "user", "content": "Say hello"},
     ],
 )

+stream_res = groq_client.chat.completions.create(
+    model="llama3-70b-8192",
+    messages=[
+        {"role": "user", "content": "Say hello"},
+    ],
+    stream=True,
+)
+
+for chunk in stream_res:
+    print(chunk)
+
+
+async def async_test():
+    async_res = await async_groq_client.chat.completions.create(
+        model="llama3-70b-8192",
+        messages=[
+            {"role": "user", "content": "Say hello"},
+        ],
+    )
+    print(async_res)
+
+
+asyncio.run(async_test())
+
 agentops.end_session(end_state="Success")

 ###
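For anyone running the canary above (or the OpenAI one below): both scripts read credentials through python-dotenv. A minimal .env file alongside the script is enough; the layout below is an assumed example with placeholder values, and agentops.init() typically picks its key up from AGENTOPS_API_KEY in the same way.

# .env (illustrative placeholders, not real keys)
GROQ_API_KEY=gsk_your_groq_key_here
AGENTOPS_API_KEY=your_agentops_key_here

With that in place, the script runs directly, e.g. python tests/core_manual_tests/providers/groq_canary.py from the repo root.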
17 changes: 14 additions & 3 deletions tests/core_manual_tests/providers/openai_canary.py
@@ -1,11 +1,13 @@
 import agentops
-from openai import OpenAI
+import asyncio
+from openai import OpenAI, AsyncOpenAI
 from dotenv import load_dotenv
 from agentops import ActionEvent

 load_dotenv()
 agentops.init(default_tags=["openai-v1-provider-test"])
 openai = OpenAI()
+async_openai = AsyncOpenAI()

 messages = [{"role": "user", "content": "Hello"}]

@@ -16,10 +18,19 @@
     temperature=0.5,
 )

-agentops.record(ActionEvent(action_type="test event"))
+stream_response = openai.chat.completions.create(
+    model="gpt-3.5-turbo", messages=messages, temperature=0.5, stream=True
+)
+
+for chunk in stream_response:
+    print(chunk)
+
+
+async def async_test():
+    async_response = await async_openai.chat.completions.create(
+        model="gpt-3.5-turbo", messages=messages, temperature=0.5
+    )
+    print(async_response)
+
+
+asyncio.run(async_test())
+
 agentops.end_session(end_state="Success")

 ###
-# Used to verify that one session is created with one LLM event
+# Used to verify that one session is created with two LLM events
 ###
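A note on the async halves of both canaries: an AsyncGroq or AsyncOpenAI create call must be awaited, which is why it lives inside an async function driven by asyncio.run. Calling it bare returns a coroutine object rather than a completion. A tiny sketch with a stub client (illustrative names only):

import asyncio


class AsyncStub:
    # Illustrative stand-in for an AsyncGroq/AsyncOpenAI completions resource.
    async def create(self, **kwargs):
        return "completion"


coro = AsyncStub().create(model="demo")
print(type(coro))         # <class 'coroutine'>; nothing has executed yet
print(asyncio.run(coro))  # prints "completion"; asyncio.run awaits the coroutine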