From edc2bea11f9e024b6fc31c8260d7ff83c4edcb16 Mon Sep 17 00:00:00 2001 From: Braelyn Boynton Date: Wed, 14 Aug 2024 14:42:06 -0700 Subject: [PATCH] groq test fix --- agentops/llms/groq.py | 8 ++--- .../providers/groq_canary.py | 30 +++++++++++++++++-- .../providers/openai_canary.py | 17 +++++++++-- 3 files changed, 46 insertions(+), 9 deletions(-) diff --git a/agentops/llms/groq.py b/agentops/llms/groq.py index 72974703..82e7c6bd 100644 --- a/agentops/llms/groq.py +++ b/agentops/llms/groq.py @@ -15,7 +15,7 @@ def __init__(self, client): def override(self): self._override_chat() - self._override_chat_stream() + self._override_async_chat() def undo_override(self): pass @@ -159,15 +159,15 @@ def patched_function(*args, **kwargs): # Override the original method with the patched one completions.Completions.create = patched_function - def _override_chat_stream(self): + def _override_async_chat(self): from groq.resources.chat import completions original_create = completions.AsyncCompletions.create - def patched_function(*args, **kwargs): + async def patched_function(*args, **kwargs): # Call the original function with its original arguments init_timestamp = get_ISO_time() - result = original_create(*args, **kwargs) + result = await original_create(*args, **kwargs) return self.handle_response(result, kwargs, init_timestamp) # Override the original method with the patched one diff --git a/tests/core_manual_tests/providers/groq_canary.py b/tests/core_manual_tests/providers/groq_canary.py index bc9481bf..62dff7ab 100644 --- a/tests/core_manual_tests/providers/groq_canary.py +++ b/tests/core_manual_tests/providers/groq_canary.py @@ -1,11 +1,14 @@ +import asyncio + import agentops from dotenv import load_dotenv import os -from groq import Groq +from groq import Groq, AsyncGroq load_dotenv() agentops.init(default_tags=["groq-provider-test"]) groq_client = Groq(api_key=os.getenv("GROQ_API_KEY")) +async_groq_client = AsyncGroq(api_key=os.getenv("GROQ_API_KEY")) messages = [{"role": 
"user", "content": "Hello"}] @@ -13,11 +16,34 @@ res = groq_client.chat.completions.create( model="llama3-70b-8192", messages=[ - {"role": "system", "content": "You are not a tracked agent"}, {"role": "user", "content": "Say hello"}, ], ) +stream_res = groq_client.chat.completions.create( + model="llama3-70b-8192", + messages=[ + {"role": "user", "content": "Say hello"}, + ], + stream=True, +) + +for chunk in stream_res: + print(chunk) + + +async def async_test(): + async_res = await async_groq_client.chat.completions.create( + model="llama3-70b-8192", + messages=[ + {"role": "user", "content": "Say hello"}, + ], + ) + print(async_res) + + +asyncio.run(async_test()) + agentops.end_session(end_state="Success") ### diff --git a/tests/core_manual_tests/providers/openai_canary.py b/tests/core_manual_tests/providers/openai_canary.py index 423664f5..e8361300 100644 --- a/tests/core_manual_tests/providers/openai_canary.py +++ b/tests/core_manual_tests/providers/openai_canary.py @@ -1,11 +1,13 @@ import agentops -from openai import OpenAI +import asyncio +from openai import OpenAI, AsyncOpenAI from dotenv import load_dotenv from agentops import ActionEvent load_dotenv() agentops.init(default_tags=["openai-v1-provider-test"]) openai = OpenAI() +async_openai = AsyncOpenAI() messages = [{"role": "user", "content": "Hello"}] @@ -16,10 +18,19 @@ temperature=0.5, ) -agentops.record(ActionEvent(action_type="test event")) +stream_response = openai.chat.completions.create( + model="gpt-3.5-turbo", messages=messages, temperature=0.5, stream=True +) + +for chunk in stream_response: + print(chunk) + +async_response = asyncio.run( + async_openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages, temperature=0.5) +) agentops.end_session(end_state="Success") ### -# Used to verify that one session is created with one LLM event +# Used to verify that one session is created with two LLM events ###