diff --git a/agentops/llms/ollama.py b/agentops/llms/ollama.py
index bcf753aa..3304cec0 100644
--- a/agentops/llms/ollama.py
+++ b/agentops/llms/ollama.py
@@ -59,6 +59,7 @@ def generator():
     def override(self):
         self._override_chat_client()
         self._override_chat()
+        self._override_chat_async_client()
 
     def undo_override(self):
         if "ollama" in sys.modules:
diff --git a/tests/core_manual_tests/providers/ollama_canary.py b/tests/core_manual_tests/providers/ollama_canary.py
index 91b08493..12d41aa6 100644
--- a/tests/core_manual_tests/providers/ollama_canary.py
+++ b/tests/core_manual_tests/providers/ollama_canary.py
@@ -1,6 +1,9 @@
+import asyncio
+
 import agentops
 from dotenv import load_dotenv
 import ollama
+from ollama import AsyncClient
 
 load_dotenv()
 agentops.init(default_tags=["ollama-provider-test"])
@@ -10,12 +13,31 @@
     messages=[
         {
             "role": "user",
-            "content": "Why is the sky blue?",
+            "content": "say hello sync",
         },
     ],
 )
-print(response)
-print(response["message"]["content"])
+
+stream_response = ollama.chat(
+    model="llama3.1",
+    messages=[
+        {
+            "role": "user",
+            "content": "say hello stream",
+        },
+    ],
+    stream=True,
+)
+for chunk in stream_response:
+    print(chunk)
+
+
+async def main():
+    message = {"role": "user", "content": "say hello mr. async"}
+    async_response = await AsyncClient().chat(model="llama3.1", messages=[message])
+
+
+asyncio.run(main())
 
 
 agentops.end_session(end_state="Success")
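Note for reviewers: the body of `_override_chat_async_client()` is not part of this diff. Below is a minimal sketch of the pattern it presumably follows, assuming it mirrors the sync `_override_chat_client` by monkey-patching `ollama.AsyncClient.chat`. The `record_event` helper is a hypothetical stand-in for AgentOps' actual event recording, and the streaming case is ignored for brevity; this is not the implementation from the PR.

```python
from ollama import AsyncClient

# Keep a reference to the unpatched coroutine so undo_override() could restore it.
_original_chat = AsyncClient.chat


def record_event(response) -> None:
    # Hypothetical stand-in for AgentOps' LLM event recording.
    print("recorded:", response)


async def _patched_chat(self, *args, **kwargs):
    # Await the real async chat call, record the result, and pass it through
    # unchanged so callers see the normal ollama response.
    response = await _original_chat(self, *args, **kwargs)
    record_event(response)
    return response


AsyncClient.chat = _patched_chat
```

With this patch applied, the `AsyncClient().chat(...)` call in the canary test above would be intercepted and recorded transparently, which is what the new `self._override_chat_async_client()` hook in `override()` wires up.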