
Commit ec8445d

ruff
Signed-off-by: Teo <[email protected]>
teocns committed Nov 23, 2024
1 parent 52c132e commit ec8445d
Showing 2 changed files with 25 additions and 24 deletions.
17 changes: 8 additions & 9 deletions agentops/llms/llama_stack_client.py
@@ -23,7 +23,7 @@ def __init__(self, client):
def handle_response(self, response, kwargs, init_timestamp, session: Optional[Session] = None) -> dict:
"""Handle responses for LlamaStack"""
from llama_stack_client import LlamaStackClient

llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)

if session is not None:
llm_event.session_id = session.session_id

@@ -47,8 +47,9 @@ def handle_stream_chunk(chunk: dict):
llm_event.returns.delta += choice.delta

if choice.event_type == "complete":
-    llm_event.prompt = [{ "content": message.content, "role": message.role } for message in kwargs["messages"]]
+    llm_event.prompt = [
+        {"content": message.content, "role": message.role} for message in kwargs["messages"]
+    ]
llm_event.agent_id = check_call_stack_for_agent_id()
llm_event.completion = accumulated_delta
llm_event.prompt_tokens = None
@@ -88,9 +89,9 @@ async def async_generator():

try:
llm_event.returns = response
-llm_event.agent_id = check_call_stack_for_agent_id()
+llm_event.agent_id = check_call_stack_for_agent_id()
llm_event.model = kwargs["model_id"]
-llm_event.prompt = [{ "content": message.content, "role": message.role } for message in kwargs["messages"]]
+llm_event.prompt = [{"content": message.content, "role": message.role} for message in kwargs["messages"]]
llm_event.prompt_tokens = None
llm_event.completion = response.completion_message.content
llm_event.completion_tokens = None
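
For reference, the comprehension reformatted in the hunks above only changes layout: each message object in kwargs["messages"] is mapped to a plain {"content", "role"} dict for llm_event.prompt. A minimal runnable sketch, using a hypothetical stand-in class in place of the real llama-stack-client message type:

from dataclasses import dataclass


@dataclass
class FakeMessage:
    # Hypothetical stand-in for the message objects in kwargs["messages"];
    # the real objects come from llama_stack_client (e.g. UserMessage).
    content: str
    role: str


kwargs = {"messages": [FakeMessage(content="hello world, write me a 3 word poem about the moon", role="user")]}

# Same mapping as in the diff: one dict per message.
prompt = [{"content": message.content, "role": message.role} for message in kwargs["messages"]]
print(prompt)
# [{'content': 'hello world, write me a 3 word poem about the moon', 'role': 'user'}]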
@@ -134,9 +135,7 @@ def override(self):
# self._override_stream_async()

def undo_override(self):
-    if (
-        self.original_complete is not None
-    ):
+    if self.original_complete is not None:
from llama_stack_client.resources import InferenceResource

InferenceResource.chat_completion = self.original_complete
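
For context, undo_override restores the client method that override patches earlier in the file. A hedged sketch of that save/patch/restore pattern — only InferenceResource.chat_completion, original_complete, override, and undo_override appear in the diff; the rest is illustrative and assumes llama_stack_client is installed:

class ProviderSketch:
    def __init__(self):
        self.original_complete = None  # holds the unpatched method while the override is active

    def override(self):
        from llama_stack_client.resources import InferenceResource

        # Save the original method on the class, then swap in a wrapper.
        self.original_complete = InferenceResource.chat_completion
        provider = self

        def patched_chat_completion(inference_self, *args, **kwargs):
            # Call through to the real implementation; event recording elided.
            return provider.original_complete(inference_self, *args, **kwargs)

        InferenceResource.chat_completion = patched_chat_completion

    def undo_override(self):
        # Mirrors the simplified condition above: restore only if we patched.
        if self.original_complete is not None:
            from llama_stack_client.resources import InferenceResource

            InferenceResource.chat_completion = self.original_complete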

32 changes: 17 additions & 15 deletions tests/core_manual_tests/providers/llama_stack_client_canary.py
@@ -11,8 +11,8 @@

agentops.init(default_tags=["llama-stack-client-provider-test"])

-host = "0.0.0.0" # LLAMA_STACK_HOST
-port = 5001 # LLAMA_STACK_PORT
+host = "0.0.0.0"  # LLAMA_STACK_HOST
+port = 5001  # LLAMA_STACK_PORT

full_host = f"http://{host}:{port}"

@@ -28,26 +28,28 @@
),
],
model_id="meta-llama/Llama-3.2-3B-Instruct",
-stream=False
+stream=False,
)


async def stream_test():
-    response = client.inference.chat_completion(
-        messages=[
-            UserMessage(
-                content="hello world, write me a 3 word poem about the moon",
-                role="user",
-            ),
-        ],
-        model_id="meta-llama/Llama-3.2-3B-Instruct",
-        stream=True
-    )
+    response = client.inference.chat_completion(
+        messages=[
+            UserMessage(
+                content="hello world, write me a 3 word poem about the moon",
+                role="user",
+            ),
+        ],
+        model_id="meta-llama/Llama-3.2-3B-Instruct",
+        stream=True,
+    )

-    async for log in EventLogger().log(response):
-        log.print()
+    async for log in EventLogger().log(response):
+        log.print()


async def main():
    await stream_test()


agentops.end_session(end_state="Success")
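
Nothing in the visible hunks actually drives main(); presumably the unchanged tail of the canary does. A hedged sketch of one way to run it — the asyncio import and the __main__ guard are assumptions, not part of the diff, and a Llama Stack server must be listening at full_host:

import asyncio

if __name__ == "__main__":
    # Assumption: drive the async stream test defined above.
    asyncio.run(main())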
