From dd086a42f68aa05531430d61e09012a78457e317 Mon Sep 17 00:00:00 2001
From: Howard Gil
Date: Wed, 1 May 2024 22:30:58 -0700
Subject: [PATCH 1/3] WIP: Now recording trigger_events along with
 ErrorEvents. Previously they weren't recorded, which was an oversight. This
 is a hacky fix we may want to revisit.

---
 agentops/client.py | 9 ++++++++-
 agentops/event.py  | 4 ----
 2 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/agentops/client.py b/agentops/client.py
index 2aa90e00..6b20343a 100644
--- a/agentops/client.py
+++ b/agentops/client.py
@@ -133,6 +133,12 @@ def record(self, event: Event | ErrorEvent):
         """
         if self._session is not None and not self._session.has_ended:
+            if isinstance(event, ErrorEvent):
+                if event.trigger_event:
+                    event.trigger_event_id = event.trigger_event.id
+                    event.trigger_event_type = event.trigger_event.event_type
+                    self._worker.add_event(event.trigger_event.__dict__)
+                    event.trigger_event = None  # removes trigger_event from serialization
             self._worker.add_event(event.__dict__)
         else:
             logger.warning(
@@ -271,7 +277,8 @@ def end_session(self,
                 print('🖇 AgentOps: Could not determine cost of run.')
             else:
-                print('🖇 AgentOps: This run cost ${}'.format('{:.2f}'.format(token_cost) if token_cost == 0 else '{:.6f}'.format(token_cost)))
+                print('🖇 AgentOps: This run cost ${}'.format('{:.2f}'.format(
+                    token_cost) if token_cost == 0 else '{:.6f}'.format(token_cost)))
 
         self._session = None
         self._worker = None

diff --git a/agentops/event.py b/agentops/event.py
index e0f017db..1a047f18 100644
--- a/agentops/event.py
+++ b/agentops/event.py
@@ -135,10 +135,6 @@ class ErrorEvent():
 
     def __post_init__(self):
         self.event_type = EventType.ERROR.value
-        if self.trigger_event:
-            self.trigger_event_id = self.trigger_event.id
-            self.trigger_event_type = self.trigger_event.event_type
-            self.trigger_event = None  # removes trigger_event from serialization
         if self.exception:
             self.error_type = self.error_type or type(self.exception).__name__
             self.details = self.details or str(self.exception)

From c427d247ad0e9ee1fcbe560ad4c11af9925b0886 Mon Sep 17 00:00:00 2001
From: Howard Gil
Date: Thu, 2 May 2024 12:03:35 -0700
Subject: [PATCH 2/3] Reordering response logic so crash-prone parameters are
 parsed later

---
 agentops/llm_tracker.py | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/agentops/llm_tracker.py b/agentops/llm_tracker.py
index 539bd56a..91fa978d 100644
--- a/agentops/llm_tracker.py
+++ b/agentops/llm_tracker.py
@@ -124,23 +124,23 @@ def handle_stream_chunk(chunk: ChatCompletionChunk):
             try:
                 # NOTE: prompt/completion usage not returned in response when streaming
-                model = chunk.model
+                self.llm_event.agent_id = check_call_stack_for_agent_id()
+                self.llm_event = chunk.model
+                self.llm_event.prompt = kwargs["messages"]
                 choices = chunk.choices
                 token = choices[0].delta.content
                 finish_reason = choices[0].finish_reason
-                function_call = choices[0].delta.function_call
-                tool_calls = choices[0].delta.tool_calls
-                role = choices[0].delta.role
                 if token:
                     self.completion += token
 
                 if finish_reason:
-                    self.llm_event.agent_id = check_call_stack_for_agent_id()
-                    self.llm_event.prompt = kwargs["messages"]
                     self.llm_event.completion = {"role": "assistant", "content": self.completion}
+
+                    role = choices[0].delta.role
+                    function_call = choices[0].delta.function_call
+                    tool_calls = choices[0].delta.tool_calls
                     self.llm_event.returns = {"finish_reason": finish_reason, "content": self.completion,
                                               "function_call": function_call, "tool_calls": tool_calls, "role": role}
-                    self.llm_event.model = model
                     self.llm_event.end_timestamp = get_ISO_time()
                     self.client.record(self.llm_event)
@@ -182,11 +182,12 @@ async def async_generator():
                 try:
                     self.llm_event.agent_id = check_call_stack_for_agent_id()
                     self.llm_event.prompt = kwargs["messages"]
-                    self.llm_event.prompt_tokens = response.usage.prompt_tokens
-                    self.llm_event.completion = response.choices[0].message.model_dump()
-                    self.llm_event.completion_tokens = response.usage.completion_tokens
                     self.llm_event.returns = response.model_dump()
                     self.llm_event.model = response.model
+                    self.llm_event.completion = response.choices[0].message.model_dump()
+                    self.llm_event.prompt_tokens = response.usage.prompt_tokens
+                    self.llm_event.completion_tokens = response.usage.completion_tokens
+
                     self.llm_event.end_timestamp = get_ISO_time()
                     self.client.record(self.llm_event)
                 except Exception as e:
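
The reordering in PATCH 2/3 follows a common defensive pattern for streamed responses: read the fields every chunk is expected to carry (content, finish_reason) on every iteration, and only touch the optional delta fields (role, function_call, tool_calls) once finish_reason marks the final chunk, so a sparse or partial delta cannot abort token accumulation mid-stream. Note that the hunk above also introduces `self.llm_event = chunk.model`, which overwrites the event object with the model string; PATCH 3/3 below reverts exactly that. A minimal runnable sketch of the ordering idea, assuming chunk objects shaped like OpenAI's ChatCompletionChunk (the dataclasses here are illustrative stand-ins, not agentops code):

from dataclasses import dataclass, field
from typing import Optional

@dataclass
class Delta:  # stand-in for ChatCompletionChunk.choices[0].delta
    content: Optional[str] = None
    role: Optional[str] = None
    function_call: Optional[dict] = None
    tool_calls: Optional[list] = None

@dataclass
class Choice:  # stand-in for ChatCompletionChunk.choices[0]
    delta: Delta = field(default_factory=Delta)
    finish_reason: Optional[str] = None

def handle_stream_choice(choice: Choice, completion: list) -> Optional[dict]:
    # Always-present field: safe to read on every chunk.
    if choice.delta.content:
        completion.append(choice.delta.content)
    if choice.finish_reason is None:
        return None
    # Optional fields are read once, on the final chunk only, so a sparse
    # mid-stream delta can no longer crash token accumulation.
    return {"finish_reason": choice.finish_reason,
            "content": "".join(completion),
            "role": choice.delta.role,
            "function_call": choice.delta.function_call,
            "tool_calls": choice.delta.tool_calls}

tokens: list = []
for chunk in [Choice(Delta(content="Hel")), Choice(Delta(content="lo")),
              Choice(Delta(role="assistant"), finish_reason="stop")]:
    result = handle_stream_choice(chunk, tokens)
assert result == {"finish_reason": "stop", "content": "Hello", "role": "assistant",
                  "function_call": None, "tool_calls": None}
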
From 7702268ebe42f5f8bc76023b19fdb6aea91cf073 Mon Sep 17 00:00:00 2001
From: Howard Gil
Date: Thu, 2 May 2024 15:42:43 -0700
Subject: [PATCH 3/3] Done: drop the stale TODO comments and revert the
 llm_tracker reordering

---
 agentops/client.py      |  6 ------
 agentops/llm_tracker.py | 21 ++++++++++-----------
 2 files changed, 10 insertions(+), 17 deletions(-)

diff --git a/agentops/client.py b/agentops/client.py
index 6b20343a..ec39e24e 100644
--- a/agentops/client.py
+++ b/agentops/client.py
@@ -170,9 +170,6 @@ def _record_event_sync(self, func, event_name, *args, **kwargs):
             event.returns = returns
             event.end_timestamp = get_ISO_time()
-            # TODO: If func excepts this will never get called
-            # the dev loses all the useful stuff in ActionEvent they would need for debugging
-            # we should either record earlier or have Error post the supplied event to supabase
             self.record(event)
 
         except Exception as e:
@@ -209,9 +206,6 @@ async def _record_event_async(self, func, event_name, *args, **kwargs):
             event.returns = returns
             event.end_timestamp = get_ISO_time()
-            # TODO: If func excepts this will never get called
-            # the dev loses all the useful stuff in ActionEvent they would need for debugging
-            # we should either record earlier or have Error post the supplied event to supabase
             self.record(event)
 
         except Exception as e:

diff --git a/agentops/llm_tracker.py b/agentops/llm_tracker.py
index 91fa978d..539bd56a 100644
--- a/agentops/llm_tracker.py
+++ b/agentops/llm_tracker.py
@@ -124,23 +124,23 @@ def handle_stream_chunk(chunk: ChatCompletionChunk):
             try:
                 # NOTE: prompt/completion usage not returned in response when streaming
-                self.llm_event.agent_id = check_call_stack_for_agent_id()
-                self.llm_event = chunk.model
-                self.llm_event.prompt = kwargs["messages"]
+                model = chunk.model
                 choices = chunk.choices
                 token = choices[0].delta.content
                 finish_reason = choices[0].finish_reason
+                function_call = choices[0].delta.function_call
+                tool_calls = choices[0].delta.tool_calls
+                role = choices[0].delta.role
                 if token:
                     self.completion += token
 
                 if finish_reason:
+                    self.llm_event.agent_id = check_call_stack_for_agent_id()
+                    self.llm_event.prompt = kwargs["messages"]
                     self.llm_event.completion = {"role": "assistant", "content": self.completion}
-
-                    role = choices[0].delta.role
-                    function_call = choices[0].delta.function_call
-                    tool_calls = choices[0].delta.tool_calls
                     self.llm_event.returns = {"finish_reason": finish_reason, "content": self.completion,
                                               "function_call": function_call, "tool_calls": tool_calls, "role": role}
+                    self.llm_event.model = model
                     self.llm_event.end_timestamp = get_ISO_time()
                     self.client.record(self.llm_event)
@@ -182,12 +182,11 @@ async def async_generator():
                 try:
                     self.llm_event.agent_id = check_call_stack_for_agent_id()
                     self.llm_event.prompt = kwargs["messages"]
-                    self.llm_event.returns = response.model_dump()
-                    self.llm_event.model = response.model
-                    self.llm_event.completion = response.choices[0].message.model_dump()
                     self.llm_event.prompt_tokens = response.usage.prompt_tokens
+                    self.llm_event.completion = response.choices[0].message.model_dump()
                     self.llm_event.completion_tokens = response.usage.completion_tokens
-
+                    self.llm_event.returns = response.model_dump()
+                    self.llm_event.model = response.model
                     self.llm_event.end_timestamp = get_ISO_time()
                     self.client.record(self.llm_event)
                 except Exception as e:
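
Taken together, PATCH 3/3 leaves llm_tracker.py exactly as it was before PATCH 2/3 (index 539bd56a again), so the series' net effect is the trigger_event handling from PATCH 1/3. That change moves the flattening of trigger_event out of ErrorEvent.__post_init__ and into Client.record(), which now also ships the triggering event to the worker before the error itself; plausibly this is why the TODO comments about losing the ActionEvent on failure could be dropped. A minimal sketch of that flattening, assuming simplified stand-ins (these dataclasses and the event-type strings are illustrative, not the real agentops classes):

import uuid
from dataclasses import dataclass, field
from typing import Optional

@dataclass
class Event:  # stand-in for agentops.event.Event
    event_type: str
    id: str = field(default_factory=lambda: str(uuid.uuid4()))

@dataclass
class ErrorEvent:  # stand-in for agentops.event.ErrorEvent
    details: str = ""
    trigger_event: Optional[Event] = None
    trigger_event_id: Optional[str] = None
    trigger_event_type: Optional[str] = None
    event_type: str = "error"

def record(event, add_event):
    # Mirrors the PATCH 1/3 logic: flatten the trigger into plain id/type
    # fields, post the trigger as its own record, then null the object
    # reference so event.__dict__ stays serializable.
    if isinstance(event, ErrorEvent) and event.trigger_event:
        event.trigger_event_id = event.trigger_event.id
        event.trigger_event_type = event.trigger_event.event_type
        add_event(event.trigger_event.__dict__)
        event.trigger_event = None
    add_event(event.__dict__)

sent = []
action = Event(event_type="action")
record(ErrorEvent(details="boom", trigger_event=action), sent.append)
assert sent[0]["event_type"] == "action"           # the trigger got recorded too
assert sent[1]["trigger_event_id"] == action.id    # the error links back to it
assert sent[1]["trigger_event"] is None            # object reference removed before serialization
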