Fixing duplicate llm_pkey errors #389

Merged · 10 commits · Sep 17, 2024
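
The change is the same in every file below: the per-call LLMEvent moves from an instance attribute (self.llm_event) to a function-local variable (llm_event). Because one provider instance patches the API for the whole process, all in-flight completions previously shared a single self.llm_event; overlapping calls could overwrite each other's event and record the same event twice, which surfaced as duplicate llm_pkey errors. Below is a minimal sketch of the failure mode (not AgentOps code; the class, methods, and id-keyed backend are illustrative assumptions):

    # Hypothetical repro, assuming the backend rejects a second insert
    # with an event id it has already stored (the llm_pkey constraint).
    import uuid

    class SharedEventTracker:
        """Buggy pattern: one event attribute shared by all in-flight calls."""

        def __init__(self):
            self.llm_event = None

        def start_call(self, prompt):
            self.llm_event = {"id": str(uuid.uuid4()), "prompt": prompt}

        def finish_call(self):
            # Records whatever self.llm_event points at *now*,
            # not necessarily the event this call created.
            return self.llm_event["id"]

    tracker = SharedEventTracker()
    tracker.start_call("first prompt")
    tracker.start_call("second prompt")  # overlapping call reassigns the shared attribute
    assert tracker.finish_call() == tracker.finish_call()  # same id recorded twice

With a local llm_event created at the top of each patched call, every completion owns its own event, so no two recordings can share an id.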
1 change: 0 additions & 1 deletion agentops/llms/__init__.py
@@ -43,7 +43,6 @@ class LlmTracker:

     def __init__(self, client):
         self.client = client
-        self.completion = ""

     def override_api(self):
         """
50 changes: 24 additions & 26 deletions agentops/llms/anthropic.py
@@ -31,27 +31,27 @@
         from anthropic.resources import AsyncMessages
         from anthropic.types import Message

-        self.llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)
+        llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)
         if session is not None:
-            self.llm_event.session_id = session.session_id
+            llm_event.session_id = session.session_id

         def handle_stream_chunk(chunk: Message):
             try:
                 # We take the first chunk and accumulate the deltas from all subsequent chunks to build one full chat completion
                 if chunk.type == "message_start":
-                    self.llm_event.returns = chunk
-                    self.llm_event.agent_id = check_call_stack_for_agent_id()
-                    self.llm_event.model = kwargs["model"]
-                    self.llm_event.prompt = kwargs["messages"]
-                    self.llm_event.prompt_tokens = chunk.message.usage.input_tokens
-                    self.llm_event.completion = {
+                    llm_event.returns = chunk
+                    llm_event.agent_id = check_call_stack_for_agent_id()
+                    llm_event.model = kwargs["model"]
+                    llm_event.prompt = kwargs["messages"]
+                    llm_event.prompt_tokens = chunk.message.usage.input_tokens
+                    llm_event.completion = {
                         "role": chunk.message.role,
                         "content": "",  # Always returned as [] in this instance type
                     }

                 elif chunk.type == "content_block_start":
                     if chunk.content_block.type == "text":
-                        self.llm_event.completion["content"] += chunk.content_block.text
+                        llm_event.completion["content"] += chunk.content_block.text

                     elif chunk.content_block.type == "tool_use":
                         self.tool_id = chunk.content_block.id
@@ -62,7 +62,7 @@

                 elif chunk.type == "content_block_delta":
                     if chunk.delta.type == "text_delta":
-                        self.llm_event.completion["content"] += chunk.delta.text
+                        llm_event.completion["content"] += chunk.delta.text

                     elif chunk.delta.type == "input_json_delta":
                         self.tool_event[self.tool_id].logs[
@@ -73,15 +73,15 @@
                         pass

                 elif chunk.type == "message_delta":
-                    self.llm_event.completion_tokens = chunk.usage.output_tokens
+                    llm_event.completion_tokens = chunk.usage.output_tokens

                 elif chunk.type == "message_stop":
-                    self.llm_event.end_timestamp = get_ISO_time()
-                    self._safe_record(session, self.llm_event)
+                    llm_event.end_timestamp = get_ISO_time()
+                    self._safe_record(session, llm_event)

             except Exception as e:
                 self._safe_record(
-                    session, ErrorEvent(trigger_event=self.llm_event, exception=e)
+                    session, ErrorEvent(trigger_event=llm_event, exception=e)
                 )

         kwargs_str = pprint.pformat(kwargs)
@@ -124,23 +124,21 @@

         # Handle object responses
         try:
-            self.llm_event.returns = response.model_dump()
-            self.llm_event.agent_id = check_call_stack_for_agent_id()
-            self.llm_event.prompt = kwargs["messages"]
-            self.llm_event.prompt_tokens = response.usage.input_tokens
-            self.llm_event.completion = {
+            llm_event.returns = response.model_dump()
+            llm_event.agent_id = check_call_stack_for_agent_id()
+            llm_event.prompt = kwargs["messages"]
+            llm_event.prompt_tokens = response.usage.input_tokens
+            llm_event.completion = {
                 "role": "assistant",
                 "content": response.content[0].text,
             }
-            self.llm_event.completion_tokens = response.usage.output_tokens
-            self.llm_event.model = response.model
-            self.llm_event.end_timestamp = get_ISO_time()
+            llm_event.completion_tokens = response.usage.output_tokens
+            llm_event.model = response.model
+            llm_event.end_timestamp = get_ISO_time()

-            self._safe_record(session, self.llm_event)
+            self._safe_record(session, llm_event)
         except Exception as e:
-            self._safe_record(
-                session, ErrorEvent(trigger_event=self.llm_event, exception=e)
-            )
+            self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e))
         kwargs_str = pprint.pformat(kwargs)
         response = pprint.pformat(response)
         logger.warning(
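
The streaming handler above assembles one completion from many stream events: message_start seeds the event with model, prompt, and input-token usage; each content_block_delta appends text; message_stop stamps the end time and records the event once. A condensed sketch of that accumulation, using plain dicts as stand-ins for the Anthropic SDK's chunk types:

    # Simplified sketch of delta accumulation over a stream of chunks.
    # Chunk shapes here are illustrative, not the real SDK objects.
    def accumulate(chunks, model, messages):
        event = {}
        for chunk in chunks:
            if chunk["type"] == "message_start":
                event = {
                    "model": model,
                    "prompt": messages,
                    "completion": {"role": "assistant", "content": ""},
                }
            elif chunk["type"] == "content_block_delta":
                event["completion"]["content"] += chunk["text"]
            elif chunk["type"] == "message_stop":
                return event  # one full completion, recorded exactly once

    stream = [
        {"type": "message_start"},
        {"type": "content_block_delta", "text": "Hel"},
        {"type": "content_block_delta", "text": "lo"},
        {"type": "message_stop"},
    ]
    assert accumulate(stream, "claude-3", [])["completion"]["content"] == "Hello"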
46 changes: 22 additions & 24 deletions agentops/llms/cohere.py
@@ -52,32 +52,32 @@

         # from cohere.types.chat import ChatGenerationChunk
         # NOTE: Cohere only returns one message and its role will be CHATBOT which we are coercing to "assistant"
-        self.llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)
+        llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)
         if session is not None:
-            self.llm_event.session_id = session.session_id
+            llm_event.session_id = session.session_id

         self.action_events = {}

         def handle_stream_chunk(chunk, session: Optional[Session] = None):

             # We take the first chunk and accumulate the deltas from all subsequent chunks to build one full chat completion
             if isinstance(chunk, StreamedChatResponse_StreamStart):
-                self.llm_event.returns = chunk
-                self.llm_event.agent_id = check_call_stack_for_agent_id()
-                self.llm_event.model = kwargs.get("model", "command-r-plus")
-                self.llm_event.prompt = kwargs["message"]
-                self.llm_event.completion = ""
+                llm_event.returns = chunk
+                llm_event.agent_id = check_call_stack_for_agent_id()
+                llm_event.model = kwargs.get("model", "command-r-plus")
+                llm_event.prompt = kwargs["message"]
+                llm_event.completion = ""
                 return

             try:
                 if isinstance(chunk, StreamedChatResponse_StreamEnd):
                     # StreamedChatResponse_TextGeneration = LLMEvent
-                    self.llm_event.completion = {
+                    llm_event.completion = {
                         "role": "assistant",
                         "content": chunk.response.text,
                     }
-                    self.llm_event.end_timestamp = get_ISO_time()
-                    self._safe_record(session, self.llm_event)
+                    llm_event.end_timestamp = get_ISO_time()
+                    self._safe_record(session, llm_event)

                     # StreamedChatResponse_SearchResults = ActionEvent
                     search_results = chunk.response.search_results
@@ -115,7 +115,7 @@
                     self._safe_record(session, action_event)

                 elif isinstance(chunk, StreamedChatResponse_TextGeneration):
-                    self.llm_event.completion += chunk.text
+                    llm_event.completion += chunk.text
                 elif isinstance(chunk, StreamedChatResponse_ToolCallsGeneration):
                     pass
                 elif isinstance(chunk, StreamedChatResponse_CitationGeneration):
@@ -139,7 +139,7 @@

             except Exception as e:
                 self._safe_record(
-                    session, ErrorEvent(trigger_event=self.llm_event, exception=e)
+                    session, ErrorEvent(trigger_event=llm_event, exception=e)
                 )

         kwargs_str = pprint.pformat(kwargs)
@@ -175,35 +175,33 @@
         # Not enough to record StreamedChatResponse_ToolCallsGeneration because the tool may have not gotten called

         try:
-            self.llm_event.returns = response
-            self.llm_event.agent_id = check_call_stack_for_agent_id()
-            self.llm_event.prompt = []
+            llm_event.returns = response
+            llm_event.agent_id = check_call_stack_for_agent_id()
+            llm_event.prompt = []
             if response.chat_history:
                 role_map = {"USER": "user", "CHATBOT": "assistant", "SYSTEM": "system"}

                 for i in range(len(response.chat_history) - 1):
                     message = response.chat_history[i]
-                    self.llm_event.prompt.append(
+                    llm_event.prompt.append(
                         {
                             "role": role_map.get(message.role, message.role),
                             "content": message.message,
                         }
                     )

                 last_message = response.chat_history[-1]
-                self.llm_event.completion = {
+                llm_event.completion = {
                     "role": role_map.get(last_message.role, last_message.role),
                     "content": last_message.message,
                 }
-            self.llm_event.prompt_tokens = response.meta.tokens.input_tokens
-            self.llm_event.completion_tokens = response.meta.tokens.output_tokens
-            self.llm_event.model = kwargs.get("model", "command-r-plus")
+            llm_event.prompt_tokens = response.meta.tokens.input_tokens
+            llm_event.completion_tokens = response.meta.tokens.output_tokens
+            llm_event.model = kwargs.get("model", "command-r-plus")

-            self._safe_record(session, self.llm_event)
+            self._safe_record(session, llm_event)
         except Exception as e:
-            self._safe_record(
-                session, ErrorEvent(trigger_event=self.llm_event, exception=e)
-            )
+            self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e))
         kwargs_str = pprint.pformat(kwargs)
         response = pprint.pformat(response)
         logger.warning(
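
For non-streamed Cohere responses, the handler rebuilds an OpenAI-style transcript from response.chat_history: every entry except the last becomes part of the prompt, the last entry becomes the completion, and Cohere's upper-case roles are coerced through role_map. A small sketch of that split, with dicts standing in for the Cohere history objects:

    # Sketch of the chat_history -> prompt/completion split used above.
    role_map = {"USER": "user", "CHATBOT": "assistant", "SYSTEM": "system"}

    chat_history = [
        {"role": "USER", "message": "What is AgentOps?"},
        {"role": "CHATBOT", "message": "An observability SDK for agents."},
    ]

    prompt = [
        {"role": role_map.get(m["role"], m["role"]), "content": m["message"]}
        for m in chat_history[:-1]
    ]
    last = chat_history[-1]
    completion = {"role": role_map.get(last["role"], last["role"]), "content": last["message"]}

    assert prompt == [{"role": "user", "content": "What is AgentOps?"}]
    assert completion["role"] == "assistant"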
50 changes: 23 additions & 27 deletions agentops/llms/groq.py
@@ -37,21 +37,21 @@
         from groq.resources.chat import AsyncCompletions
         from groq.types.chat import ChatCompletionChunk

-        self.llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)
+        llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)
         if session is not None:
-            self.llm_event.session_id = session.session_id
+            llm_event.session_id = session.session_id

         def handle_stream_chunk(chunk: ChatCompletionChunk):
             # NOTE: prompt/completion usage not returned in response when streaming
             # We take the first ChatCompletionChunk and accumulate the deltas from all subsequent chunks to build one full chat completion
-            if self.llm_event.returns == None:
-                self.llm_event.returns = chunk
+            if llm_event.returns == None:
+                llm_event.returns = chunk

             try:
-                accumulated_delta = self.llm_event.returns.choices[0].delta
-                self.llm_event.agent_id = check_call_stack_for_agent_id()
-                self.llm_event.model = chunk.model
-                self.llm_event.prompt = kwargs["messages"]
+                accumulated_delta = llm_event.returns.choices[0].delta
+                llm_event.agent_id = check_call_stack_for_agent_id()
+                llm_event.model = chunk.model
+                llm_event.prompt = kwargs["messages"]

                 # NOTE: We assume for completion only choices[0] is relevant
                 choice = chunk.choices[0]
@@ -70,21 +70,19 @@

                 if choice.finish_reason:
                     # Streaming is done. Record LLMEvent
-                    self.llm_event.returns.choices[0].finish_reason = (
-                        choice.finish_reason
-                    )
-                    self.llm_event.completion = {
+                    llm_event.returns.choices[0].finish_reason = choice.finish_reason
+                    llm_event.completion = {
                         "role": accumulated_delta.role,
                         "content": accumulated_delta.content,
                         "function_call": accumulated_delta.function_call,
                         "tool_calls": accumulated_delta.tool_calls,
                     }
-                    self.llm_event.end_timestamp = get_ISO_time()
+                    llm_event.end_timestamp = get_ISO_time()

-                    self._safe_record(session, self.llm_event)
+                    self._safe_record(session, llm_event)
             except Exception as e:
                 self._safe_record(
-                    session, ErrorEvent(trigger_event=self.llm_event, exception=e)
+                    session, ErrorEvent(trigger_event=llm_event, exception=e)
                 )

         kwargs_str = pprint.pformat(kwargs)
@@ -127,19 +125,17 @@

         # v1.0.0+ responses are objects
         try:
-            self.llm_event.returns = response.model_dump()
-            self.llm_event.agent_id = check_call_stack_for_agent_id()
-            self.llm_event.prompt = kwargs["messages"]
-            self.llm_event.prompt_tokens = response.usage.prompt_tokens
-            self.llm_event.completion = response.choices[0].message.model_dump()
-            self.llm_event.completion_tokens = response.usage.completion_tokens
-            self.llm_event.model = response.model
-
-            self._safe_record(session, self.llm_event)
+            llm_event.returns = response.model_dump()
+            llm_event.agent_id = check_call_stack_for_agent_id()
+            llm_event.prompt = kwargs["messages"]
+            llm_event.prompt_tokens = response.usage.prompt_tokens
+            llm_event.completion = response.choices[0].message.model_dump()
+            llm_event.completion_tokens = response.usage.completion_tokens
+            llm_event.model = response.model
+
+            self._safe_record(session, llm_event)
         except Exception as e:
-            self._safe_record(
-                session, ErrorEvent(trigger_event=self.llm_event, exception=e)
-            )
+            self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e))

         kwargs_str = pprint.pformat(kwargs)
         response = pprint.pformat(response)
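
Note why the fix still holds for streaming: handle_stream_chunk is defined inside the patched call, so it closes over that call's local llm_event and keeps mutating the same object across chunks, while a concurrent call gets a fresh local of its own. A minimal closure sketch (illustrative names only):

    # Sketch: each call to make_handler captures its own event,
    # so two interleaved streams never share state.
    def make_handler():
        event = {"content": ""}  # local to this call, captured by the closure

        def handle_chunk(text):
            event["content"] += text
            return event

        return handle_chunk

    first, second = make_handler(), make_handler()
    first("Hello")
    second("Goodbye")
    assert first("")["content"] == "Hello"    # untouched by the second stream
    assert second("")["content"] == "Goodbye"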
50 changes: 23 additions & 27 deletions agentops/llms/litellm.py
@@ -49,21 +49,21 @@
         from openai.types.chat import ChatCompletionChunk
         from litellm.utils import CustomStreamWrapper

-        self.llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)
+        llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)
         if session is not None:
-            self.llm_event.session_id = session.session_id
+            llm_event.session_id = session.session_id

         def handle_stream_chunk(chunk: ChatCompletionChunk):
             # NOTE: prompt/completion usage not returned in response when streaming
             # We take the first ChatCompletionChunk and accumulate the deltas from all subsequent chunks to build one full chat completion
-            if self.llm_event.returns == None:
-                self.llm_event.returns = chunk
+            if llm_event.returns == None:
+                llm_event.returns = chunk

             try:
-                accumulated_delta = self.llm_event.returns.choices[0].delta
-                self.llm_event.agent_id = check_call_stack_for_agent_id()
-                self.llm_event.model = chunk.model
-                self.llm_event.prompt = kwargs["messages"]
+                accumulated_delta = llm_event.returns.choices[0].delta
+                llm_event.agent_id = check_call_stack_for_agent_id()
+                llm_event.model = chunk.model
+                llm_event.prompt = kwargs["messages"]

                 # NOTE: We assume for completion only choices[0] is relevant
                 choice = chunk.choices[0]
@@ -82,21 +82,19 @@

                 if choice.finish_reason:
                     # Streaming is done. Record LLMEvent
-                    self.llm_event.returns.choices[0].finish_reason = (
-                        choice.finish_reason
-                    )
-                    self.llm_event.completion = {
+                    llm_event.returns.choices[0].finish_reason = choice.finish_reason
+                    llm_event.completion = {
                         "role": accumulated_delta.role,
                         "content": accumulated_delta.content,
                         "function_call": accumulated_delta.function_call,
                         "tool_calls": accumulated_delta.tool_calls,
                     }
-                    self.llm_event.end_timestamp = get_ISO_time()
+                    llm_event.end_timestamp = get_ISO_time()

-                    self._safe_record(session, self.llm_event)
+                    self._safe_record(session, llm_event)
             except Exception as e:
                 self._safe_record(
-                    session, ErrorEvent(trigger_event=self.llm_event, exception=e)
+                    session, ErrorEvent(trigger_event=llm_event, exception=e)
                 )

         kwargs_str = pprint.pformat(kwargs)
@@ -149,19 +147,17 @@

         # v1.0.0+ responses are objects
         try:
-            self.llm_event.returns = response
-            self.llm_event.agent_id = check_call_stack_for_agent_id()
-            self.llm_event.prompt = kwargs["messages"]
-            self.llm_event.prompt_tokens = response.usage.prompt_tokens
-            self.llm_event.completion = response.choices[0].message.model_dump()
-            self.llm_event.completion_tokens = response.usage.completion_tokens
-            self.llm_event.model = response.model
-
-            self._safe_record(session, self.llm_event)
+            llm_event.returns = response
+            llm_event.agent_id = check_call_stack_for_agent_id()
+            llm_event.prompt = kwargs["messages"]
+            llm_event.prompt_tokens = response.usage.prompt_tokens
+            llm_event.completion = response.choices[0].message.model_dump()
+            llm_event.completion_tokens = response.usage.completion_tokens
+            llm_event.model = response.model
+
+            self._safe_record(session, llm_event)
         except Exception as e:
-            self._safe_record(
-                session, ErrorEvent(trigger_event=self.llm_event, exception=e)
-            )
+            self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e))

         kwargs_str = pprint.pformat(kwargs)
         response = pprint.pformat(response)