fix: Improve prompt formatting and streaming event handling
Co-Authored-By: Alex Reibman <[email protected]>
devin-ai-integration[bot] and areibman committed Dec 18, 2024
1 parent 26c71cf commit 68b276e
Showing 1 changed file with 18 additions and 18 deletions.
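In short: handle_response previously flattened the ChatML messages into a single prompt string; it now passes the messages list to LLMEvent unchanged, and streaming events are recorded once per completed stream, with chunk errors re-raised instead of skipped. A minimal sketch of the prompt change, using an illustrative (hypothetical) message list:

    # Hypothetical ChatML input, as received via kwargs["messages"]
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello!"},
    ]

    # Before: flattened to a single string
    formatted_prompt = "\n".join([f"{msg['role']}: {msg['content']}" for msg in messages])
    # -> "system: You are a helpful assistant.\nuser: Hello!"

    # After: the structured list is passed straight through,
    # i.e. LLMEvent(..., prompt=messages, ...)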
agentops/llms/providers/fireworks.py (36 changes: 18 additions & 18 deletions)
@@ -19,7 +19,8 @@ def __init__(self, client):
         self._original_completion = None
         self._original_async_completion = None
         self._session = None  # Initialize session attribute
-        self._accumulated_content = ""  # Add accumulator for streaming responses
+        self._accumulated_content = ""  # Track accumulated content for streaming
+        self._init_timestamp = None  # Track stream start time
         logger.info(f"Initializing {self._provider_name} provider")

     def set_session(self, session: Session):
@@ -34,10 +35,9 @@ def handle_response(self, response, kwargs, init_timestamp, session: Optional[Session] = None):
             logger.debug(f"Updated session to {session.session_id} for {self._provider_name} provider")

         try:
-            # Format prompt properly
+            # Pass ChatML messages directly to LLMEvent
             messages = kwargs.get("messages", [])
-            formatted_prompt = "\n".join([f"{msg['role']}: {msg['content']}" for msg in messages])
-            logger.debug(f"Formatted prompt: {formatted_prompt}")
+            logger.debug(f"Using ChatML messages: {messages}")

             # Handle streaming response
             if kwargs.get("stream", False):
@@ -46,13 +46,14 @@
                     event_type=EventType.LLM.value,
                     init_timestamp=init_timestamp,
                     model=kwargs.get("model", "unknown"),
-                    prompt=formatted_prompt,
+                    prompt=messages,  # Pass ChatML directly
                     prompt_tokens=0,
                     completion_tokens=0,
                     cost=0.0,
                 )

                 async def async_generator(stream):
                     """Handle async streaming response."""
                     self._accumulated_content = ""
                     async for chunk in stream:
                         try:
@@ -67,21 +68,21 @@ async def async_generator(stream):

                             if content:
                                 self._accumulated_content += content
-                            # Record event only when we have the complete response
+                            # Only create event when stream is finished
                             if hasattr(chunk.choices[0], "finish_reason") and chunk.choices[0].finish_reason:
                                 stream_event.completion = self._accumulated_content
                                 stream_event.end_timestamp = get_ISO_time()
                                 if self._session:
                                     self._session.record(stream_event)
-                                    logger.debug(
-                                        f"Recorded streaming response for session {self._session.session_id}"
-                                    )
+                                    logger.debug(f"Recorded complete streaming response for session {self._session.session_id}")
+                                self._accumulated_content = ""  # Reset for next stream
                             yield content
                         except Exception as e:
-                            logger.error(f"Error processing streaming chunk: {str(e)}")
-                            continue
+                            logger.error(f"Error in async streaming: {str(e)}")
+                            raise

                 def generator(stream):
                     """Handle synchronous streaming response."""
                     self._accumulated_content = ""
                     for chunk in stream:
                         try:
@@ -96,19 +97,18 @@ def generator(stream):

                             if content:
                                 self._accumulated_content += content
-                            # Record event only when we have the complete response
+                            # Only create event when stream is finished
                             if hasattr(chunk.choices[0], "finish_reason") and chunk.choices[0].finish_reason:
                                 stream_event.completion = self._accumulated_content
                                 stream_event.end_timestamp = get_ISO_time()
                                 if self._session:
                                     self._session.record(stream_event)
-                                    logger.debug(
-                                        f"Recorded streaming response for session {self._session.session_id}"
-                                    )
+                                    logger.debug(f"Recorded complete streaming response for session {self._session.session_id}")
+                                self._accumulated_content = ""  # Reset for next stream
                             yield content
                         except Exception as e:
-                            logger.error(f"Error processing streaming chunk: {str(e)}")
-                            continue
+                            logger.error(f"Error in sync streaming: {str(e)}")
+                            raise

                 if hasattr(response, "__aiter__"):
                     return async_generator(response)

@@ -125,7 +125,7 @@ def generator(stream):
                 init_timestamp=init_timestamp,
                 end_timestamp=get_ISO_time(),
                 model=kwargs.get("model", "unknown"),
-                prompt=formatted_prompt,
+                prompt=messages,  # Pass ChatML directly
                 completion=content,
                 prompt_tokens=0,
                 completion_tokens=0,
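For context on the streaming half of the change: both generators follow an accumulate-then-record pattern — chunk contents are concatenated as they arrive, and the LLMEvent is recorded only when a chunk carries a finish_reason; the hasattr(response, "__aiter__") check routes async streams to async_generator and everything else to generator. A self-contained sketch of the pattern (the Delta/Choice/Chunk stand-ins are hypothetical, not the Fireworks SDK types):

    from dataclasses import dataclass
    from typing import List, Optional

    @dataclass
    class Delta:
        content: Optional[str] = None

    @dataclass
    class Choice:
        delta: Delta
        finish_reason: Optional[str] = None

    @dataclass
    class Chunk:
        choices: List[Choice]

    def generator(stream):
        accumulated = ""  # reset at stream start, as in the provider
        for chunk in stream:
            content = chunk.choices[0].delta.content
            if content:
                accumulated += content
            # Record only once the stream reports it is finished
            if chunk.choices[0].finish_reason:
                print(f"record LLMEvent, completion={accumulated!r}")
            if content:
                yield content

    chunks = [
        Chunk([Choice(Delta("Hel"))]),
        Chunk([Choice(Delta("lo"))]),
        Chunk([Choice(Delta(), finish_reason="stop")]),
    ]
    assert "".join(generator(chunks)) == "Hello"

One behavioral note from the diff: chunk-level errors now raise instead of continue, so a malformed chunk aborts the stream rather than silently dropping content from the recorded completion.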
