Remove chatml validation from sdk (#127)
* Removing chatml

* Adding prompt/completion tokens. Adding testing for streaming

* small fix to test file

* updated completion to return more

* removing dupes
HowieG authored Apr 2, 2024
1 parent d165c3c commit 45005bd
Showing 2 changed files with 3 additions and 2 deletions.
1 change: 1 addition & 0 deletions agentops/langchain_callback_handler.py
@@ -433,6 +433,7 @@ async def on_llm_end(
# TODO: more descriptive error
error_event = ErrorEvent(trigger_event=self.events.llm[str(run_id)],
details="on_llm_end: No generations", timestamp=get_ISO_time())
self.ao_client.record(error_event)

@debug_print_function_params
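For context, the hunk above records an ErrorEvent when on_llm_end receives no generations. A minimal, self-contained sketch of the payload shape (the dict below is hypothetical; only the details string and the kwarg names are taken from the diff):

    from datetime import datetime, timezone

    def get_iso_time() -> str:
        # Stand-in for agentops' get_ISO_time(): an ISO-8601 UTC timestamp.
        return datetime.now(timezone.utc).isoformat()

    # Hypothetical shape of the recorded event; field names mirror the
    # ErrorEvent kwargs visible in the hunk.
    error_event = {
        "trigger_event": "<the LLMEvent tracked for this run_id>",
        "details": "on_llm_end: No generations",
        "timestamp": get_iso_time(),
    }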
4 changes: 2 additions & 2 deletions agentops/llm_tracker.py
@@ -89,7 +89,7 @@ def generator():
self.llm_event.agent_id = check_call_stack_for_agent_id()
self.llm_event.prompt = kwargs["messages"]
self.llm_event.prompt_tokens = response['usage']['prompt_tokens']
- self.llm_event.completion = response['choices'][0]['message']['content']
+ self.llm_event.completion = {"role": "assistant", "content": response['choices'][0]['message']['content']}
self.llm_event.completion_tokens = response['usage']['completion_tokens']
self.llm_event.returns = {"content": response['choices'][0]['message']['content']}
self.llm_event.model = response["model"]
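To see what this first llm_tracker change does, here is a small sketch with a hypothetical OpenAI-v0-style response dict (the literal values are illustrative; only the access pattern mirrors the diff):

    # Hypothetical chat-completion response in the dict shape the sync
    # generator() path parses.
    response = {
        "model": "gpt-3.5-turbo",
        "usage": {"prompt_tokens": 12, "completion_tokens": 5},
        "choices": [{"message": {"role": "assistant", "content": "Hi there!"}}],
    }

    content = response["choices"][0]["message"]["content"]

    old_completion = content                                     # before: bare string
    new_completion = {"role": "assistant", "content": content}   # after: message-shaped dict

Storing the completion as a role/content dict keeps it in the same message format as the recorded prompt.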
@@ -181,7 +181,7 @@ async def async_generator():
self.llm_event.agent_id = check_call_stack_for_agent_id()
self.llm_event.prompt = kwargs["messages"]
self.llm_event.prompt_tokens = response.usage.prompt_tokens
- self.llm_event.completion = response.choices[0].message.model_dump().get('content')
+ self.llm_event.completion = response.choices[0].message.model_dump()
self.llm_event.completion_tokens = response.usage.completion_tokens
self.llm_event.returns = response.model_dump()
self.llm_event.model = response.model
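The async path makes the matching change on the attribute-style (v1) response object: it now keeps the whole message dict rather than just its content. A stand-in pydantic model illustrates what model_dump() returns (the real message type comes from the openai client; this mock is an assumption for demonstration):

    from pydantic import BaseModel

    class Message(BaseModel):  # stand-in for the client's message type
        role: str
        content: str

    msg = Message(role="assistant", content="Hi there!")

    msg.model_dump().get("content")  # before: "Hi there!"
    msg.model_dump()                 # after: {"role": "assistant", "content": "Hi there!"}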
