From 70d1d3ac22a6056012fc833cb2ce807ad917aa23 Mon Sep 17 00:00:00 2001 From: Soo Date: Wed, 26 Jun 2024 15:39:19 -0700 Subject: [PATCH] Fix breaking change: remove Models enum and accept plain model strings --- agentops/__init__.py | 1 - agentops/enums.py | 15 --------------- agentops/event.py | 8 ++++---- 3 files changed, 4 insertions(+), 20 deletions(-) diff --git a/agentops/__init__.py b/agentops/__init__.py index e8600cce..6cf979e8 100755 --- a/agentops/__init__.py +++ b/agentops/__init__.py @@ -6,7 +6,6 @@ from .client import Client from .config import Configuration from .event import Event, ActionEvent, LLMEvent, ToolEvent, ErrorEvent -from .enums import Models from .decorators import record_function from .agent import track_agent from .log_config import logger diff --git a/agentops/enums.py b/agentops/enums.py index 2c93be15..13901065 100644 --- a/agentops/enums.py +++ b/agentops/enums.py @@ -8,21 +8,6 @@ class EventType(Enum): TOOL = "tools" ERROR = "errors" - -class Models(Enum): - GPT_3_5_TURBO = "gpt-3.5-turbo" - GPT_3_5_TURBO_0301 = "gpt-3.5-turbo-0301" - GPT_3_5_TURBO_0613 = "gpt-3.5-turbo-0613" - GPT_3_5_TURBO_16K = "gpt-3.5-turbo-16k" - GPT_3_5_TURBO_16K_0613 = "gpt-3.5-turbo-16k-0613" - GPT_4_0314 = "gpt-4-0314" - GPT_4 = "gpt-4" - GPT_4_32K = "gpt-4-32k" - GPT_4_32K_0314 = "gpt-4-32k-0314" - GPT_4_0613 = "gpt-4-0613" - TEXT_EMBEDDING_ADA_002 = "text-embedding-ada-002" - - class EndState(Enum): SUCCESS = "Success" FAIL = "Fail" diff --git a/agentops/event.py b/agentops/event.py index 9bfe3a8b..126abb0c 100644 --- a/agentops/event.py +++ b/agentops/event.py @@ -5,10 +5,10 @@ Event: Represents discrete events to be recorded. 
""" -from dataclasses import asdict, dataclass, field +from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Sequence, Union from .helpers import get_ISO_time, check_call_stack_for_agent_id -from .enums import EventType, Models +from .enums import EventType from uuid import UUID, uuid4 import traceback @@ -72,7 +72,7 @@ class LLMEvent(Event): prompt_tokens(int, optional): The number of tokens in the prompt message. completion(str, object, optional): The message or returned by the LLM. Preferably in ChatML format which is more fully supported by AgentOps. completion_tokens(int, optional): The number of tokens in the completion message. - model(Models, str, optional): LLM model e.g. "gpt-4". Models defined in enums.Models are more fully supported by AgentOps e.g. extra features in dashboard. + model(str, optional): LLM model e.g. "gpt-4". """ @@ -82,7 +82,7 @@ class LLMEvent(Event): prompt_tokens: Optional[int] = None completion: Union[str, object] = None completion_tokens: Optional[int] = None - model: Optional[Union[Models, str]] = None + model: Optional[str] = None @dataclass