From 871b9fb61ac9a160511d4240edc932e0384f4c4f Mon Sep 17 00:00:00 2001 From: Braelyn Boynton Date: Sat, 14 Sep 2024 05:04:51 +0900 Subject: [PATCH 01/14] Capture track agent (#381) * handle calling create_agent before init * remove comments * add back warnings --- agentops/client.py | 19 +++++++++++++- agentops/decorators.py | 12 ++------- tests/test_pre_init.py | 57 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 77 insertions(+), 11 deletions(-) create mode 100644 tests/test_pre_init.py diff --git a/agentops/client.py b/agentops/client.py index f2fc4dea1..50da1ed59 100644 --- a/agentops/client.py +++ b/agentops/client.py @@ -38,6 +38,7 @@ def __init__(self): self._llm_tracker: Optional[LlmTracker] = None self._sessions: List[Session] = active_sessions self._config = Configuration() + self._pre_init_queue = {"agents": []} self.configure( api_key=os.environ.get("AGENTOPS_API_KEY"), @@ -106,6 +107,13 @@ def initialize(self) -> Union[Session, None]: if self._config.auto_start_session: session = self.start_session() + if session: + for agent_args in self._pre_init_queue["agents"]: + session.create_agent( + name=agent_args["name"], agent_id=agent_args["agent_id"] + ) + self._pre_init_queue["agents"] = [] + return session def _initialize_partner_framework(self) -> None: @@ -234,6 +242,13 @@ def start_session( config=self._config, ) + if self._pre_init_queue["agents"] and len(self._pre_init_queue["agents"]) > 0: + for agent_args in self._pre_init_queue["agents"]: + session.create_agent( + name=agent_args["name"], agent_id=agent_args["agent_id"] + ) + self._pre_init_queue["agents"] = [] + if not session.is_running: return logger.error("Failed to start session") @@ -294,7 +309,9 @@ def create_agent( # if no session passed, assume single session session = self._safe_get_session() if session is None: - return + self._pre_init_queue["agents"].append( + {"name": name, "agent_id": agent_id} + ) session.create_agent(name=name, agent_id=agent_id) return agent_id diff --git a/agentops/decorators.py b/agentops/decorators.py index c1930b445..d291c4167 100644 --- a/agentops/decorators.py +++ b/agentops/decorators.py @@ -326,12 +326,6 @@ def new_init(self, *args, **kwargs): original_init(self, *args, **kwargs) - if not Client().is_initialized: - Client().add_pre_init_warning( - f"Failed to track an agent {name} because agentops.init() was not " - + "called before initializing the agent with the @track_agent decorator." - ) - self.agent_ops_agent_id = str(uuid4()) session = kwargs.get("session", None) @@ -345,12 +339,10 @@ def new_init(self, *args, **kwargs): ) except AttributeError as e: Client().add_pre_init_warning( - f"Failed to track an agent {name} because agentops.init() was not " - + "called before initializing the agent with the @track_agent decorator." + f"Failed to track an agent {name} with the @track_agent decorator." ) logger.warning( - "Failed to track an agent. This often happens if agentops.init() was not " - "called before initializing an agent with the @track_agent decorator." + "Failed to track an agent with the @track_agent decorator." 
) original_init(self, *args, **kwargs) diff --git a/tests/test_pre_init.py b/tests/test_pre_init.py new file mode 100644 index 000000000..f87219ac2 --- /dev/null +++ b/tests/test_pre_init.py @@ -0,0 +1,57 @@ +import pytest +import requests_mock +import time +import agentops +from agentops import record_action, track_agent +from datetime import datetime +from agentops.singleton import clear_singletons +import contextlib + +jwts = ["some_jwt", "some_jwt2", "some_jwt3"] + + +@pytest.fixture(autouse=True) +def setup_teardown(): + clear_singletons() + yield + agentops.end_all_sessions() # teardown part + + +@contextlib.contextmanager +@pytest.fixture(autouse=True) +def mock_req(): + with requests_mock.Mocker() as m: + url = "https://api.agentops.ai" + m.post(url + "/v2/create_agent", text="ok") + m.post(url + "/v2/update_session", text="ok") + m.post( + url + "/v2/create_session", json={"status": "success", "jwt": "some_jwt"} + ) + + yield m + + +@track_agent(name="TestAgent") +class BasicAgent: + def __init__(self): + pass + + +class TestPreInit: + def setup_method(self): + self.url = "https://api.agentops.ai" + self.api_key = "11111111-1111-4111-8111-111111111111" + + def test_track_agent(self, mock_req): + agent = BasicAgent() + + assert len(mock_req.request_history) == 0 + + agentops.init(api_key=self.api_key) + + # Assert + # start session and create agent + assert len(mock_req.request_history) == 2 + assert mock_req.last_request.headers["X-Agentops-Api-Key"] == self.api_key + + agentops.end_session(end_state="Success") From 8161133a853f47ed269df2d6eb774a4ea46f9ecf Mon Sep 17 00:00:00 2001 From: Howard Gil Date: Mon, 16 Sep 2024 17:13:24 -0700 Subject: [PATCH 02/14] Fixing job_posting and calling end_session in notebook automation (#390) * adding clarity * Renamed folders for consistency and clarity * addded back line i mistakenly deleted when refactoring examples * triggering automation only on merges to main from pull request now * updated test-notebooks.yml to not run job_posting at new crewai_examples location * Making sure agentops installed from main not pypi for automation * black formatting --- .github/workflows/test-notebooks.yml | 17 +++++++---------- .../anthropic_example.ipynb | 0 .../AgentChat.ipynb | 1 + .../MathAgent.ipynb | 1 + .../cohere_example.ipynb | 0 examples/{crew => crewai_examples}/README.md | 0 .../{crew => crewai_examples}/job_posting.ipynb | 14 +++++++++++--- .../markdown_validator.ipynb | 0 examples/demos/agentchat_agentops.ipynb | 1 + .../langchain_examples.ipynb | 4 ++++ .../litellm_example.ipynb | 0 .../Autonomous_web_browsing.ipynb | 0 .../Sample_browsing_agent.ipynb | 0 .../Step_by_step_web_browsing.ipynb | 0 .../Webpage_data_retrieval.ipynb | 0 15 files changed, 25 insertions(+), 13 deletions(-) rename examples/{anthropic-sdk => anthropic_examples}/anthropic_example.ipynb (100%) rename examples/{autogen => autogen_examples}/AgentChat.ipynb (99%) rename examples/{autogen => autogen_examples}/MathAgent.ipynb (99%) rename examples/{cohere-sdk => cohere_examples}/cohere_example.ipynb (100%) rename examples/{crew => crewai_examples}/README.md (100%) rename examples/{crew => crewai_examples}/job_posting.ipynb (95%) rename examples/{crew => crewai_examples}/markdown_validator.ipynb (100%) rename examples/{langchain => langchain_examples}/langchain_examples.ipynb (99%) rename examples/{litellm-sdk => litellm_examples}/litellm_example.ipynb (100%) rename examples/{multion => multion_examples}/Autonomous_web_browsing.ipynb (100%) rename examples/{multion => 
multion_examples}/Sample_browsing_agent.ipynb (100%) rename examples/{multion => multion_examples}/Step_by_step_web_browsing.ipynb (100%) rename examples/{multion => multion_examples}/Webpage_data_retrieval.ipynb (100%) diff --git a/.github/workflows/test-notebooks.yml b/.github/workflows/test-notebooks.yml index 406a73517..303437e03 100644 --- a/.github/workflows/test-notebooks.yml +++ b/.github/workflows/test-notebooks.yml @@ -1,14 +1,7 @@ name: Test Notebooks on: - push: - branches: - - main - paths: - - "agentops/**" - - "examples/**" - - "tests/**" - - ".github/workflows/test-notebooks.yml" - pull_request_target: + pull_request: + types: [closed] branches: - main paths: @@ -43,13 +36,17 @@ jobs: echo "GROQ_API_KEY=${{ secrets.GROQ_API_KEY }}" >> .env echo "MULTION_API_KEY=${{ secrets.MULTION_API_KEY }}" >> .env echo "SERPER_API_KEY=${{ secrets.SERPER_API_KEY }}" >> .env + - name: Install AgentOps from main branch and remove agentops install from notebooks + run: | + pip install git+https://github.com/AgentOps-AI/agentops.git@main + find . -name '*.ipynb' -exec sed -i '/^%pip install.*agentops/d' {} + - name: Run notebooks and check for errors run: | mkdir -p logs exit_code=0 exclude_notebooks=( - "./examples/crew/job_posting.ipynb", + "./examples/crewai_examples/job_posting.ipynb", "./examples/demos/agentchat_agentops.ipynb" ) diff --git a/examples/anthropic-sdk/anthropic_example.ipynb b/examples/anthropic_examples/anthropic_example.ipynb similarity index 100% rename from examples/anthropic-sdk/anthropic_example.ipynb rename to examples/anthropic_examples/anthropic_example.ipynb diff --git a/examples/autogen/AgentChat.ipynb b/examples/autogen_examples/AgentChat.ipynb similarity index 99% rename from examples/autogen/AgentChat.ipynb rename to examples/autogen_examples/AgentChat.ipynb index 4de700ab3..85fd49910 100644 --- a/examples/autogen/AgentChat.ipynb +++ b/examples/autogen_examples/AgentChat.ipynb @@ -146,6 +146,7 @@ "except StdinNotImplementedError:\n", " # This is only necessary for AgentOps testing automation which is headless and will not have user input\n", " print(\"Stdin not implemented. Skipping initiate_chat\")\n", + " agentops.end_session(\"Indeterminate\")\n", "\n", "# Close your AgentOps session to indicate that it completed.\n", "agentops.end_session(\"Success\")\n", diff --git a/examples/autogen/MathAgent.ipynb b/examples/autogen_examples/MathAgent.ipynb similarity index 99% rename from examples/autogen/MathAgent.ipynb rename to examples/autogen_examples/MathAgent.ipynb index bf542594f..13bf58a81 100644 --- a/examples/autogen/MathAgent.ipynb +++ b/examples/autogen_examples/MathAgent.ipynb @@ -195,6 +195,7 @@ "except StdinNotImplementedError:\n", " # This is only necessary for AgentOps testing automation which is headless and will not have user input\n", " print(\"Stdin not implemented. 
Skipping initiate_chat\")\n", + " agentops.end_session(\"Indeterminate\")\n", "\n", "agentops.end_session(\"Success\")" ] diff --git a/examples/cohere-sdk/cohere_example.ipynb b/examples/cohere_examples/cohere_example.ipynb similarity index 100% rename from examples/cohere-sdk/cohere_example.ipynb rename to examples/cohere_examples/cohere_example.ipynb diff --git a/examples/crew/README.md b/examples/crewai_examples/README.md similarity index 100% rename from examples/crew/README.md rename to examples/crewai_examples/README.md diff --git a/examples/crew/job_posting.ipynb b/examples/crewai_examples/job_posting.ipynb similarity index 95% rename from examples/crew/job_posting.ipynb rename to examples/crewai_examples/job_posting.ipynb index eb3faaaac..4c118ac48 100644 --- a/examples/crew/job_posting.ipynb +++ b/examples/crewai_examples/job_posting.ipynb @@ -41,7 +41,10 @@ "from crewai_tools.tools import WebsiteSearchTool, SerperDevTool, FileReadTool\n", "import agentops\n", "import os\n", - "from dotenv import load_dotenv" + "from dotenv import load_dotenv\n", + "from IPython.core.error import (\n", + " StdinNotImplementedError,\n", + ") # only needed by AgentOps testing automation" ] }, { @@ -244,8 +247,13 @@ " ],\n", ")\n", "\n", - "# Kick off the process\n", - "result = crew.kickoff()\n", + "try:\n", + " # Kick off the process\n", + " result = crew.kickoff()\n", + "except StdinNotImplementedError:\n", + " # This is only necessary for AgentOps testing automation which is headless and will not have user input\n", + " print(\"Stdin not implemented. Skipping kickoff()\")\n", + " agentops.end_session(\"Indeterminate\")\n", "\n", "print(\"Job Posting Creation Process Completed.\")\n", "print(\"Final Job Posting:\")\n", diff --git a/examples/crew/markdown_validator.ipynb b/examples/crewai_examples/markdown_validator.ipynb similarity index 100% rename from examples/crew/markdown_validator.ipynb rename to examples/crewai_examples/markdown_validator.ipynb diff --git a/examples/demos/agentchat_agentops.ipynb b/examples/demos/agentchat_agentops.ipynb index 7b0d7b18e..2aa7a84e7 100644 --- a/examples/demos/agentchat_agentops.ipynb +++ b/examples/demos/agentchat_agentops.ipynb @@ -196,6 +196,7 @@ "except StdinNotImplementedError:\n", " # This is only necessary for AgentOps testing automation which is headless and will not have user input\n", " print(\"Stdin not implemented. 
Skipping initiate_chat\")\n", + " agentops.end_session(\"Indeterminate\")\n", "\n", "# Close your AgentOps session to indicate that it completed.\n", "agentops.end_session(\"Success\")" diff --git a/examples/langchain/langchain_examples.ipynb b/examples/langchain_examples/langchain_examples.ipynb similarity index 99% rename from examples/langchain/langchain_examples.ipynb rename to examples/langchain_examples/langchain_examples.ipynb index 361064c61..b087de3b7 100644 --- a/examples/langchain/langchain_examples.ipynb +++ b/examples/langchain_examples/langchain_examples.ipynb @@ -148,6 +148,10 @@ }, "outputs": [], "source": [ + "agentops_handler = AgentOpsLangchainCallbackHandler(\n", + " api_key=AGENTOPS_API_KEY, default_tags=[\"Langchain Example\"]\n", + ")\n", + "\n", "llm = ChatOpenAI(\n", " openai_api_key=OPENAI_API_KEY, callbacks=[agentops_handler], model=\"gpt-3.5-turbo\"\n", ")\n", diff --git a/examples/litellm-sdk/litellm_example.ipynb b/examples/litellm_examples/litellm_example.ipynb similarity index 100% rename from examples/litellm-sdk/litellm_example.ipynb rename to examples/litellm_examples/litellm_example.ipynb diff --git a/examples/multion/Autonomous_web_browsing.ipynb b/examples/multion_examples/Autonomous_web_browsing.ipynb similarity index 100% rename from examples/multion/Autonomous_web_browsing.ipynb rename to examples/multion_examples/Autonomous_web_browsing.ipynb diff --git a/examples/multion/Sample_browsing_agent.ipynb b/examples/multion_examples/Sample_browsing_agent.ipynb similarity index 100% rename from examples/multion/Sample_browsing_agent.ipynb rename to examples/multion_examples/Sample_browsing_agent.ipynb diff --git a/examples/multion/Step_by_step_web_browsing.ipynb b/examples/multion_examples/Step_by_step_web_browsing.ipynb similarity index 100% rename from examples/multion/Step_by_step_web_browsing.ipynb rename to examples/multion_examples/Step_by_step_web_browsing.ipynb diff --git a/examples/multion/Webpage_data_retrieval.ipynb b/examples/multion_examples/Webpage_data_retrieval.ipynb similarity index 100% rename from examples/multion/Webpage_data_retrieval.ipynb rename to examples/multion_examples/Webpage_data_retrieval.ipynb From e06bf61a940430f8dc74ec3946d3df02c5b0c36d Mon Sep 17 00:00:00 2001 From: Howard Gil Date: Mon, 16 Sep 2024 17:14:32 -0700 Subject: [PATCH 03/14] Fixed Time Travel output for multithreading and clarity (#388) * Fixed for multithreading and clarity * dont need threading * leftover line * fixed for threading * cleaning up time travel output * Added prepend string --- agentops/time_travel.py | 53 +++++++++++++---------------------------- 1 file changed, 17 insertions(+), 36 deletions(-) diff --git a/agentops/time_travel.py b/agentops/time_travel.py index 14e8b2af4..6c1c7588f 100644 --- a/agentops/time_travel.py +++ b/agentops/time_travel.py @@ -5,6 +5,8 @@ from .exceptions import ApiServerException from .singleton import singleton +ttd_prepend_string = "🖇️ Agentops: ⏰ Time Travel |" + @singleton class TimeTravel: @@ -47,9 +49,9 @@ def fetch_time_travel_id(ttd_id): set_time_travel_active_state(True) except ApiServerException as e: - manage_time_travel_state(activated=False, error=e) + print(f"{ttd_prepend_string} Error - {e}") except Exception as e: - manage_time_travel_state(activated=False, error=e) + print(f"{ttd_prepend_string} Error - {e}") def fetch_completion_override_from_time_travel_cache(kwargs): @@ -64,14 +66,14 @@ def fetch_completion_override_from_time_travel_cache(kwargs): def find_cache_hit(prompt_messages, 
completion_overrides): if not isinstance(prompt_messages, (list, tuple)): print( - "Time Travel Error - unexpected type for prompt_messages. Expected 'list' or 'tuple'. Got ", + f"{ttd_prepend_string} Error - unexpected type for prompt_messages. Expected 'list' or 'tuple'. Got ", type(prompt_messages), ) return None if not isinstance(completion_overrides, dict): print( - "Time Travel Error - unexpected type for completion_overrides. Expected 'dict'. Got ", + f"{ttd_prepend_string} Error - unexpected type for completion_overrides. Expected 'dict'. Got ", type(completion_overrides), ) return None @@ -80,7 +82,7 @@ def find_cache_hit(prompt_messages, completion_overrides): completion_override_dict = eval(key) if not isinstance(completion_override_dict, dict): print( - "Time Travel Error - unexpected type for completion_override_dict. Expected 'dict'. Got ", + f"{ttd_prepend_string} Error - unexpected type for completion_override_dict. Expected 'dict'. Got ", type(completion_override_dict), ) continue @@ -88,7 +90,7 @@ def find_cache_hit(prompt_messages, completion_overrides): cached_messages = completion_override_dict.get("messages") if not isinstance(cached_messages, list): print( - "Time Travel Error - unexpected type for cached_messages. Expected 'list'. Got ", + f"{ttd_prepend_string} Error - unexpected type for cached_messages. Expected 'list'. Got ", type(cached_messages), ) continue @@ -105,10 +107,12 @@ def find_cache_hit(prompt_messages, completion_overrides): return value except (SyntaxError, ValueError, TypeError) as e: print( - f"Time Travel Error - Error processing completion_overrides item: {e}" + f"{ttd_prepend_string} Error - Error processing completion_overrides item: {e}" ) except Exception as e: - print(f"Time Travel Error - Unexpected error in find_cache_hit: {e}") + print( + f"{ttd_prepend_string} Error - Unexpected error in find_cache_hit: {e}" + ) return None @@ -120,14 +124,10 @@ def check_time_travel_active(): try: with open(config_file_path, "r") as config_file: config = yaml.safe_load(config_file) - if config.get("Time_Travel_Debugging_Active", True): - manage_time_travel_state(activated=True) - return True + return config.get("Time_Travel_Debugging_Active", False) except FileNotFoundError: return False - return False - def set_time_travel_active_state(is_active: bool): config_path = ".agentops_time_travel.yaml" @@ -144,30 +144,11 @@ def set_time_travel_active_state(is_active: bool): yaml.dump(config, config_file) except: print( - f"🖇 AgentOps: Unable to write to {config_path}. Time Travel not activated" + f"{ttd_prepend_string} Error - Unable to write to {config_path}. Time Travel not activated" ) return - if is_active: - manage_time_travel_state(activated=True) - print("🖇 AgentOps: Time Travel Activated") - else: - manage_time_travel_state(activated=False) - print("🖇 AgentOps: Time Travel Deactivated") - - -def add_time_travel_terminal_indicator(): - print(f"🖇️ ⏰ | ", end="") - - -def reset_terminal(): - print("\033[0m", end="") - - -def manage_time_travel_state(activated=False, error=None): - if activated: - add_time_travel_terminal_indicator() + if is_active: + print(f"{ttd_prepend_string} Activated") else: - reset_terminal() - if error is not None: - print(f"🖇 Deactivating Time Travel. 
Error with configuration: {error}") + print(f"{ttd_prepend_string} Deactivated") From 7b5d69436651da10744a19746351b2404e68ceab Mon Sep 17 00:00:00 2001 From: Alex Reibman Date: Tue, 17 Sep 2024 13:58:14 -0700 Subject: [PATCH 04/14] Fixed typos and spelling in docs from Spellcaster (#391) * Fixed typos and spelling in docs from Spellcaster * Small fixes --------- Co-authored-by: Howard Gil --- docs/snippets/add-env-tooltip.mdx | 2 +- docs/snippets/github-stars.mdx | 2 +- docs/v0/recording-events.mdx | 4 +-- docs/v1/concepts/decorators.mdx | 4 +-- docs/v1/concepts/sessions.mdx | 7 +++--- docs/v1/examples/langchain.mdx | 4 +-- docs/v1/integrations/cohere.mdx | 6 ++--- docs/v1/integrations/langchain.mdx | 26 ++++++++++---------- docs/v1/integrations/litellm.mdx | 12 ++++----- docs/v1/introduction.mdx | 4 +-- docs/v1/quickstart.mdx | 6 ++--- docs/v1/usage/langchain-callback-handler.mdx | 14 +++++------ docs/v1/usage/multiple-sessions.mdx | 2 +- docs/v1/usage/recording-events.mdx | 8 +++--- docs/v1/usage/sdk-reference.mdx | 8 +++--- docs/v1/usage/tracking-llm-calls.mdx | 4 +-- 16 files changed, 56 insertions(+), 57 deletions(-) diff --git a/docs/snippets/add-env-tooltip.mdx b/docs/snippets/add-env-tooltip.mdx index 240a4019b..a3b1c6a0d 100644 --- a/docs/snippets/add-env-tooltip.mdx +++ b/docs/snippets/add-env-tooltip.mdx @@ -1,3 +1,3 @@ - Set your API Key as an `.env` variable for easy access. + Set your API key as an `.env` variable for easy access. \ No newline at end of file diff --git a/docs/snippets/github-stars.mdx b/docs/snippets/github-stars.mdx index e31a312c6..1e73b3ccd 100644 --- a/docs/snippets/github-stars.mdx +++ b/docs/snippets/github-stars.mdx @@ -1 +1 @@ -Look useful? [Star us on Github](https://github.com/AgentOps-AI/agentops)! (you may be our 2,000th 😊) \ No newline at end of file +Look useful? [Star us on GitHub](https://github.com/AgentOps-AI/agentops)! (you may be our 2,000th 😊) \ No newline at end of file diff --git a/docs/v0/recording-events.mdx b/docs/v0/recording-events.mdx index c831a5a2a..ff5eccbe6 100644 --- a/docs/v0/recording-events.mdx +++ b/docs/v0/recording-events.mdx @@ -18,7 +18,7 @@ def sample_function(...): ... ``` -The the decorator will record the function's parameters, returns, and the time duration. We suggest using this on functions that take a long time and contain nested functions. For example, if you decorate a function that makes several openai calls, then each openai call will show in the replay graph as a child of the decorated function. +The decorator will record the function's parameters, returns, and the time duration. We suggest using this on functions that take a long time and contain nested functions. For example, if you decorate a function that makes several openai calls, then each openai call will show in the replay graph as a child of the decorated function. record_action: @@ -36,7 +36,7 @@ ao_client.record(Event("event_type1")) ``` In AgentOps, each session is associated with a number of "Events". Events have -must have an "event_type" which is any abitrary string of your choice. It might be something +must have an "event_type" which is any arbitrary string of your choice. It might be something like "OpenAI Call". Events can also have other information such as the parameters of the operation, the returned data, alongside tags, etc. 
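To make the v0 recording pattern above concrete, here is a minimal hedged sketch. The `params`, `returns`, and `tags` keyword names are assumptions inferred from the description above ("the parameters of the operation, the returned data, alongside tags"), the `Client` constructor is assumed from the `ao_client` usage shown, and the API key is a placeholder. Treat this as an illustration of the shape of the call, not the definitive v0 signature.

```python
import agentops
from agentops import Event

# Placeholder key; constructor form assumed from the v0 ao_client usage above.
ao_client = agentops.Client("<AGENTOPS_API_KEY>")

# "OpenAI Call" is an arbitrary event_type string, as the docs describe.
# The params/returns/tags keyword names are assumptions, not confirmed v0 API.
ao_client.record(
    Event(
        "OpenAI Call",
        params={"prompt": "Hello"},  # parameters of the operation
        returns="Hi there!",         # the returned data
        tags=["example"],            # optional tags
    )
)
```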
diff --git a/docs/v1/concepts/decorators.mdx b/docs/v1/concepts/decorators.mdx index 49425e7a5..01f372e30 100644 --- a/docs/v1/concepts/decorators.mdx +++ b/docs/v1/concepts/decorators.mdx @@ -17,13 +17,13 @@ If your implementation uses Classes to denote Agents, this decorator enables aut Learn more about tracking agents [here](/v1/usage/tracking-agents). ## `@record_action()` -Sometimes your agent system will use functions that are important to track as [`Actions`](/v1/concepts/events/#actionevent). +Sometimes, your agent system uses functions that are important to track as [`Actions`](/v1/concepts/events/#actionevent). Adding this decorator above any function will allow every instance of that function call to be tracked and displayed in your [Session](v1/concepts/sessions) Drill-Down on the dashboard. ## `@record_tool()` -Some functions are used as Tools. If you're not using an agent framework that records [`ToolEvents`](/v1/concepts/events/#toolevent) with AgentOps automatically, this decorator will record `ToolEvents` when the function is called. +Some functions are used as Tools. If you are not using an agent framework that records [`ToolEvents`](/v1/concepts/events/#toolevent) with AgentOps automatically, this decorator will record `ToolEvents` when the function is called. Adding this decorator above any function will allow every instance of that function call to be tracked and displayed in your [Session](v1/concepts/sessions) Drill-Down on the dashboard. diff --git a/docs/v1/concepts/sessions.mdx b/docs/v1/concepts/sessions.mdx index 25f95f88f..1da20fb34 100644 --- a/docs/v1/concepts/sessions.mdx +++ b/docs/v1/concepts/sessions.mdx @@ -58,7 +58,7 @@ Calling `agentops.init(auto_start_session=False)` will initialize the AgentOps S To start a session later, call `agentops.start_session()` [(reference)](/v1/usage/sdk-reference/#start-session) -Both `agentops.init()` and `agentops.start_session()` works as a factory pattern and returns a `Session` object. The above methods can all be called on this session object. +Both `agentops.init()` and `agentops.start_session()` work as a factory pattern and return a `Session` object. The above methods can all be called on this session object. ## Ending a Session If a process ends without any call to agentops, it will show in the dashboard as `Indeterminate`. @@ -71,7 +71,7 @@ with an existing session_id. `agentops.init(inherited_session_id=)` `agentops.start_session(inherited_session_id=)` -You can retrieve the current session_id by assigning the returned value from `init()` or `start_session()` +You can retrieve the current `session_id` by assigning the returned value from `init()` or `start_session()` ```python python @@ -90,8 +90,7 @@ Both processes will now contribute data to the same session. ## The AgentOps SDK Client _More info for the curious_ -Under the hood, `agentops.init()` sets up a `Client` object with various configuration options like your API key, worker thread options -for when to send out batches of events, etc. Whenever you start a new session, these configuration options will automatically +Under the hood, `agentops.init()` creates a `Client` object with various configuration options. Whenever you start a new session, these configuration options will automatically be applied. You can also apply different configuration options when you start a new session by passing in a [Configuration](/v1/usage/sdk-reference/#configuration) object. 
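As a concrete illustration of the session-inheritance flow just described, here is a minimal sketch composed only from calls that appear in these docs. The API key is a placeholder, and in practice the two halves run in separate processes; this is a sketch, not a complete program.

```python
import agentops

# --- Process A: initialize without auto-starting, then start explicitly ---
agentops.init(api_key="<AGENTOPS_API_KEY>", auto_start_session=False)
session = agentops.start_session()
print(session.session_id)  # share this id with the second process

# --- Process B: attach to the same session by its id ---
agentops.init(
    api_key="<AGENTOPS_API_KEY>",
    inherited_session_id=str(session.session_id),
)

# Both processes now contribute events to the same session; either can end it.
agentops.end_session(end_state="Success")
```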
diff --git a/docs/v1/examples/langchain.mdx b/docs/v1/examples/langchain.mdx index bd121bb0e..dd59d4bd2 100644 --- a/docs/v1/examples/langchain.mdx +++ b/docs/v1/examples/langchain.mdx @@ -1,6 +1,6 @@ --- -title: 'Langchain Example' -description: 'Using the Langchain Callback Handler' +title: 'LangChain Example' +description: 'Using the LangChain Callback Handler' mode: "wide" --- _View Notebook on Github_ diff --git a/docs/v1/integrations/cohere.mdx b/docs/v1/integrations/cohere.mdx index f80cad05a..35771a8af 100644 --- a/docs/v1/integrations/cohere.mdx +++ b/docs/v1/integrations/cohere.mdx @@ -7,7 +7,7 @@ import CodeTooltip from '/snippets/add-code-tooltip.mdx' import EnvTooltip from '/snippets/add-env-tooltip.mdx' -This is a living integration. Should you need any added functionality message us on [Discord](https://discord.gg/UgJyyxx7uc)! +This is a living integration. Should you need any added functionality, message us on [Discord](https://discord.gg/UgJyyxx7uc)! @@ -41,7 +41,7 @@ This is a living integration. Should you need any added functionality message us - Requires cohere>=5.4.0 + Requires `cohere>=5.4.0` @@ -54,7 +54,7 @@ This is a living integration. Should you need any added functionality message us - Execute your program and visit [app.agentops.ai/drilldown](https://app.agentops.ai/drilldown) to observe your Agents! 🕵️ + Execute your program and visit [app.agentops.ai/drilldown](https://app.agentops.ai/drilldown) to observe your Agent! 🕵️ After your run, AgentOps prints a clickable url to console linking directly to your session in the Dashboard diff --git a/docs/v1/integrations/langchain.mdx b/docs/v1/integrations/langchain.mdx index 575091f3b..1b3106917 100644 --- a/docs/v1/integrations/langchain.mdx +++ b/docs/v1/integrations/langchain.mdx @@ -1,16 +1,16 @@ --- -title: Langchain -description: "AgentOps provides first class support for Lanchain applications" +title: LangChain +description: "AgentOps provides first class support for LangChain applications" --- import EnvTooltip from '/snippets/add-env-tooltip.mdx' -AgentOps works seamlessly with applications built using Langchain. +AgentOps works seamlessly with applications built using LangChain. -## Adding AgentOps to Langchain applications +## Adding AgentOps to LangChain applications - + ```bash pip pip install agentops @@ -24,7 +24,7 @@ AgentOps works seamlessly with applications built using Langchain. [Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub while you're at it (you may be our 2,000th 😊) - Import the following Langchain and AgentOps dependencies + Import the following LangChain and AgentOps dependencies ```python python import os @@ -34,15 +34,15 @@ AgentOps works seamlessly with applications built using Langchain. ``` - + - Note that you don't need to set up a separate agentops.init() call, as the Langchain callback handler will automatically initialize the AgentOps client for you. + Note that you don't need to set up a separate agentops.init() call, as the LangChain callback handler will automatically initialize the AgentOps client for you. - Set up your Langchain agent with the AgentOps callback handler and AgentOps will automatically record your Langchain sessions. + Set up your LangChain agent with the AgentOps callback handler, and AgentOps will automatically record your LangChain sessions. 
```python python - handler = LangchainCallbackHandler(api_key=AGENTOPS_API_KEY, tags=['Langchain Example']) + handler = LangchainCallbackHandler(api_key=AGENTOPS_API_KEY, tags=['LangChain Example']) @@ -70,9 +70,9 @@ AgentOps works seamlessly with applications built using Langchain. - Execute your program and visit [app.agentops.ai/drilldown](https://app.agentops.ai/drilldown) to observe your Langchain Agent! 🕵️ + Execute your program and visit [app.agentops.ai/drilldown](https://app.agentops.ai/drilldown) to observe your LangChain Agent! 🕵️ - After your run, AgentOps prints a clickable url to console linking directly to your session in the Dashboard + After your run, AgentOps prints a clickable URL to the console linking directly to your session in the Dashboard
{/* Intentionally blank div for newline */} @@ -90,7 +90,7 @@ AgentOps works seamlessly with applications built using Langchain. from langchain.agents import initialize_agent, AgentType from agentops.langchain_callback_handler import LangchainCallbackHandler - handler = LangchainCallbackHandler(api_key=AGENTOPS_API_KEY, tags=['Langchain Example']) + handler = LangchainCallbackHandler(api_key=AGENTOPS_API_KEY, tags=['LangChain Example']) llm = ChatOpenAI(openai_api_key=OPENAI_API_KEY, callbacks=[handler], diff --git a/docs/v1/integrations/litellm.mdx b/docs/v1/integrations/litellm.mdx index e82916c55..ed851b84c 100644 --- a/docs/v1/integrations/litellm.mdx +++ b/docs/v1/integrations/litellm.mdx @@ -1,27 +1,27 @@ --- title: LiteLLM description: "Call the latest models using the OpenAI format including: -Llama, Mistral, Claude, Gemini, Gemma, Dall-E, Whisper" +Llama, Mistral, Claude, Gemini, Gemma, DALL-E, Whisper" --- ## LiteLLM From [LiteLLM's docs](https://docs.litellm.ai/docs/): -Call 100+ LLMs using the same Input/Output Format +Call 400+ LLMs using the same input/output Format - Translate inputs to provider's `completion`, `embedding`, and `image_generation` endpoints -- Consistent output. Text responses will always be available at `['choices'][0]['message']['content']` +- Consistent output. Text responses will always be available at `['choices'][0]['message']['content']`. - Retry/fallback logic across multiple deployments (e.g. Azure/OpenAI) - Track spend & set budgets per project -LiteLLM also supports many [providers](https://docs.litellm.ai/docs/providers) +LiteLLM also supports many [providers](https://docs.litellm.ai/docs/providers). ## Using AgentOps with LiteLLM -### Requires litellm>=1.3.1 +### Requires `litellm>=1.3.1` -AgentOps requires you to make a minor adjustment to how you call LiteLLM. +AgentOps requires a minor adjustment to how you call LiteLLM. ```python python # Do not use LiteLLM like this diff --git a/docs/v1/introduction.mdx b/docs/v1/introduction.mdx index e7992adce..db5367b43 100644 --- a/docs/v1/introduction.mdx +++ b/docs/v1/introduction.mdx @@ -32,7 +32,7 @@ And we do it all in just two lines of code... ## The AgentOps Dashboard -With just two lines of code, you can free yourself from the chains of the terminal and instead visualize your agents' behavior +With just two lines of code, you can free yourself from the chains of the terminal and, instead, visualize your agents' behavior in your AgentOps Dashboard. After setting up AgentOps, each execution of your program is recorded as a session and the above data is automatically recorded for you. @@ -53,7 +53,7 @@ Find any past sessions from your Session Drawer. Most powerful of all is the Session Waterfall. On the left, a time visualization of all your LLM calls, Action events, Tool calls, and Errors. -On the right, specific details about the event you've selected on the waterfall. For instance the exact prompt and completion for a given LLM call. +On the right, specific details about the event you've selected on the waterfall. For instance the exact prompt and completion for a given LLM call. Most of which has been automatically recorded for you. diff --git a/docs/v1/quickstart.mdx b/docs/v1/quickstart.mdx index 3bc5e2428..5df0c48c5 100644 --- a/docs/v1/quickstart.mdx +++ b/docs/v1/quickstart.mdx @@ -32,7 +32,7 @@ import EnvTooltip from '/snippets/add-env-tooltip.mdx' Execute your program and visit [app.agentops.ai/drilldown](https://app.agentops.ai/drilldown) to observe your Agent! 
🕵️ - After your run, AgentOps prints a clickable url to console linking directly to your session in the Dashboard + After your run, AgentOps prints a clickable URL to console linking directly to your session in the Dashboard
{/* Intentionally blank div for newline */} @@ -52,7 +52,7 @@ import EnvTooltip from '/snippets/add-env-tooltip.mdx' will see these function calls alongside your LLM calls from instantiating the AgentOps client. ```python python # (record specific functions) - @agentops.record_action('sample function being record') + @agentops.record_action('sample function being recorded') def sample_function(...): ... ``` @@ -70,7 +70,7 @@ import EnvTooltip from '/snippets/add-env-tooltip.mdx' - Finally, you should end your session by calling `.end_session()` with whether your session + Finally, you should end your session by calling `.end_session()` indicating whether your session was successful or not `(Success|Fail)`. We suggest setting session state depending on how your agent exits or whether your agent succeeded or not. You can also specify a end state reason, such as user interrupted, ran to completion, or unhandled exception. diff --git a/docs/v1/usage/langchain-callback-handler.mdx b/docs/v1/usage/langchain-callback-handler.mdx index 9b36644d2..8337a2871 100644 --- a/docs/v1/usage/langchain-callback-handler.mdx +++ b/docs/v1/usage/langchain-callback-handler.mdx @@ -1,12 +1,12 @@ --- -title: 'Langchain Callback Handler' -description: 'How to use AgentOps with Langchain' +title: 'LangChain Callback Handler' +description: 'How to use AgentOps with LangChain' --- -By default, AgentOps is compatible with agents using Langchain with our LLM Instrumentor as long as they're using +By default, AgentOps is compatible with agents using LangChain with our LLM Instrumentor as long as they're using supported models. -As an alternative to instrumenting, the Langchain Callback Handler is available. +As an alternative to instrumenting, the LangChain Callback Handler is available. ## Constructor @@ -41,7 +41,7 @@ properly include `instrument_llm_calls=False`. In this case, call ### Implement Callback Handler -Initialize the handler with its constructor and pass it into the callbacks array from Langchain. +Initialize the handler with its constructor and pass it into the callbacks array from LangChain. ```python from agentops.langchain_callback_handler import LangchainCallbackHandler ChatOpenAI(callbacks=[LangchainCallbackHandler()]) @@ -63,9 +63,9 @@ response = chain.invoke({"animal": "bears"}) ## Why use the handler? -If your project uses Langchain for Agents, Events and Tools, it may be easier to use the callback Handler for observability. +If your project uses LangChain for Agents, Events and Tools, it may be easier to use the callback Handler for observability. -If your project uses models with Langchain that are not yet supported by AgentOps, they can be supported by the Handler. +If your project uses models with LangChain that are not yet supported by AgentOps, they can be supported by the Handler. diff --git a/docs/v1/usage/multiple-sessions.mdx b/docs/v1/usage/multiple-sessions.mdx index 38c29aac6..f84c18a6b 100644 --- a/docs/v1/usage/multiple-sessions.mdx +++ b/docs/v1/usage/multiple-sessions.mdx @@ -159,7 +159,7 @@ session.record(Event(...)) # Assigning LLM Calls When we have multiple active sessions, it's impossible for AgentOps to know which session a particular LLM call belongs to without a little help. 
-To track an LLM Call, use [`session.patch()`](/v1/concepts/sessions#patch) +To track an LLM call, use [`session.patch()`](/v1/concepts/sessions#patch) ```python import agentops diff --git a/docs/v1/usage/recording-events.mdx b/docs/v1/usage/recording-events.mdx index f7fec071a..361188ec4 100644 --- a/docs/v1/usage/recording-events.mdx +++ b/docs/v1/usage/recording-events.mdx @@ -16,12 +16,12 @@ and record an event for your function. ```python python from agentops import record_action -@record_action('sample function being record') +@record_action('sample function being recorded') def sample_function(...): ... ``` -The decorator will record the function's parameters, returns, and the time duration. We suggest using this on functions that take a long time and contain nested functions. For example, if you decorate a function that makes several openai calls, then each openai call will show in the replay graph as a child of the decorated function. +The decorator will record the function's parameters, return values, and the time duration. We suggest using this on functions that take a long time and contain nested functions. For example, if you decorate a function that makes several OpenAI calls, then each OpenAI call will show in the replay graph as a child of the decorated function. ## `@record_tool` Decorator @@ -37,12 +37,12 @@ def sample_tool(...): ... ``` -The decorator will record the function's parameters, returns, and the time duration. We suggest using this on functions that take a long time and contain nested functions. For example, if you decorate a function that makes several openai calls, then each openai call will show in the replay graph as a child of the decorated function. +The decorator will record the function's parameters, returns, and the time duration. We suggest using this on functions that take a long time and contain nested functions. For example, if you decorate a function that makes several OpenAI calls, then each OpenAI call will show in the replay graph as a child of the decorated function. ## `record()` Method -From this point, simply call the .record() method in the AgentOps client: +From this point, simply call the `.record()` method in the AgentOps client: Record any child of the [Event type](/v1/concepts/events) or ErrorEvent. diff --git a/docs/v1/usage/sdk-reference.mdx b/docs/v1/usage/sdk-reference.mdx index 647ba101c..876aee697 100644 --- a/docs/v1/usage/sdk-reference.mdx +++ b/docs/v1/usage/sdk-reference.mdx @@ -51,7 +51,7 @@ Start a new [Session](/v1/concepts/sessions) for recording events. ### `end_session()` -End the current session with the AgentOps service. +Ends the current session with the AgentOps service. **Parameters**: @@ -113,8 +113,8 @@ Set the parent API key which has visibility over projects it is a parent of. ### `stop_instrumenting()` -Stop instrumenting LLM calls. This is typically used by agent frameworks (i.e. [CrewAI](/v1/integrations/crewai), -[autogen](/v1/integrations/autogen)) to stop using the AgentOps auto instrumentation of LLM libraries like OpenAI. This +Stops instrumenting LLM calls. This is typically used by agent frameworks (i.e., [CrewAI](/v1/integrations/crewai) and +[autogen](/v1/integrations/autogen)) to stop using AgentOps' auto-instrumentation of LLM libraries such as OpenAI. This allows these frameworks to use their own instrumenting or callback handler. @@ -151,7 +151,7 @@ Stores the configuration settings for AgentOps clients. 
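Pulling the reference entries above together, a hedged end-to-end sketch looks like the following. Every keyword shown is documented elsewhere in this section or in the Sessions docs; the API key is a placeholder, and defaults may differ between SDK versions.

```python
import agentops

agentops.init(
    api_key="<AGENTOPS_API_KEY>",
    auto_start_session=False,   # defer session creation (see Sessions docs)
    instrument_llm_calls=True,  # default; set False when using the LangChain handler
)

session = agentops.start_session()
# ... run your agent here ...
agentops.end_session(end_state="Success")  # or "Fail" / "Indeterminate"
```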
[Reference](/v1/usage/langchain-callback-handler) This callback handler is intended to be used as an option in place of AgentOps auto-instrumenting. This is only useful -when using Langchain as your LLM calling library. +when using LangChain as your LLM calling library. diff --git a/docs/v1/usage/tracking-llm-calls.mdx b/docs/v1/usage/tracking-llm-calls.mdx index 9cb93ab8b..b448fd18f 100644 --- a/docs/v1/usage/tracking-llm-calls.mdx +++ b/docs/v1/usage/tracking-llm-calls.mdx @@ -14,7 +14,7 @@ Try these steps: 1. Make sure you have the latest version of the AgentOps SDK installed. We are constantly updating it to support new LLM libraries and releases. 2. Make sure you are calling `agentops.init()` *after* importing the LLM module but *before* you are calling the LLM method. 3. Make sure the `instrument_llm_calls` parameter of `agentops.init()` is set to `True` (default). -4. Make sure if you have more than one concurrent session, to patch the LLM call as described [here](/v1/usage/multiple-sssions). +4. Make sure if you have more than one concurrent session, you patch the LLM call as described [here](/v1/usage/multiple-sssions). Still not working? Please let us know! You can find us on [Discord](https://discord.gg/DR2abmETjZ), [GitHub](https://github.com/AgentOps-AI/agentops), @@ -32,7 +32,7 @@ To get started, just follow the quick start guide. To stop tracking LLM calls after running `agentops.init()`, you can call `agentops.stop_instrumenting()`. -This function reverts the changes made to your LLM Provider's module, removing AgentOps instrumentation. +This function reverts the changes made to your LLM provider's module, removing AgentOps instrumentation. _Special consideration for Cohere: Calling `stop_instrumenting()` has no effect on previously instantiated Cohere clients. You must create a new Cohere client after calling this function._ From fecf20cf8c6597a32975dc89d05b3474aa0df0ba Mon Sep 17 00:00:00 2001 From: Howard Gil Date: Tue, 17 Sep 2024 13:58:56 -0700 Subject: [PATCH 05/14] Fixing duplicate llm_pkey errors (#389) * Fixed for multithreading and clarity * dont need threading * leftover line * fixed for threading * saving state. 
will replace with better solution * removing self from llm_event in handle_response llm tracker logic due to multithreading issue * cleaning up time travel output * removing unused line --------- Co-authored-by: Shawn Qiu --- agentops/llms/__init__.py | 1 - agentops/llms/anthropic.py | 50 ++++++++++++++++++-------------------- agentops/llms/cohere.py | 46 +++++++++++++++++------------------ agentops/llms/groq.py | 50 ++++++++++++++++++-------------------- agentops/llms/litellm.py | 50 ++++++++++++++++++-------------------- agentops/llms/ollama.py | 40 +++++++++++++++--------------- agentops/llms/openai.py | 50 ++++++++++++++++++-------------------- 7 files changed, 135 insertions(+), 152 deletions(-) diff --git a/agentops/llms/__init__.py b/agentops/llms/__init__.py index e3e6f7cf9..8c7ba5f4a 100644 --- a/agentops/llms/__init__.py +++ b/agentops/llms/__init__.py @@ -43,7 +43,6 @@ class LlmTracker: def __init__(self, client): self.client = client - self.completion = "" def override_api(self): """ diff --git a/agentops/llms/anthropic.py b/agentops/llms/anthropic.py index 322d21815..e0e78891f 100644 --- a/agentops/llms/anthropic.py +++ b/agentops/llms/anthropic.py @@ -31,27 +31,27 @@ def handle_response( from anthropic.resources import AsyncMessages from anthropic.types import Message - self.llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs) + llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs) if session is not None: - self.llm_event.session_id = session.session_id + llm_event.session_id = session.session_id def handle_stream_chunk(chunk: Message): try: # We take the first chunk and accumulate the deltas from all subsequent chunks to build one full chat completion if chunk.type == "message_start": - self.llm_event.returns = chunk - self.llm_event.agent_id = check_call_stack_for_agent_id() - self.llm_event.model = kwargs["model"] - self.llm_event.prompt = kwargs["messages"] - self.llm_event.prompt_tokens = chunk.message.usage.input_tokens - self.llm_event.completion = { + llm_event.returns = chunk + llm_event.agent_id = check_call_stack_for_agent_id() + llm_event.model = kwargs["model"] + llm_event.prompt = kwargs["messages"] + llm_event.prompt_tokens = chunk.message.usage.input_tokens + llm_event.completion = { "role": chunk.message.role, "content": "", # Always returned as [] in this instance type } elif chunk.type == "content_block_start": if chunk.content_block.type == "text": - self.llm_event.completion["content"] += chunk.content_block.text + llm_event.completion["content"] += chunk.content_block.text elif chunk.content_block.type == "tool_use": self.tool_id = chunk.content_block.id @@ -62,7 +62,7 @@ def handle_stream_chunk(chunk: Message): elif chunk.type == "content_block_delta": if chunk.delta.type == "text_delta": - self.llm_event.completion["content"] += chunk.delta.text + llm_event.completion["content"] += chunk.delta.text elif chunk.delta.type == "input_json_delta": self.tool_event[self.tool_id].logs[ @@ -73,15 +73,15 @@ def handle_stream_chunk(chunk: Message): pass elif chunk.type == "message_delta": - self.llm_event.completion_tokens = chunk.usage.output_tokens + llm_event.completion_tokens = chunk.usage.output_tokens elif chunk.type == "message_stop": - self.llm_event.end_timestamp = get_ISO_time() - self._safe_record(session, self.llm_event) + llm_event.end_timestamp = get_ISO_time() + self._safe_record(session, llm_event) except Exception as e: self._safe_record( - session, ErrorEvent(trigger_event=self.llm_event, exception=e) + session, 
ErrorEvent(trigger_event=llm_event, exception=e) ) kwargs_str = pprint.pformat(kwargs) @@ -124,23 +124,21 @@ async def async_generator(): # Handle object responses try: - self.llm_event.returns = response.model_dump() - self.llm_event.agent_id = check_call_stack_for_agent_id() - self.llm_event.prompt = kwargs["messages"] - self.llm_event.prompt_tokens = response.usage.input_tokens - self.llm_event.completion = { + llm_event.returns = response.model_dump() + llm_event.agent_id = check_call_stack_for_agent_id() + llm_event.prompt = kwargs["messages"] + llm_event.prompt_tokens = response.usage.input_tokens + llm_event.completion = { "role": "assistant", "content": response.content[0].text, } - self.llm_event.completion_tokens = response.usage.output_tokens - self.llm_event.model = response.model - self.llm_event.end_timestamp = get_ISO_time() + llm_event.completion_tokens = response.usage.output_tokens + llm_event.model = response.model + llm_event.end_timestamp = get_ISO_time() - self._safe_record(session, self.llm_event) + self._safe_record(session, llm_event) except Exception as e: - self._safe_record( - session, ErrorEvent(trigger_event=self.llm_event, exception=e) - ) + self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e)) kwargs_str = pprint.pformat(kwargs) response = pprint.pformat(response) logger.warning( diff --git a/agentops/llms/cohere.py b/agentops/llms/cohere.py index 68658761e..d76c221db 100644 --- a/agentops/llms/cohere.py +++ b/agentops/llms/cohere.py @@ -52,9 +52,9 @@ def handle_response( # from cohere.types.chat import ChatGenerationChunk # NOTE: Cohere only returns one message and its role will be CHATBOT which we are coercing to "assistant" - self.llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs) + llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs) if session is not None: - self.llm_event.session_id = session.session_id + llm_event.session_id = session.session_id self.action_events = {} @@ -62,22 +62,22 @@ def handle_stream_chunk(chunk, session: Optional[Session] = None): # We take the first chunk and accumulate the deltas from all subsequent chunks to build one full chat completion if isinstance(chunk, StreamedChatResponse_StreamStart): - self.llm_event.returns = chunk - self.llm_event.agent_id = check_call_stack_for_agent_id() - self.llm_event.model = kwargs.get("model", "command-r-plus") - self.llm_event.prompt = kwargs["message"] - self.llm_event.completion = "" + llm_event.returns = chunk + llm_event.agent_id = check_call_stack_for_agent_id() + llm_event.model = kwargs.get("model", "command-r-plus") + llm_event.prompt = kwargs["message"] + llm_event.completion = "" return try: if isinstance(chunk, StreamedChatResponse_StreamEnd): # StreamedChatResponse_TextGeneration = LLMEvent - self.llm_event.completion = { + llm_event.completion = { "role": "assistant", "content": chunk.response.text, } - self.llm_event.end_timestamp = get_ISO_time() - self._safe_record(session, self.llm_event) + llm_event.end_timestamp = get_ISO_time() + self._safe_record(session, llm_event) # StreamedChatResponse_SearchResults = ActionEvent search_results = chunk.response.search_results @@ -115,7 +115,7 @@ def handle_stream_chunk(chunk, session: Optional[Session] = None): self._safe_record(session, action_event) elif isinstance(chunk, StreamedChatResponse_TextGeneration): - self.llm_event.completion += chunk.text + llm_event.completion += chunk.text elif isinstance(chunk, StreamedChatResponse_ToolCallsGeneration): pass elif 
isinstance(chunk, StreamedChatResponse_CitationGeneration): @@ -139,7 +139,7 @@ def handle_stream_chunk(chunk, session: Optional[Session] = None): except Exception as e: self._safe_record( - session, ErrorEvent(trigger_event=self.llm_event, exception=e) + session, ErrorEvent(trigger_event=llm_event, exception=e) ) kwargs_str = pprint.pformat(kwargs) @@ -175,15 +175,15 @@ def generator(): # Not enough to record StreamedChatResponse_ToolCallsGeneration because the tool may have not gotten called try: - self.llm_event.returns = response - self.llm_event.agent_id = check_call_stack_for_agent_id() - self.llm_event.prompt = [] + llm_event.returns = response + llm_event.agent_id = check_call_stack_for_agent_id() + llm_event.prompt = [] if response.chat_history: role_map = {"USER": "user", "CHATBOT": "assistant", "SYSTEM": "system"} for i in range(len(response.chat_history) - 1): message = response.chat_history[i] - self.llm_event.prompt.append( + llm_event.prompt.append( { "role": role_map.get(message.role, message.role), "content": message.message, @@ -191,19 +191,17 @@ def generator(): ) last_message = response.chat_history[-1] - self.llm_event.completion = { + llm_event.completion = { "role": role_map.get(last_message.role, last_message.role), "content": last_message.message, } - self.llm_event.prompt_tokens = response.meta.tokens.input_tokens - self.llm_event.completion_tokens = response.meta.tokens.output_tokens - self.llm_event.model = kwargs.get("model", "command-r-plus") + llm_event.prompt_tokens = response.meta.tokens.input_tokens + llm_event.completion_tokens = response.meta.tokens.output_tokens + llm_event.model = kwargs.get("model", "command-r-plus") - self._safe_record(session, self.llm_event) + self._safe_record(session, llm_event) except Exception as e: - self._safe_record( - session, ErrorEvent(trigger_event=self.llm_event, exception=e) - ) + self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e)) kwargs_str = pprint.pformat(kwargs) response = pprint.pformat(response) logger.warning( diff --git a/agentops/llms/groq.py b/agentops/llms/groq.py index 7d5f68005..ca8696387 100644 --- a/agentops/llms/groq.py +++ b/agentops/llms/groq.py @@ -37,21 +37,21 @@ def handle_response( from groq.resources.chat import AsyncCompletions from groq.types.chat import ChatCompletionChunk - self.llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs) + llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs) if session is not None: - self.llm_event.session_id = session.session_id + llm_event.session_id = session.session_id def handle_stream_chunk(chunk: ChatCompletionChunk): # NOTE: prompt/completion usage not returned in response when streaming # We take the first ChatCompletionChunk and accumulate the deltas from all subsequent chunks to build one full chat completion - if self.llm_event.returns == None: - self.llm_event.returns = chunk + if llm_event.returns == None: + llm_event.returns = chunk try: - accumulated_delta = self.llm_event.returns.choices[0].delta - self.llm_event.agent_id = check_call_stack_for_agent_id() - self.llm_event.model = chunk.model - self.llm_event.prompt = kwargs["messages"] + accumulated_delta = llm_event.returns.choices[0].delta + llm_event.agent_id = check_call_stack_for_agent_id() + llm_event.model = chunk.model + llm_event.prompt = kwargs["messages"] # NOTE: We assume for completion only choices[0] is relevant choice = chunk.choices[0] @@ -70,21 +70,19 @@ def handle_stream_chunk(chunk: ChatCompletionChunk): if 
choice.finish_reason: # Streaming is done. Record LLMEvent - self.llm_event.returns.choices[0].finish_reason = ( - choice.finish_reason - ) - self.llm_event.completion = { + llm_event.returns.choices[0].finish_reason = choice.finish_reason + llm_event.completion = { "role": accumulated_delta.role, "content": accumulated_delta.content, "function_call": accumulated_delta.function_call, "tool_calls": accumulated_delta.tool_calls, } - self.llm_event.end_timestamp = get_ISO_time() + llm_event.end_timestamp = get_ISO_time() - self._safe_record(session, self.llm_event) + self._safe_record(session, llm_event) except Exception as e: self._safe_record( - session, ErrorEvent(trigger_event=self.llm_event, exception=e) + session, ErrorEvent(trigger_event=llm_event, exception=e) ) kwargs_str = pprint.pformat(kwargs) @@ -127,19 +125,17 @@ async def async_generator(): # v1.0.0+ responses are objects try: - self.llm_event.returns = response.model_dump() - self.llm_event.agent_id = check_call_stack_for_agent_id() - self.llm_event.prompt = kwargs["messages"] - self.llm_event.prompt_tokens = response.usage.prompt_tokens - self.llm_event.completion = response.choices[0].message.model_dump() - self.llm_event.completion_tokens = response.usage.completion_tokens - self.llm_event.model = response.model - - self._safe_record(session, self.llm_event) + llm_event.returns = response.model_dump() + llm_event.agent_id = check_call_stack_for_agent_id() + llm_event.prompt = kwargs["messages"] + llm_event.prompt_tokens = response.usage.prompt_tokens + llm_event.completion = response.choices[0].message.model_dump() + llm_event.completion_tokens = response.usage.completion_tokens + llm_event.model = response.model + + self._safe_record(session, llm_event) except Exception as e: - self._safe_record( - session, ErrorEvent(trigger_event=self.llm_event, exception=e) - ) + self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e)) kwargs_str = pprint.pformat(kwargs) response = pprint.pformat(response) diff --git a/agentops/llms/litellm.py b/agentops/llms/litellm.py index 053c42516..30b4c25a2 100644 --- a/agentops/llms/litellm.py +++ b/agentops/llms/litellm.py @@ -49,21 +49,21 @@ def handle_response( from openai.types.chat import ChatCompletionChunk from litellm.utils import CustomStreamWrapper - self.llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs) + llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs) if session is not None: - self.llm_event.session_id = session.session_id + llm_event.session_id = session.session_id def handle_stream_chunk(chunk: ChatCompletionChunk): # NOTE: prompt/completion usage not returned in response when streaming # We take the first ChatCompletionChunk and accumulate the deltas from all subsequent chunks to build one full chat completion - if self.llm_event.returns == None: - self.llm_event.returns = chunk + if llm_event.returns == None: + llm_event.returns = chunk try: - accumulated_delta = self.llm_event.returns.choices[0].delta - self.llm_event.agent_id = check_call_stack_for_agent_id() - self.llm_event.model = chunk.model - self.llm_event.prompt = kwargs["messages"] + accumulated_delta = llm_event.returns.choices[0].delta + llm_event.agent_id = check_call_stack_for_agent_id() + llm_event.model = chunk.model + llm_event.prompt = kwargs["messages"] # NOTE: We assume for completion only choices[0] is relevant choice = chunk.choices[0] @@ -82,21 +82,19 @@ def handle_stream_chunk(chunk: ChatCompletionChunk): if choice.finish_reason: # Streaming is done. 
Record LLMEvent - self.llm_event.returns.choices[0].finish_reason = ( - choice.finish_reason - ) - self.llm_event.completion = { + llm_event.returns.choices[0].finish_reason = choice.finish_reason + llm_event.completion = { "role": accumulated_delta.role, "content": accumulated_delta.content, "function_call": accumulated_delta.function_call, "tool_calls": accumulated_delta.tool_calls, } - self.llm_event.end_timestamp = get_ISO_time() + llm_event.end_timestamp = get_ISO_time() - self._safe_record(session, self.llm_event) + self._safe_record(session, llm_event) except Exception as e: self._safe_record( - session, ErrorEvent(trigger_event=self.llm_event, exception=e) + session, ErrorEvent(trigger_event=llm_event, exception=e) ) kwargs_str = pprint.pformat(kwargs) @@ -149,19 +147,17 @@ async def async_generator(): # v1.0.0+ responses are objects try: - self.llm_event.returns = response - self.llm_event.agent_id = check_call_stack_for_agent_id() - self.llm_event.prompt = kwargs["messages"] - self.llm_event.prompt_tokens = response.usage.prompt_tokens - self.llm_event.completion = response.choices[0].message.model_dump() - self.llm_event.completion_tokens = response.usage.completion_tokens - self.llm_event.model = response.model - - self._safe_record(session, self.llm_event) + llm_event.returns = response + llm_event.agent_id = check_call_stack_for_agent_id() + llm_event.prompt = kwargs["messages"] + llm_event.prompt_tokens = response.usage.prompt_tokens + llm_event.completion = response.choices[0].message.model_dump() + llm_event.completion_tokens = response.usage.completion_tokens + llm_event.model = response.model + + self._safe_record(session, llm_event) except Exception as e: - self._safe_record( - session, ErrorEvent(trigger_event=self.llm_event, exception=e) - ) + self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e)) kwargs_str = pprint.pformat(kwargs) response = pprint.pformat(response) diff --git a/agentops/llms/ollama.py b/agentops/llms/ollama.py index bdcb21909..d17aba9ad 100644 --- a/agentops/llms/ollama.py +++ b/agentops/llms/ollama.py @@ -19,25 +19,25 @@ class OllamaProvider(InstrumentedProvider): def handle_response( self, response, kwargs, init_timestamp, session: Optional[Session] = None ) -> dict: - self.llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs) + llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs) def handle_stream_chunk(chunk: dict): message = chunk.get("message", {"role": None, "content": ""}) if chunk.get("done"): - self.llm_event.completion["content"] += message.get("content") - self.llm_event.end_timestamp = get_ISO_time() - self.llm_event.model = f'ollama/{chunk.get("model")}' - self.llm_event.returns = chunk - self.llm_event.returns["message"] = self.llm_event.completion - self.llm_event.prompt = kwargs["messages"] - self.llm_event.agent_id = check_call_stack_for_agent_id() - self.client.record(self.llm_event) - - if self.llm_event.completion is None: - self.llm_event.completion = message + llm_event.completion["content"] += message.get("content") + llm_event.end_timestamp = get_ISO_time() + llm_event.model = f'ollama/{chunk.get("model")}' + llm_event.returns = chunk + llm_event.returns["message"] = llm_event.completion + llm_event.prompt = kwargs["messages"] + llm_event.agent_id = check_call_stack_for_agent_id() + self.client.record(llm_event) + + if llm_event.completion is None: + llm_event.completion = message else: - self.llm_event.completion["content"] += message.get("content") + 
llm_event.completion["content"] += message.get("content") if inspect.isgenerator(response): @@ -48,15 +48,15 @@ def generator(): return generator() - self.llm_event.end_timestamp = get_ISO_time() + llm_event.end_timestamp = get_ISO_time() - self.llm_event.model = f'ollama/{response["model"]}' - self.llm_event.returns = response - self.llm_event.agent_id = check_call_stack_for_agent_id() - self.llm_event.prompt = kwargs["messages"] - self.llm_event.completion = response["message"] + llm_event.model = f'ollama/{response["model"]}' + llm_event.returns = response + llm_event.agent_id = check_call_stack_for_agent_id() + llm_event.prompt = kwargs["messages"] + llm_event.completion = response["message"] - self._safe_record(session, self.llm_event) + self._safe_record(session, llm_event) return response def override(self): diff --git a/agentops/llms/openai.py b/agentops/llms/openai.py index 0fd31a1d2..c99523d72 100644 --- a/agentops/llms/openai.py +++ b/agentops/llms/openai.py @@ -30,21 +30,21 @@ def handle_response( from openai.resources import AsyncCompletions from openai.types.chat import ChatCompletionChunk - self.llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs) + llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs) if session is not None: - self.llm_event.session_id = session.session_id + llm_event.session_id = session.session_id def handle_stream_chunk(chunk: ChatCompletionChunk): # NOTE: prompt/completion usage not returned in response when streaming # We take the first ChatCompletionChunk and accumulate the deltas from all subsequent chunks to build one full chat completion - if self.llm_event.returns == None: - self.llm_event.returns = chunk + if llm_event.returns == None: + llm_event.returns = chunk try: - accumulated_delta = self.llm_event.returns.choices[0].delta - self.llm_event.agent_id = check_call_stack_for_agent_id() - self.llm_event.model = chunk.model - self.llm_event.prompt = kwargs["messages"] + accumulated_delta = llm_event.returns.choices[0].delta + llm_event.agent_id = check_call_stack_for_agent_id() + llm_event.model = chunk.model + llm_event.prompt = kwargs["messages"] # NOTE: We assume for completion only choices[0] is relevant choice = chunk.choices[0] @@ -63,21 +63,19 @@ def handle_stream_chunk(chunk: ChatCompletionChunk): if choice.finish_reason: # Streaming is done. 
Record LLMEvent - self.llm_event.returns.choices[0].finish_reason = ( - choice.finish_reason - ) - self.llm_event.completion = { + llm_event.returns.choices[0].finish_reason = choice.finish_reason + llm_event.completion = { "role": accumulated_delta.role, "content": accumulated_delta.content, "function_call": accumulated_delta.function_call, "tool_calls": accumulated_delta.tool_calls, } - self.llm_event.end_timestamp = get_ISO_time() + llm_event.end_timestamp = get_ISO_time() - self._safe_record(session, self.llm_event) + self._safe_record(session, llm_event) except Exception as e: self._safe_record( - session, ErrorEvent(trigger_event=self.llm_event, exception=e) + session, ErrorEvent(trigger_event=llm_event, exception=e) ) kwargs_str = pprint.pformat(kwargs) @@ -120,19 +118,17 @@ async def async_generator(): # v1.0.0+ responses are objects try: - self.llm_event.returns = response - self.llm_event.agent_id = check_call_stack_for_agent_id() - self.llm_event.prompt = kwargs["messages"] - self.llm_event.prompt_tokens = response.usage.prompt_tokens - self.llm_event.completion = response.choices[0].message.model_dump() - self.llm_event.completion_tokens = response.usage.completion_tokens - self.llm_event.model = response.model - - self._safe_record(session, self.llm_event) + llm_event.returns = response + llm_event.agent_id = check_call_stack_for_agent_id() + llm_event.prompt = kwargs["messages"] + llm_event.prompt_tokens = response.usage.prompt_tokens + llm_event.completion = response.choices[0].message.model_dump() + llm_event.completion_tokens = response.usage.completion_tokens + llm_event.model = response.model + + self._safe_record(session, llm_event) except Exception as e: - self._safe_record( - session, ErrorEvent(trigger_event=self.llm_event, exception=e) - ) + self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e)) kwargs_str = pprint.pformat(kwargs) response = pprint.pformat(response) From 26261b277c5b6d1e6b09cb2ace034631f54a421b Mon Sep 17 00:00:00 2001 From: Braelyn Boynton Date: Wed, 18 Sep 2024 06:16:04 +0900 Subject: [PATCH 06/14] Changes for crew refactor (#393) * remove crew langchain prep * support crew by version * remove comment --- agentops/__init__.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/agentops/__init__.py b/agentops/__init__.py index 199835486..cc27af432 100755 --- a/agentops/__init__.py +++ b/agentops/__init__.py @@ -9,6 +9,8 @@ from .log_config import logger from .session import Session import threading +from importlib.metadata import version as get_version +from packaging import version try: from .partners.langchain_callback_handler import ( @@ -23,7 +25,13 @@ Client().add_default_tags(["autogen"]) if "crewai" in sys.modules: - Client().configure(instrument_llm_calls=False) + crew_version = version.parse(get_version("crewai")) + + if crew_version < version.parse("0.56.0"): # uses langchain + Client().configure(instrument_llm_calls=False) + else: # uses LiteLLM + Client().configure(instrument_llm_calls=True) + Client().add_default_tags(["crewai"]) From 3e44f9f08cdf67df0bfe14c7f1c5dfc08d7010b2 Mon Sep 17 00:00:00 2001 From: Shawn Qiu Date: Tue, 17 Sep 2024 14:18:57 -0700 Subject: [PATCH 07/14] bump version to 0.3.11 (#394) --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 9f3661570..f3d7b9fa4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "agentops" -version = 
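
The change running through these provider diffs swaps the shared `self.llm_event` attribute for a function-local `llm_event`. Provider objects are long-lived and shared by every completion call, so a second in-flight request could previously overwrite the event the first call was still accumulating. A minimal, self-contained sketch of that hazard under illustrative names (this is not AgentOps code):

    import threading
    import time

    class Provider:
        def handle_shared(self, name):
            self.llm_event = {"completion": ""}  # shared attribute: one slot per instance
            for _ in range(3):
                time.sleep(0.001)
                self.llm_event["completion"] += name  # may land in the *other* call's event
            return self.llm_event["completion"]

        def handle_local(self, name):
            llm_event = {"completion": ""}  # local: each call owns its event
            for _ in range(3):
                time.sleep(0.001)
                llm_event["completion"] += name
            return llm_event["completion"]

    provider = Provider()
    results = []
    threads = [
        threading.Thread(target=lambda n=n: results.append(provider.handle_shared(n)))
        for n in "AB"
    ]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print(results)  # the shared variant can interleave "A" and "B" chunks in one event

Swapping `handle_shared` for `handle_local` in the thread target removes the cross-talk, which is what the diffs above achieve for the streaming and non-streaming paths alike.
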
"0.3.10" +version = "0.3.11" authors = [ { name="Alex Reibman", email="areibman@gmail.com" }, { name="Shawn Qiu", email="siyangqiu@gmail.com" }, From 1ef969e47d6546048bb9caa3343431b6d198b4e1 Mon Sep 17 00:00:00 2001 From: Braelyn Boynton Date: Wed, 18 Sep 2024 06:20:03 +0900 Subject: [PATCH 08/14] remove true condition (#395) * remove crew langchain prep * support crew by version * remove comment * no true condition --- agentops/__init__.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/agentops/__init__.py b/agentops/__init__.py index cc27af432..5a054f755 100755 --- a/agentops/__init__.py +++ b/agentops/__init__.py @@ -27,10 +27,9 @@ if "crewai" in sys.modules: crew_version = version.parse(get_version("crewai")) - if crew_version < version.parse("0.56.0"): # uses langchain + # uses langchain, greater versions will use litellm and default is to instrument + if crew_version < version.parse("0.56.0"): Client().configure(instrument_llm_calls=False) - else: # uses LiteLLM - Client().configure(instrument_llm_calls=True) Client().add_default_tags(["crewai"]) From 51433d9e4573ce9f22a7220290f8cf00b3e2157a Mon Sep 17 00:00:00 2001 From: Braelyn Boynton Date: Wed, 18 Sep 2024 06:37:01 +0900 Subject: [PATCH 09/14] ollama undo instrument (#397) --- agentops/llms/ollama.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/agentops/llms/ollama.py b/agentops/llms/ollama.py index d17aba9ad..e57792832 100644 --- a/agentops/llms/ollama.py +++ b/agentops/llms/ollama.py @@ -65,7 +65,7 @@ def override(self): self._override_chat_async_client() def undo_override(self): - if original_func is not None: + if original_func is not None and original_func != {}: import ollama ollama.chat = original_func["ollama.chat"] From 6324bf13f4692e948d41dd3e3f7466bdac837dc3 Mon Sep 17 00:00:00 2001 From: Braelyn Boynton Date: Tue, 17 Sep 2024 14:48:50 -0700 Subject: [PATCH 10/14] cohere version 5.9 support --- agentops/llms/cohere.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/agentops/llms/cohere.py b/agentops/llms/cohere.py index d76c221db..ad8f93a53 100644 --- a/agentops/llms/cohere.py +++ b/agentops/llms/cohere.py @@ -195,8 +195,8 @@ def generator(): "role": role_map.get(last_message.role, last_message.role), "content": last_message.message, } - llm_event.prompt_tokens = response.meta.tokens.input_tokens - llm_event.completion_tokens = response.meta.tokens.output_tokens + llm_event.prompt_tokens = int(response.meta.tokens.input_tokens) + llm_event.completion_tokens = int(response.meta.tokens.output_tokens) llm_event.model = kwargs.get("model", "command-r-plus") self._safe_record(session, llm_event) From c3b400f27c2f8ab8b865fef80f2463d565c83a1b Mon Sep 17 00:00:00 2001 From: Howard Gil Date: Tue, 17 Sep 2024 16:21:22 -0700 Subject: [PATCH 11/14] Automating addition of examples to docs (#396) * WIP automating addition of examples to docs * Ready * only on changes to specified folders * Added source file mapping to docs --- .../add-markdown-examples-to-docs.yml | 76 +++++++++++++++++++ docs/v1/examples/langchain.mdx | 3 +- docs/v1/examples/multi_agent.mdx | 3 +- docs/v1/examples/multi_session.mdx | 3 +- docs/v1/examples/multion.mdx | 11 +-- docs/v1/examples/recording_events.mdx | 3 +- docs/v1/examples/simple_agent.mdx | 3 +- 7 files changed, 84 insertions(+), 18 deletions(-) create mode 100644 .github/workflows/add-markdown-examples-to-docs.yml diff --git a/.github/workflows/add-markdown-examples-to-docs.yml b/.github/workflows/add-markdown-examples-to-docs.yml 
new file mode 100644 index 000000000..03e62824d --- /dev/null +++ b/.github/workflows/add-markdown-examples-to-docs.yml @@ -0,0 +1,76 @@ +name: Update Docs + +on: + push: + branches: + - main + paths: + - 'examples/**' + - 'docs/v1/examples/**' + +jobs: + add-markdown-examples-to-docs: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.x' + + - name: Install dependencies + run: | + pip install jupyter nbconvert + + - name: Convert notebooks to markdown and add to docs + run: | + set -x # Enable debug mode + for file in docs/v1/examples/*.mdx; do + echo "Processing file: $file" + source_file=$(grep -oP '(?<=\{/\* SOURCE_FILE: ).*(?= \*/\})' "$file" || true) + if [[ -z "$source_file" ]]; then + echo "No source file found in $file, skipping..." + continue + fi + echo "Source file: $source_file" + if [[ -f "$source_file" ]]; then + echo "Converting notebook to markdown" + jupyter nbconvert --to markdown "$source_file" || { echo "Failed to convert $source_file"; continue; } + markdown_file="${source_file%.ipynb}.md" + echo "Appending markdown to $file" + echo -e "\n\n" >> "$file" + cat "$markdown_file" >> "$file" || { echo "Failed to append markdown to $file"; continue; } + echo "Contents of $file after appending markdown:" + cat "$file" + echo "Removing temporary markdown file" + rm "$markdown_file" || { echo "Failed to remove $markdown_file"; continue; } + else + echo "Source file not found: $source_file" + fi + done + + - name: Commit changes + run: | + git config --local user.email "action@github.com" + git config --local user.name "GitHub Action" + git add docs/v1/examples/*.mdx + git diff --quiet && git diff --staged --quiet || git commit -m "GitHub Action: Update examples in docs from notebooks" + + - name: Create Pull Request + uses: peter-evans/create-pull-request@v5 + with: + token: ${{ secrets.GITHUB_TOKEN }} + commit-message: Update examples in docs from notebooks + title: 'Update examples in docs from notebooks' + body: | + This PR updates the examples in the docs from the corresponding notebooks. + Please review the changes before merging. + branch: update-docs-examples + base: main +# - name: Push changes +# uses: ad-m/github-push-action@master +# with: +# github_token: ${{ secrets.GITHUB_TOKEN }} +# branch: main \ No newline at end of file diff --git a/docs/v1/examples/langchain.mdx b/docs/v1/examples/langchain.mdx index dd59d4bd2..5e6c73a4d 100644 --- a/docs/v1/examples/langchain.mdx +++ b/docs/v1/examples/langchain.mdx @@ -5,5 +5,4 @@ mode: "wide" --- _View Notebook on Github_ -
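
The workflow above scans each docs page for a `{/* SOURCE_FILE: ... */}` marker, converts the referenced notebook with `jupyter nbconvert --to markdown`, and appends the result to the page. A rough local equivalent of that loop using nbconvert's Python API, offered as a sketch rather than anything this PR ships:

    import re
    from pathlib import Path

    from nbconvert import MarkdownExporter

    MARKER = re.compile(r"\{/\* SOURCE_FILE: (.*?) \*/\}")

    def append_notebook_markdown(mdx_path: Path) -> None:
        text = mdx_path.read_text()
        match = MARKER.search(text)
        if match is None:
            return  # page has no backing notebook
        notebook = Path(match.group(1))
        if not notebook.is_file():
            raise FileNotFoundError(notebook)
        body, _resources = MarkdownExporter().from_filename(str(notebook))
        mdx_path.write_text(text + "\n\n" + body)

    for mdx in Path("docs/v1/examples").glob("*.mdx"):
        append_notebook_markdown(mdx)
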
- \ No newline at end of file +{/* SOURCE_FILE: examples/langchain_examples.ipynb */} \ No newline at end of file diff --git a/docs/v1/examples/multi_agent.mdx b/docs/v1/examples/multi_agent.mdx index 6ceb1deb9..40133490a 100644 --- a/docs/v1/examples/multi_agent.mdx +++ b/docs/v1/examples/multi_agent.mdx @@ -5,5 +5,4 @@ mode: "wide" --- _View Notebook on Github_ -
- +{/* SOURCE_FILE: examples/multi_agent_example.ipynb */} \ No newline at end of file diff --git a/docs/v1/examples/multi_session.mdx b/docs/v1/examples/multi_session.mdx index 976727eb2..794a7865c 100644 --- a/docs/v1/examples/multi_session.mdx +++ b/docs/v1/examples/multi_session.mdx @@ -5,5 +5,4 @@ mode: "wide" --- _View Notebook on Github_ -
 - \ No newline at end of file +{/* SOURCE_FILE: examples/multi_session_llm.ipynb */} \ No newline at end of file diff --git a/docs/v1/examples/multion.mdx b/docs/v1/examples/multion.mdx index f7a90a820..6b1cbefe8 100644 --- a/docs/v1/examples/multion.mdx +++ b/docs/v1/examples/multion.mdx @@ -4,11 +4,6 @@ description: 'Tracking Multion usage with AgentOps' mode: "wide" --- -
-
-
-
-
-
-
- \ No newline at end of file +_View All Notebooks on Github_ + +{/* SOURCE_FILE: examples/multion_examples/Autonomous_web_browsing.ipynb */} diff --git a/docs/v1/examples/recording_events.mdx b/docs/v1/examples/recording_events.mdx index 237735217..7d08f7f74 100644 --- a/docs/v1/examples/recording_events.mdx +++ b/docs/v1/examples/recording_events.mdx @@ -5,5 +5,4 @@ mode: "wide" --- _View Notebook on Github_ -
- \ No newline at end of file +{/* SOURCE_FILE: examples/recording-events.ipynb */} \ No newline at end of file diff --git a/docs/v1/examples/simple_agent.mdx b/docs/v1/examples/simple_agent.mdx index c5f599cf7..07a41318d 100644 --- a/docs/v1/examples/simple_agent.mdx +++ b/docs/v1/examples/simple_agent.mdx @@ -5,5 +5,4 @@ mode: "wide" --- _View Notebook on Github_ -
- +{/* SOURCE_FILE: examples/openai-gpt.ipynb */} From 992b26f74576f65ef76d90ba312c73c4e8551645 Mon Sep 17 00:00:00 2001 From: Howard Gil Date: Tue, 17 Sep 2024 17:20:56 -0700 Subject: [PATCH 12/14] Elevated permissions for GITHUB_TOKEN to write PRs. Making errors more pronounced (#398) --- .../add-markdown-examples-to-docs.yml | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/.github/workflows/add-markdown-examples-to-docs.yml b/.github/workflows/add-markdown-examples-to-docs.yml index 03e62824d..fc26fb8b5 100644 --- a/.github/workflows/add-markdown-examples-to-docs.yml +++ b/.github/workflows/add-markdown-examples-to-docs.yml @@ -1,4 +1,4 @@ -name: Update Docs +name: Add Notebook Examples to Docs on: push: @@ -7,9 +7,13 @@ on: paths: - 'examples/**' - 'docs/v1/examples/**' + +permissions: + contents: write + pull-requests: write jobs: - add-markdown-examples-to-docs: + add-notebook-examples-to-docs: runs-on: ubuntu-latest steps: - name: Checkout repository @@ -31,23 +35,20 @@ jobs: echo "Processing file: $file" source_file=$(grep -oP '(?<=\{/\* SOURCE_FILE: ).*(?= \*/\})' "$file" || true) if [[ -z "$source_file" ]]; then - echo "No source file found in $file, skipping..." + echo "Error: No source file found in $file, skipping..." >&2 continue fi echo "Source file: $source_file" if [[ -f "$source_file" ]]; then echo "Converting notebook to markdown" - jupyter nbconvert --to markdown "$source_file" || { echo "Failed to convert $source_file"; continue; } + jupyter nbconvert --to markdown "$source_file" || { echo "Error: Failed to convert $source_file" >&2; continue; } markdown_file="${source_file%.ipynb}.md" echo "Appending markdown to $file" echo -e "\n\n" >> "$file" - cat "$markdown_file" >> "$file" || { echo "Failed to append markdown to $file"; continue; } - echo "Contents of $file after appending markdown:" - cat "$file" - echo "Removing temporary markdown file" - rm "$markdown_file" || { echo "Failed to remove $markdown_file"; continue; } + cat "$markdown_file" >> "$file" || { echo "Error: Failed to append markdown to $file" >&2; continue; } + rm "$markdown_file" || { echo "Error: Failed to remove $markdown_file" >&2; continue; } else - echo "Source file not found: $source_file" + echo "Error: Source file not found: $source_file" >&2 fi done From 9d805f27214a3ba323cf236826305b34231a385f Mon Sep 17 00:00:00 2001 From: Howard Gil Date: Tue, 17 Sep 2024 17:21:03 -0700 Subject: [PATCH 13/14] Limiting triggers for Github Actions (#399) --- .github/workflows/codecov.yml | 10 ++++++++-- .github/workflows/python-testing.yml | 6 ++++++ .github/workflows/tach-check.yml | 8 ++++++-- 3 files changed, 20 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codecov.yml b/.github/workflows/codecov.yml index c11602bf0..f627c1eeb 100644 --- a/.github/workflows/codecov.yml +++ b/.github/workflows/codecov.yml @@ -2,9 +2,15 @@ name: Codecov on: push: - branches: [ main ] + branches: + - main + paths: + - 'agentops/**' pull_request: - branches: [ main ] + branches: + - main + paths: + - 'agentops/**' jobs: test: diff --git a/.github/workflows/python-testing.yml b/.github/workflows/python-testing.yml index 36615c21e..6f79b66f2 100644 --- a/.github/workflows/python-testing.yml +++ b/.github/workflows/python-testing.yml @@ -4,9 +4,15 @@ on: push: branches: - main + paths: + - 'agentops/**' + - 'tests/**' pull_request: branches: - main + paths: + - 'agentops/**' + - 'tests/**' jobs: build: diff --git a/.github/workflows/tach-check.yml 
b/.github/workflows/tach-check.yml index 2bbbd15e4..32208e6a2 100644 --- a/.github/workflows/tach-check.yml +++ b/.github/workflows/tach-check.yml @@ -1,7 +1,11 @@ - name: Tach Check -on: [pull_request] +on: + pull_request: + paths: + - 'agentops/**' + - 'tests/**' + - 'examples/**' jobs: tach-check: From a246397666f4e51d0630cb6382f6a21fc401d9a7 Mon Sep 17 00:00:00 2001 From: Howard Gil Date: Tue, 17 Sep 2024 17:25:55 -0700 Subject: [PATCH 14/14] adding pip install (#401) --- examples/langchain_examples/langchain_examples.ipynb | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/examples/langchain_examples/langchain_examples.ipynb b/examples/langchain_examples/langchain_examples.ipynb index b087de3b7..0e22a0139 100644 --- a/examples/langchain_examples/langchain_examples.ipynb +++ b/examples/langchain_examples/langchain_examples.ipynb @@ -25,6 +25,7 @@ "outputs": [], "source": [ "%pip install langchain==0.2.9\n", + "%pip install langchain_openai\n", "%pip install -U agentops\n", "%pip install -U python-dotenv" ] @@ -725,7 +726,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.5" + "version": "3.12.0" } }, "nbformat": 4,
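
The final patch adds `%pip install langchain_openai` to the LangChain example because, on the pinned langchain 0.2.x line, `ChatOpenAI` ships in the separate `langchain_openai` package rather than in `langchain` itself. A hedged sketch of the wiring that notebook relies on; the handler's constructor arguments and the model name are assumptions, not confirmed signatures:

    from agentops.partners.langchain_callback_handler import LangchainCallbackHandler
    from langchain_openai import ChatOpenAI  # fails without `pip install langchain_openai`

    # Arg names below are assumed; an OPENAI_API_KEY must also be set in the environment.
    handler = LangchainCallbackHandler(api_key="<AGENTOPS_API_KEY>", tags=["Langchain Example"])
    llm = ChatOpenAI(model="gpt-3.5-turbo", callbacks=[handler])
    print(llm.invoke("Say hello in one word.").content)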