From a12b59a9a187be886c976af578b90779d575dc5a Mon Sep 17 00:00:00 2001 From: Braelyn Boynton Date: Fri, 9 Aug 2024 14:24:17 -0700 Subject: [PATCH] Langchain upgrade (#339) * WIP: langchain upgrade * debugging WIP * WIP notebook * langchain handler and example upgrade * code cleanup * black formatting * remove logger setup * cleanup * use logger * import change * code cleanup * add code back * langchain notebook merge * black formatting --- README.md | 2 +- agentops/client.py | 3 + .../partners/langchain_callback_handler.py | 50 +- examples/crew/job_posting.ipynb | 1 + examples/crew/job_posting.py | 10 +- examples/crew/markdown_validator.ipynb | 44 +- examples/langchain/langchain_examples.ipynb | 665 ++++++++++++++---- examples/litellm/litellm_example.ipynb | 4 +- .../multion/Autonomous_web_browsing.ipynb | 4 +- examples/multion/Sample_browsing_agent.ipynb | 4 +- examples/recording-events.ipynb | 2 + .../_test_langchain_handler.py | 2 +- 12 files changed, 592 insertions(+), 199 deletions(-) diff --git a/README.md b/README.md index f4a4a409..55028a03 100644 --- a/README.md +++ b/README.md @@ -141,7 +141,7 @@ To use the handler, import and set import os from langchain.chat_models import ChatOpenAI from langchain.agents import initialize_agent, AgentType -from agentops.langchain_callback_handler import LangchainCallbackHandler +from agentops.partners.langchain_callback_handler import LangchainCallbackHandler AGENTOPS_API_KEY = os.environ['AGENTOPS_API_KEY'] handler = LangchainCallbackHandler(api_key=AGENTOPS_API_KEY, tags=['Langchain Example']) diff --git a/agentops/client.py b/agentops/client.py index 4007e410..a024d9c7 100644 --- a/agentops/client.py +++ b/agentops/client.py @@ -82,6 +82,9 @@ def configure( ) def initialize(self) -> Union[Session, None]: + if self.is_initialized: + return + self.unsuppress_logs() if self._config.api_key is None: diff --git a/agentops/partners/langchain_callback_handler.py b/agentops/partners/langchain_callback_handler.py index 4c95fd74..847abe66 100644 --- a/agentops/partners/langchain_callback_handler.py +++ b/agentops/partners/langchain_callback_handler.py @@ -46,9 +46,8 @@ def __init__( endpoint: Optional[str] = None, max_wait_time: Optional[int] = None, max_queue_size: Optional[int] = None, - tags: Optional[List[str]] = None, + default_tags: Optional[List[str]] = None, ): - logging_level = os.getenv("AGENTOPS_LOGGING_LEVEL") log_levels = { "CRITICAL": logging.CRITICAL, @@ -64,12 +63,19 @@ def __init__( "endpoint": endpoint, "max_wait_time": max_wait_time, "max_queue_size": max_queue_size, - "tags": tags, + "default_tags": default_tags, } - self.ao_client = AOClient( - **{k: v for k, v in client_params.items() if v is not None}, override=False - ) + self.ao_client = AOClient() + if self.ao_client.session_count == 0: + self.ao_client.configure( + **{k: v for k, v in client_params.items() if v is not None}, + instrument_llm_calls=False, + ) + + if not self.ao_client.is_initialized: + self.ao_client.initialize() + self.agent_actions: Dict[UUID, List[ActionEvent]] = defaultdict(list) self.events = Events() @@ -93,7 +99,6 @@ def on_llm_start( }, # TODO: params is inconsistent, in ToolEvent we put it in logs model=get_model_from_kwargs(kwargs), prompt=prompts[0], - # tags=tags # TODO ) @debug_print_function_params @@ -156,15 +161,18 @@ def on_chain_start( metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> Any: - self.events.chain[str(run_id)] = ActionEvent( - params={ - **serialized, - **inputs, - **({} if metadata is None else metadata), - 
**kwargs, - }, - action_type="chain", - ) + try: + self.events.chain[str(run_id)] = ActionEvent( + params={ + **serialized, + **inputs, + **({} if metadata is None else metadata), + **kwargs, + }, + action_type="chain", + ) + except Exception as e: + logger.warning(e) @debug_print_function_params def on_chain_end( @@ -240,6 +248,8 @@ def on_tool_end( details=output, ) self.ao_client.record(error_event) + else: + self.ao_client.record(tool_event) @debug_print_function_params def on_tool_error( @@ -357,7 +367,13 @@ def on_retry( @property def session_id(self): - return self.ao_client.current_session_id + raise DeprecationWarning( + "session_id is deprecated in favor of current_session_ids" + ) + + @property + def current_session_ids(self): + return self.ao_client.current_session_ids class AsyncLangchainCallbackHandler(AsyncCallbackHandler): diff --git a/examples/crew/job_posting.ipynb b/examples/crew/job_posting.ipynb index 5e41e61c..0e9a857d 100644 --- a/examples/crew/job_posting.ipynb +++ b/examples/crew/job_posting.ipynb @@ -476,6 +476,7 @@ "source": [ "from textwrap import dedent\n", "\n", + "\n", "class Tasks:\n", " def research_company_culture_task(self, agent, company_description, company_domain):\n", " return Task(\n", diff --git a/examples/crew/job_posting.py b/examples/crew/job_posting.py index e4d7d9c4..a96d692e 100644 --- a/examples/crew/job_posting.py +++ b/examples/crew/job_posting.py @@ -3,8 +3,12 @@ import os -os.environ["SERPER_API_KEY"] = "..." -os.environ["OPENAI_API_KEY"] = "..." +# os.environ["SERPER_API_KEY"] = "..." +# os.environ["OPENAI_API_KEY"] = "..." + +from dotenv import load_dotenv + +load_dotenv() from crewai import Agent from crewai_tools.tools import WebsiteSearchTool, SerperDevTool, FileReadTool @@ -126,7 +130,7 @@ def industry_analysis_task(self, agent, company_domain, company_description): from crewai import Crew import agentops -agentops.init(tags=["crew-job-posting-example"]) +agentops.init(default_tags=["crew-job-posting-example"]) tasks = Tasks() agents = Agents() diff --git a/examples/crew/markdown_validator.ipynb b/examples/crew/markdown_validator.ipynb index 2adc853f..acb013a3 100644 --- a/examples/crew/markdown_validator.ipynb +++ b/examples/crew/markdown_validator.ipynb @@ -303,7 +303,7 @@ } ], "source": [ - "%pip install -U crewai \n", + "%pip install -U crewai\n", "%pip install -U agentops\n", "%pip install -U python-dotenv\n", "%pip install -U langchain_openai\n", @@ -400,7 +400,7 @@ }, "outputs": [], "source": [ - "agentops.init(AGENTOPS_API_KEY, default_tags=['markdown_validator'])" + "agentops.init(AGENTOPS_API_KEY, default_tags=[\"markdown_validator\"])" ] }, { @@ -424,7 +424,7 @@ " A tool to review files for markdown syntax errors.\n", "\n", " Returns:\n", - " - validation_results: A list of validation results \n", + " - validation_results: A list of validation results\n", " and suggestions on how to fix them.\n", " \"\"\"\n", "\n", @@ -439,7 +439,7 @@ " return results # Return the reviewed document\n", " except PyMarkdownApiException as this_exception:\n", " print(f\"API Exception: {this_exception}\", file=sys.stderr)\n", - " return f\"API Exception: {str(this_exception)}\"\n" + " return f\"API Exception: {str(this_exception)}\"" ] }, { @@ -455,10 +455,12 @@ " model_name=\"llama3-70b-8192\",\n", ")\n", "\n", - "default_llm = ChatOpenAI(openai_api_base=os.environ.get(\"OPENAI_API_BASE_URL\", \"https://api.openai.com/v1\"),\n", - " openai_api_key=OPENAI_API_KEY,\n", - " temperature=0.1, \n", - " model_name=os.environ.get(\"MODEL_NAME\", 
\"gpt-3.5-turbo\"))" + "default_llm = ChatOpenAI(\n", + " openai_api_base=os.environ.get(\"OPENAI_API_BASE_URL\", \"https://api.openai.com/v1\"),\n", + " openai_api_key=OPENAI_API_KEY,\n", + " temperature=0.1,\n", + " model_name=os.environ.get(\"MODEL_NAME\", \"gpt-3.5-turbo\"),\n", + ")" ] }, { @@ -486,22 +488,24 @@ "metadata": {}, "outputs": [], "source": [ - "general_agent = Agent(role='Requirements Manager',\n", - " goal=\"\"\"Provide a detailed list of the markdown \n", + "general_agent = Agent(\n", + " role=\"Requirements Manager\",\n", + " goal=\"\"\"Provide a detailed list of the markdown \n", " linting results. Give a summary with actionable \n", " tasks to address the validation results. Write your \n", " response as if you were handing it to a developer \n", " to fix the issues.\n", " DO NOT provide examples of how to fix the issues or\n", " recommend other tools to use.\"\"\",\n", - " backstory=\"\"\"You are an expert business analyst \n", + " backstory=\"\"\"You are an expert business analyst \n", "\t\t\t\t\tand software QA specialist. You provide high quality, \n", " thorough, insightful and actionable feedback via \n", " detailed list of changes and actionable tasks.\"\"\",\n", - " allow_delegation=False, \n", - " verbose=True,\n", - " tools=[markdown_validation_tool],\n", - " llm=default_llm) #groq_llm) " + " allow_delegation=False,\n", + " verbose=True,\n", + " tools=[markdown_validation_tool],\n", + " llm=default_llm,\n", + ") # groq_llm)" ] }, { @@ -519,7 +523,8 @@ "metadata": {}, "outputs": [], "source": [ - "syntax_review_task = Task(description=f\"\"\"\n", + "syntax_review_task = Task(\n", + " description=f\"\"\"\n", " Use the markdown_validation_tool to review \n", " the file(s) at this path: {filename}\n", " \n", @@ -539,8 +544,9 @@ " \n", " If you already know the answer or if you do not need \n", " to use a tool, return it as your Final Answer.\"\"\",\n", - " agent=general_agent,\n", - " expected_output=\"\")" + " agent=general_agent,\n", + " expected_output=\"\",\n", + ")" ] }, { @@ -576,7 +582,7 @@ "metadata": {}, "outputs": [], "source": [ - "agentops.end_session('Success')" + "agentops.end_session(\"Success\")" ] } ], diff --git a/examples/langchain/langchain_examples.ipynb b/examples/langchain/langchain_examples.ipynb index 0069add4..5e198cd7 100644 --- a/examples/langchain/langchain_examples.ipynb +++ b/examples/langchain/langchain_examples.ipynb @@ -7,163 +7,508 @@ "source": [ "# AgentOps Langchain Agent Implementation\n", "\n", - "Using AgentOps monitoring with Langchain is simple. We've created a LangchainCallbackHandler that will do all of the heavy lifting!" - ] - }, - { - "cell_type": "markdown", - "id": "1516a90d", - "metadata": {}, - "source": [ + "Using AgentOps monitoring with Langchain is simple. 
We've created a LangchainCallbackHandler that will do all of the heavy lifting!\n", + "\n", "First let's install the required packages" ] }, { + "metadata": { + "ExecuteTime": { + "end_time": "2024-08-08T00:29:29.288644Z", + "start_time": "2024-08-08T00:29:29.026314Z" + } + }, "cell_type": "code", - "execution_count": null, - "id": "e5fc8497", - "metadata": {}, - "outputs": [], "source": [ - "%pip install -U langchain\n", + "%pip install langchain==0.2.9\n", "%pip install -U agentops\n", "%pip install -U python-dotenv" - ] + ], + "id": "8e3d38adc7861277", + "outputs": [], + "execution_count": 1 }, { - "cell_type": "markdown", - "id": "9480596a", "metadata": {}, - "source": [ - "Then import them" - ] + "cell_type": "markdown", + "source": "Then import them", + "id": "effc8ee7453a6c3" }, { "cell_type": "code", - "execution_count": null, - "id": "initial_id", - "metadata": {}, - "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.agents import initialize_agent, AgentType\n", - "from langchain.agents import tool\n", "import os\n", + "from langchain_openai import ChatOpenAI\n", + "from langchain.agents import tool, AgentExecutor, create_openai_tools_agent\n", "from dotenv import load_dotenv" - ] + ], + "metadata": { + "collapsed": false, + "ExecuteTime": { + "end_time": "2024-08-08T00:29:30.052348Z", + "start_time": "2024-08-08T00:29:29.289841Z" + } + }, + "id": "d344305baa26d651", + "outputs": [], + "execution_count": 2 }, { "cell_type": "markdown", - "id": "57ddb8eca4e8a3cb", - "metadata": {}, "source": [ "The only difference with using AgentOps is that we'll also import this special Callback Handler" - ] + ], + "metadata": { + "collapsed": false + }, + "id": "3584d84a441169b0" }, { "cell_type": "code", - "execution_count": null, - "id": "7e8f8cd098ad5b57", + "source": [ + "from agentops.partners.langchain_callback_handler import (\n", + " LangchainCallbackHandler as AgentOpsLangchainCallbackHandler,\n", + ")" + ], "metadata": { - "collapsed": false + "collapsed": false, + "ExecuteTime": { + "end_time": "2024-08-08T00:29:30.058535Z", + "start_time": "2024-08-08T00:29:30.054497Z" + } }, + "id": "256f01cac66d7d90", "outputs": [], + "execution_count": 3 + }, + { + "cell_type": "markdown", "source": [ - "from agentops.partners.langchain_callback_handler import (\n", - " LangchainCallbackHandler as AgentOpsLangchainCallbackHandler,\n", + "Next, we'll grab our two API keys. You can use dotenv like below or however else you like to load environment variables" + ], + "metadata": { + "collapsed": false + }, + "id": "afab0f7bbd847916" + }, + { + "cell_type": "code", + "source": "load_dotenv()", + "metadata": { + "collapsed": false, + "ExecuteTime": { + "end_time": "2024-08-08T00:29:30.072366Z", + "start_time": "2024-08-08T00:29:30.063997Z" + } + }, + "id": "3f9189f4d13ebd07", + "outputs": [ + { + "data": { + "text/plain": [ + "True" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": 4 + }, + { + "cell_type": "markdown", + "source": [ + "This is where AgentOps comes into play. Before creating our LLM instance via Langchain, first we'll create an instance of the AO LangchainCallbackHandler. After the handler is initialized, a session will be recorded automatically.\n", + "\n", + "Pass in your API key, and optionally any tags to describe this session for easier lookup in the AO dashboard." 
+ ], + "metadata": { + "collapsed": false + }, + "id": "6f5a9ab030c636c6" + }, + { + "cell_type": "code", + "source": [ + "from langchain_core.prompts import ChatPromptTemplate\n", + "\n", + "AGENTOPS_API_KEY = os.environ.get(\"AGENTOPS_API_KEY\")\n", + "OPENAI_API_KEY = os.environ.get(\"OPENAI_API_KEY\")\n", + "\n", + "agentops_handler = AgentOpsLangchainCallbackHandler(\n", + " api_key=AGENTOPS_API_KEY, default_tags=[\"Langchain Example\"]\n", + ")\n", + "\n", + "llm = ChatOpenAI(\n", + " openai_api_key=OPENAI_API_KEY, callbacks=[agentops_handler], model=\"gpt-3.5-turbo\"\n", + ")\n", + "\n", + "# You must pass in a callback handler to record your agent\n", + "llm.callbacks = [agentops_handler]\n", + "\n", + "prompt = ChatPromptTemplate.from_messages(\n", + " [\n", + " (\"system\", \"You are a helpful assistant. Respond only in Spanish.\"),\n", + " (\"human\", \"{input}\"),\n", + " # Placeholders fill up a **list** of messages\n", + " (\"placeholder\", \"{agent_scratchpad}\"),\n", + " # (\"tool_names\", \"find_movie\")\n", + " ]\n", ")" - ] + ], + "metadata": { + "collapsed": false, + "ExecuteTime": { + "end_time": "2024-08-08T00:29:31.018329Z", + "start_time": "2024-08-08T00:29:30.074528Z" + } + }, + "id": "7d4accd2f68404fb", + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "🖇 AgentOps: \u001B[34m\u001B[34mSession Replay: https://app.agentops.ai/drilldown?session_id=418f60fc-a709-4a61-952c-6b9670406198\u001B[0m\u001B[0m\n" + ] + } + ], + "execution_count": 5 }, { "cell_type": "markdown", - "id": "25f189b0", - "metadata": {}, "source": [ - "Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables" - ] + "You can also retrieve the `session_id` of the newly created session." + ], + "metadata": { + "collapsed": false + }, + "id": "821b2367b1082673" }, { "cell_type": "code", - "execution_count": null, - "id": "974514a8", - "metadata": {}, - "outputs": [], "source": [ - "load_dotenv()\n", - "OPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\") or \"\"\n", - "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\"" - ] + "print(\"Agent Ops session ID: \" + str(agentops_handler.current_session_ids))" + ], + "metadata": { + "collapsed": false, + "ExecuteTime": { + "end_time": "2024-08-08T00:29:31.021351Z", + "start_time": "2024-08-08T00:29:31.019165Z" + } + }, + "id": "6bb30c31eaa5ba02", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Agent Ops session ID: ['418f60fc-a709-4a61-952c-6b9670406198']\n" + ] + } + ], + "execution_count": 6 }, { "cell_type": "markdown", - "id": "51f083697b783fa4", + "source": [ + "Agents generally use tools. Let's define a simple tool here. Tool usage is also recorded." + ], "metadata": { "collapsed": false }, + "id": "f7362d89b6a7af4c" + }, + { + "cell_type": "code", "source": [ - "This is where AgentOps comes into play. Before creating our LLM instance via Langchain, first we'll create an instance of the AO LangchainCallbackHandler. After the handler is initialized, a session will be recorded automatically.\n", + "@tool\n", + "def find_movie(genre: str) -> str:\n", + " \"\"\"Find available movies\"\"\"\n", + " if genre == \"drama\":\n", + " return \"Dune 2\"\n", + " else:\n", + " return \"Pineapple Express\"\n", "\n", - "Optionally pass in any tags to describe this session for easier lookup in the AO dashboard." 
- ] + "\n", + "tools = [find_movie]" + ], + "metadata": { + "collapsed": false, + "ExecuteTime": { + "end_time": "2024-08-08T00:29:31.071133Z", + "start_time": "2024-08-08T00:29:31.022304Z" + } + }, + "id": "4a28f85842129016", + "outputs": [], + "execution_count": 7 + }, + { + "cell_type": "markdown", + "source": [ + "For each tool, you need to also add the callback handler" + ], + "metadata": { + "collapsed": false + }, + "id": "186984add993f839" }, { "cell_type": "code", - "execution_count": null, - "id": "d432fe915edb6365", + "source": [ + "for t in tools:\n", + " t.callbacks = [agentops_handler]" + ], + "metadata": { + "collapsed": false, + "ExecuteTime": { + "end_time": "2024-08-08T00:29:31.073655Z", + "start_time": "2024-08-08T00:29:31.071945Z" + } + }, + "id": "afe9113b61c67e80", + "outputs": [], + "execution_count": 8 + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "Add the tools to our LLM", + "id": "6dd5f322d26f692" + }, + { + "metadata": {}, + "cell_type": "code", + "outputs": [], + "execution_count": 9, + "source": "llm_with_tools = llm.bind_tools([find_movie])", + "id": "e469ab337023053e" + }, + { + "cell_type": "markdown", + "source": "Finally, let's create our agent! Pass in the callback handler to the agent, and all the actions will be recorded in the AO Dashboard", "metadata": { "collapsed": false }, + "id": "88b96e62db542900" + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2024-08-08T00:29:31.127744Z", + "start_time": "2024-08-08T00:29:31.124945Z" + } + }, + "cell_type": "code", + "source": [ + "agent = create_openai_tools_agent(llm, tools, prompt)\n", + "agent_executor = AgentExecutor(agent=agent, tools=tools)" + ], + "id": "d45ff1b7a3843191", "outputs": [], + "execution_count": 10 + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2024-08-08T00:29:32.666768Z", + "start_time": "2024-08-08T00:29:31.128749Z" + } + }, + "cell_type": "code", "source": [ - "agentops_handler = AgentOpsLangchainCallbackHandler(\n", - " api_key=AGENTOPS_API_KEY, tags=[\"Langchain Example\"]\n", - ")\n", - "\n", - "llm = ChatOpenAI(\n", - " openai_api_key=OPENAI_API_KEY, callbacks=[agentops_handler], model=\"gpt-3.5-turbo\"\n", + "agent_executor.invoke(\n", + " {\"input\": \"What comedies are playing?\"}, config={\"callback\": [agentops_handler]}\n", ")" - ] + ], + "id": "bb90bf54b5a42a63", + "outputs": [ + { + "data": { + "text/plain": [ + "{'input': 'What comedies are playing?',\n", + " 'output': '\"Pineapple Express\" está disponible para ver. ¿Te gustaría saber más sobre esta película?'}" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": 11 + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "Now if we look in the AgentOps dashboard, you will see a session recorded with the LLM calls and tool usage.", + "id": "c466cb71c9dc68db" + }, + { + "metadata": {}, + "cell_type": "code", + "outputs": [], + "execution_count": null, + "source": "assert False", + "id": "8571b7ae931b3cb2" }, { "cell_type": "markdown", - "id": "38d309f07363b58e", + "source": [ + "## Langchain V0.1 Example\n", + "This example is out of date." 
+ ], "metadata": { "collapsed": false }, + "id": "97a1a264a71876" + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2024-08-08T00:29:32.741186Z", + "start_time": "2024-08-08T00:29:32.741133Z" + } + }, + "cell_type": "code", + "source": "%pip install langchain==0.1.6", + "id": "decac159bb492462", + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "id": "initial_id", + "metadata": { + "ExecuteTime": { + "end_time": "2024-08-08T00:29:32.741653Z", + "start_time": "2024-08-08T00:29:32.741599Z" + } + }, "source": [ - "You can also retrieve the `session_id` of the newly created session." + "import os\n", + "from langchain_openai import ChatOpenAI\n", + "from langchain.agents import initialize_agent, AgentType\n", + "from langchain.agents import tool" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "id": "57ddb8eca4e8a3cb", + "metadata": {}, + "source": [ + "The only difference with using AgentOps is that we'll also import this special Callback Handler" ] }, { "cell_type": "code", - "execution_count": null, - "id": "f7e3a37cde3f9c22", + "source": [ + "from agentops.partners.langchain_callback_handler import (\n", + " LangchainCallbackHandler as AgentOpsLangchainCallbackHandler,\n", + ")" + ], "metadata": { "collapsed": false }, + "id": "7e8f8cd098ad5b57", "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", "source": [ - "print(\"Agent Ops session ID: \" + str(agentops_handler.session_id))" - ] + "Next, we'll grab our two API keys. You can use dotenv like below or however else you like to load environment variables" + ], + "metadata": { + "collapsed": false + }, + "id": "14a1b8e08a2e9eb3" + }, + { + "cell_type": "code", + "source": [ + "from dotenv import load_dotenv\n", + "\n", + "load_dotenv()" + ], + "metadata": { + "collapsed": false + }, + "id": "ff6cfc570599871f", + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", - "id": "42f226ace56ef6f5", + "source": [ + "This is where AgentOps comes into play. Before creating our LLM instance via Langchain, first we'll create an instance of the AO LangchainCallbackHandler. After the handler is initialized, a session will be recorded automatically.\n", + "\n", + "Pass in your API key, and optionally any tags to describe this session for easier lookup in the AO dashboard." + ], "metadata": { "collapsed": false }, + "id": "51f083697b783fa4" + }, + { + "cell_type": "code", "source": [ - "Agents generally use tools. Let's define a simple tool here. Tool usage is also recorded." - ] + "AGENTOPS_API_KEY = os.environ.get(\"AGENTOPS_API_KEY\")\n", + "OPENAI_API_KEY = os.environ.get(\"OPENAI_API_KEY\")\n", + "\n", + "agentops_handler = AgentOpsLangchainCallbackHandler(\n", + " api_key=AGENTOPS_API_KEY, default_tags=[\"Langchain Example\"]\n", + ")\n", + "\n", + "llm = ChatOpenAI(\n", + " openai_api_key=OPENAI_API_KEY, callbacks=[agentops_handler], model=\"gpt-3.5-turbo\"\n", + ")" + ], + "metadata": { + "collapsed": false + }, + "id": "d432fe915edb6365", + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "You can also retrieve the `session_id` of the newly created session." 
+ ], + "metadata": { + "collapsed": false + }, + "id": "38d309f07363b58e" }, { "cell_type": "code", - "execution_count": null, - "id": "c103a2edbe837abd", + "source": [ + "print(\"Agent Ops session ID: \" + str(agentops_handler.current_session_ids))" + ], "metadata": { "collapsed": false }, + "id": "f7e3a37cde3f9c22", "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Agents generally use tools. Let's define a simple tool here. Tool usage is also recorded." + ], + "metadata": { + "collapsed": false + }, + "id": "42f226ace56ef6f5" + }, + { + "cell_type": "code", "source": [ "@tool\n", "def find_movie(genre) -> str:\n", @@ -175,49 +520,49 @@ "\n", "\n", "tools = [find_movie]" - ] + ], + "metadata": { + "collapsed": false + }, + "id": "c103a2edbe837abd", + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", - "id": "4fb7633857b19bf0", + "source": [ + "For each tool, you need to also add the callback handler" + ], "metadata": { "collapsed": false }, - "source": [ - "For each tool, you need to also add the callback handler" - ] + "id": "4fb7633857b19bf0" }, { "cell_type": "code", - "execution_count": null, - "id": "a0345f08bf1c5ecd", + "source": [ + "for t in tools:\n", + " t.callbacks = [agentops_handler]" + ], "metadata": { "collapsed": false }, + "id": "a0345f08bf1c5ecd", "outputs": [], - "source": [ - "for t in tools:\n", - " t.callbacks = [agentops_handler]" - ] + "execution_count": null }, { "cell_type": "markdown", - "id": "12a02b833716676b", + "source": [ + "Finally, let's use our agent! Pass in the callback handler to the agent, and all the actions will be recorded in the AO Dashboard" + ], "metadata": { "collapsed": false }, - "source": [ - "Finally, let's use our agent! Pass in the callback handler to the agent, and all the actions will be recorded in the AO Dashboard" - ] + "id": "12a02b833716676b" }, { "cell_type": "code", - "execution_count": null, - "id": "2d2e83fa69b30add", - "metadata": { - "collapsed": false - }, - "outputs": [], "source": [ "agent = initialize_agent(\n", " tools,\n", @@ -229,94 +574,98 @@ " ], # You must pass in a callback handler to record your agent\n", " handle_parsing_errors=True,\n", ")" - ] + ], + "metadata": { + "collapsed": false + }, + "id": "2d2e83fa69b30add", + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, - "id": "df2bc3a384493e1e", + "source": [ + "agent.invoke(\"What comedies are playing?\", callbacks=[agentops_handler])" + ], "metadata": { "collapsed": false }, + "id": "df2bc3a384493e1e", "outputs": [], - "source": [ - "agent.run(\"What comedies are playing?\", callbacks=[agentops_handler])" - ] + "execution_count": null }, { "cell_type": "markdown", - "id": "2230edd919182a55", - "metadata": { - "collapsed": false - }, "source": [ "## Check your session\n", "Finally, check your run on [AgentOps](https://app.agentops.ai)\n", "![image.png](attachment:3d9393fa-3d6a-4193-b6c9-43413dc19d15.png)" - ] - }, - { - "cell_type": "markdown", - "id": "fbf4a3ec5fa60d74", + ], "metadata": { "collapsed": false }, + "id": "2230edd919182a55" + }, + { + "cell_type": "markdown", "source": [ "# Async Agents\n", "\n", "Several langchain agents require async callback handlers. AgentOps also supports this." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ed63a166b343e1a2", + ], "metadata": { "collapsed": false }, - "outputs": [], - "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.agents import initialize_agent, AgentType\n", - "from langchain.agents import tool\n", - "import os\n", - "from dotenv import load_dotenv" - ] + "id": "fbf4a3ec5fa60d74" }, { "cell_type": "code", - "execution_count": null, - "id": "aa15223969f97b3d", + "source": [ + "import os\n", + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.agents import initialize_agent, AgentType\n", + "from langchain.agents import tool" + ], "metadata": { "collapsed": false }, + "id": "ed63a166b343e1a2", "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", "source": [ "from agentops.partners.langchain_callback_handler import (\n", " AsyncLangchainCallbackHandler as AgentOpsAsyncLangchainCallbackHandler,\n", ")" - ] + ], + "metadata": { + "collapsed": false + }, + "id": "aa15223969f97b3d", + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, - "id": "824e1d44", - "metadata": {}, - "outputs": [], "source": [ + "from dotenv import load_dotenv\n", + "\n", "load_dotenv()\n", - "OPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\") or \"\"\n", - "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ae76cfe058f5e4e4", + "\n", + "AGENTOPS_API_KEY = os.environ.get(\"AGENTOPS_API_KEY\")\n", + "OPENAI_API_KEY = os.environ.get(\"OPENAI_API_KEY\")" + ], "metadata": { "collapsed": false }, + "id": "818357483f039b60", "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", "source": [ "agentops_handler = AgentOpsAsyncLangchainCallbackHandler(\n", " api_key=AGENTOPS_API_KEY, tags=[\"Async Example\"]\n", @@ -327,16 +676,16 @@ ")\n", "\n", "print(\"Agent Ops session ID: \" + str(await agentops_handler.session_id))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1201049766be84a7", + ], "metadata": { "collapsed": false }, + "id": "ae76cfe058f5e4e4", "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", "source": [ "@tool\n", "def find_movie(genre) -> str:\n", @@ -351,16 +700,16 @@ "\n", "for t in tools:\n", " t.callbacks = [agentops_handler]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8d4f9dd39b79d542", + ], "metadata": { "collapsed": false }, + "id": "1201049766be84a7", "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", "source": [ "agent = initialize_agent(\n", " tools,\n", @@ -372,20 +721,26 @@ ")\n", "\n", "await agent.arun(\"What comedies are playing?\")" - ] - }, - { - "cell_type": "markdown", - "id": "fb276a2e-f1c3-4f0f-8818-b7730e9d3ff7", + ], "metadata": { "collapsed": false }, + "id": "8d4f9dd39b79d542", + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", "source": [ "## Check your session\n", "Finally, check your run on [AgentOps](https://app.agentops.ai)\n", "\n", "![image.png](attachment:69f2121a-d437-4c09-bbbe-c76c9243ee19.png)" - ] + ], + "metadata": { + "collapsed": false + }, + "id": "fb276a2e-f1c3-4f0f-8818-b7730e9d3ff7" } ], "metadata": { diff --git a/examples/litellm/litellm_example.ipynb b/examples/litellm/litellm_example.ipynb index 78cccf3a..9cde2d32 100644 --- a/examples/litellm/litellm_example.ipynb +++ b/examples/litellm/litellm_example.ipynb @@ -102,7 +102,9 @@ "outputs": [], "source": [ 
"messages = [{\"role\": \"user\", \"content\": \"Write a 12 word poem about secret agents.\"}]\n", - "response = litellm.completion(model=\"gpt-4\", messages=messages) # or the model of your choosing\n", + "response = litellm.completion(\n", + " model=\"gpt-4\", messages=messages\n", + ") # or the model of your choosing\n", "print(response.choices[0].message.content)" ] }, diff --git a/examples/multion/Autonomous_web_browsing.ipynb b/examples/multion/Autonomous_web_browsing.ipynb index f5b6a269..407a5bd2 100644 --- a/examples/multion/Autonomous_web_browsing.ipynb +++ b/examples/multion/Autonomous_web_browsing.ipynb @@ -126,7 +126,9 @@ "metadata": {}, "outputs": [], "source": [ - "agentops.init(AGENTOPS_API_KEY, auto_start_session=False, default_tags=[\"MultiOn browse example\"])" + "agentops.init(\n", + " AGENTOPS_API_KEY, auto_start_session=False, default_tags=[\"MultiOn browse example\"]\n", + ")" ] }, { diff --git a/examples/multion/Sample_browsing_agent.ipynb b/examples/multion/Sample_browsing_agent.ipynb index a0b890fc..9a75ff10 100644 --- a/examples/multion/Sample_browsing_agent.ipynb +++ b/examples/multion/Sample_browsing_agent.ipynb @@ -98,7 +98,9 @@ "metadata": {}, "outputs": [], "source": [ - "agentops.init(AGENTOPS_API_KEY, auto_start_session=False, default_tags=[\"MultiOn browse example\"])" + "agentops.init(\n", + " AGENTOPS_API_KEY, auto_start_session=False, default_tags=[\"MultiOn browse example\"]\n", + ")" ] }, { diff --git a/examples/recording-events.ipynb b/examples/recording-events.ipynb index 7c744a3c..b1ec915a 100644 --- a/examples/recording-events.ipynb +++ b/examples/recording-events.ipynb @@ -128,10 +128,12 @@ "source": [ "from agentops import record_action\n", "\n", + "\n", "@record_action(\"add numbers\")\n", "def add(x, y):\n", " return x + y\n", "\n", + "\n", "add(2, 4)" ] }, diff --git a/tests/langchain_handlers/_test_langchain_handler.py b/tests/langchain_handlers/_test_langchain_handler.py index 0cde5c5d..97cc2205 100644 --- a/tests/langchain_handlers/_test_langchain_handler.py +++ b/tests/langchain_handlers/_test_langchain_handler.py @@ -4,7 +4,7 @@ from langchain.agents import initialize_agent, AgentType from dotenv import load_dotenv from langchain.agents import tool -from agentops.langchain_callback_handler import ( +from agentops.partners.langchain_callback_handler import ( LangchainCallbackHandler as AgentOpsLangchainCallbackHandler, AsyncLangchainCallbackHandler as AgentOpsAsyncLangchainCallbackHandler, )