diff --git a/agent_gateway/agents/agent.py b/agent_gateway/agents/agent.py
deleted file mode 100644
index 12aba72..0000000
--- a/agent_gateway/agents/agent.py
+++ /dev/null
@@ -1,336 +0,0 @@
-# Copyright 2024 Snowflake Inc.
-# SPDX-License-Identifier: Apache-2.0
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import annotations
-
-import json
-import logging
-from abc import abstractmethod
-from collections.abc import Sequence
-from pathlib import Path
-from typing import Any, Dict, List, Optional, Tuple, Union
-
-import yaml
-from agent_gateway.chains.llm_chain import LLMChain
-from langchain.agents.agent import BaseSingleActionAgent
-from langchain.agents.agent_types import AgentType
-from langchain.callbacks.base import BaseCallbackManager
-from langchain.callbacks.manager import Callbacks
-from langchain.prompts.few_shot import FewShotPromptTemplate
-from langchain.prompts.prompt import PromptTemplate
-from langchain.pydantic_v1 import root_validator
-from langchain.schema import (
-    AgentAction,
-    AgentFinish,
-    BaseOutputParser,
-    BasePromptTemplate,
-)
-from langchain.schema.language_model import BaseLanguageModel
-from langchain.schema.messages import BaseMessage
-from langchain.tools import BaseTool
-
-logger = logging.getLogger(__name__)
-
-
-class AgentOutputParser(BaseOutputParser):
-    """Base class for parsing agent output into agent action/finish."""
-
-    @abstractmethod
-    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
-        """Parse text into agent action/finish."""
-
-
-class Agent(BaseSingleActionAgent):
-    """Agent that calls the language model and decides the action.
-
-    This is driven by an LLMChain. The prompt in the LLMChain MUST include
-    a variable called "agent_scratchpad" where the agent can put its
-    intermediary work.
-
-    Copied from Langchain v0.0.283,
-    but merged with the parent class BaseSingleActionAgent for simplicity.
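-
-    Example:
-        .. code-block:: python
-
-            # A minimal sketch: ``MyAgent``, ``llm`` and ``tools`` are
-            # illustrative stand-ins for a concrete Agent subclass and its
-            # dependencies. The key requirement is that the prompt exposes
-            # an ``agent_scratchpad`` input variable.
-            prompt = PromptTemplate(
-                input_variables=["input", "agent_scratchpad"],
-                template="Answer the question: {input}\n{agent_scratchpad}",
-            )
-            agent = MyAgent.from_llm_and_tools(
-                llm=llm, tools=tools, prompt=prompt
-            )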
- """ - - llm_chain: LLMChain - output_parser: AgentOutputParser - allowed_tools: Optional[List[str]] = None - - @property - def _agent_type(self) -> str: - """Return Identifier of agent type.""" - raise NotImplementedError - - def dict(self, **kwargs: Any) -> Dict: - """Return dictionary representation of agent.""" - _dict = super().dict() - _type = self._agent_type - if isinstance(_type, AgentType): - _dict["_type"] = str(_type.value) - else: - _dict["_type"] = _type - del _dict["output_parser"] - return _dict - - def get_allowed_tools(self) -> Optional[List[str]]: - return self.allowed_tools - - @property - def return_values(self) -> List[str]: - return ["output"] - - def _fix_text(self, text: str) -> str: - """Fix the text.""" - raise ValueError("fix_text not implemented for this agent.") - - @property - def _stop(self) -> List[str]: - return [ - f"\n{self.observation_prefix.rstrip()}", - f"\n\t{self.observation_prefix.rstrip()}", - ] - - def _construct_scratchpad( - self, intermediate_steps: List[Tuple[AgentAction, str]] - ) -> Union[str, List[BaseMessage]]: - """Construct the scratchpad that lets the agent continue its thought process.""" - thoughts = "" - for action, observation in intermediate_steps: - thoughts += action.log - thoughts += f"\n{self.observation_prefix}{observation}\n{self.llm_prefix}" - return thoughts - - def plan( - self, - intermediate_steps: List[Tuple[AgentAction, str]], - callbacks: Callbacks = None, - **kwargs: Any, - ) -> Union[AgentAction, AgentFinish]: - """Given input, decided what to do. - - Args: - intermediate_steps: Steps the LLM has taken to date, - along with observations - callbacks: Callbacks to run. - **kwargs: User inputs. - - Returns: - Action specifying what tool to use. - - """ - try: - full_inputs = self.get_full_inputs(intermediate_steps, **kwargs) - full_output = self.llm_chain.predict(callbacks=callbacks, **full_inputs) - return self.output_parser.parse(full_output) - except Exception: - full_inputs["agent_scratchpad"] = ( - full_inputs["agent_scratchpad"] + full_output + "\nAction: " - ) - full_output = self.llm_chain.predict(callbacks=callbacks, **full_inputs) - return self.output_parser.parse("Action: " + full_output) - - async def aplan( - self, - intermediate_steps: List[Tuple[AgentAction, str]], - callbacks: Callbacks = None, - **kwargs: Any, - ) -> Union[AgentAction, AgentFinish]: - """Given input, decided what to do. - - Args: - intermediate_steps: Steps the LLM has taken to date, - along with observations - callbacks: Callbacks to run. - **kwargs: User inputs. - - Returns: - Action specifying what tool to use. 
- - """ - try: - full_inputs = self.get_full_inputs(intermediate_steps, **kwargs) - full_output = await self.llm_chain.apredict( - callbacks=callbacks, **full_inputs - ) - agent_output = await self.output_parser.aparse(full_output) - except Exception: - full_inputs["agent_scratchpad"] = ( - full_inputs["agent_scratchpad"] + full_output + "\nAction: " - ) - full_output = await self.llm_chain.apredict( - callbacks=callbacks, **full_inputs - ) - agent_output = await self.output_parser.aparse("Action: " + full_output) - - return agent_output - - def get_full_inputs( - self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any - ) -> Dict[str, Any]: - """Create the full inputs for the LLMChain from intermediate steps.""" - thoughts = self._construct_scratchpad(intermediate_steps) - new_inputs = {"agent_scratchpad": thoughts, "stop": self._stop} - full_inputs = {**kwargs, **new_inputs} - return full_inputs - - @property - def input_keys(self) -> List[str]: - """Return the input keys. - - :meta private: - """ - return list(set(self.llm_chain.input_keys) - {"agent_scratchpad"}) - - @root_validator() - def validate_prompt(cls, values: Dict) -> Dict: - """Validate that prompt matches format.""" - prompt = values["llm_chain"].prompt - if "agent_scratchpad" not in prompt.input_variables: - logger.warning( - "`agent_scratchpad` should be a variable in prompt.input_variables." - " Did not find it, so adding it at the end." - ) - prompt.input_variables.append("agent_scratchpad") - if isinstance(prompt, PromptTemplate): - prompt.template += "\n{agent_scratchpad}" - elif isinstance(prompt, FewShotPromptTemplate): - prompt.suffix += "\n{agent_scratchpad}" - else: - raise ValueError(f"Got unexpected prompt type {type(prompt)}") - return values - - @property - @abstractmethod - def observation_prefix(self) -> str: - """Prefix to append the observation with.""" - - @property - @abstractmethod - def llm_prefix(self) -> str: - """Prefix to append the LLM call with.""" - - @classmethod - def _validate_tools(cls, tools: Sequence[BaseTool]) -> None: - """Validate that appropriate tools are passed in.""" - pass - - @classmethod - @abstractmethod - def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser: - """Get default output parser for this class.""" - - @classmethod - def from_llm_and_tools( - cls, - llm: BaseLanguageModel, - tools: Sequence[BaseTool], - prompt: BasePromptTemplate, - callback_manager: Optional[BaseCallbackManager] = None, - output_parser: Optional[AgentOutputParser] = None, - **kwargs: Any, - ) -> Agent: - """Construct an agent from an LLM and tools.""" - cls._validate_tools(tools) - llm_chain = LLMChain( - llm=llm, - prompt=prompt, - callback_manager=callback_manager, - ) - tool_names = [tool.name for tool in tools] - _output_parser = output_parser or cls._get_default_output_parser() - return cls( - llm_chain=llm_chain, - allowed_tools=tool_names, - output_parser=_output_parser, - **kwargs, - ) - - def return_stopped_response( - self, - early_stopping_method: str, - intermediate_steps: List[Tuple[AgentAction, str]], - **kwargs: Any, - ) -> AgentFinish: - """Return response when agent has been stopped due to max iterations.""" - if early_stopping_method == "force": - # `force` just returns a constant string - return AgentFinish( - {"output": "Agent stopped due to iteration limit or time limit."}, "" - ) - elif early_stopping_method == "generate": - # Generate does one final forward pass - thoughts = "" - for action, observation in intermediate_steps: - thoughts += 
-                thoughts += (
-                    f"\n{self.observation_prefix}{observation}\n{self.llm_prefix}"
-                )
-            # Adding to the previous steps, we now tell the LLM to make a
-            # final prediction
-            thoughts += (
-                "\n\nI now need to return a final answer based on the previous steps:"
-            )
-            new_inputs = {"agent_scratchpad": thoughts, "stop": self._stop}
-            full_inputs = {**kwargs, **new_inputs}
-            full_output = self.llm_chain.predict(**full_inputs)
-            # We try to extract a final answer
-            parsed_output = self.output_parser.parse(full_output)
-            if isinstance(parsed_output, AgentFinish):
-                # If a final answer can be extracted, return it
-                return parsed_output
-            else:
-                # If no final answer can be extracted, return the raw output
-                return AgentFinish({"output": full_output}, full_output)
-        else:
-            raise ValueError(
-                "early_stopping_method should be one of `force` or `generate`, "
-                f"got {early_stopping_method}"
-            )
-
-    def tool_run_logging_kwargs(self) -> Dict:
-        return {
-            "llm_prefix": self.llm_prefix,
-            "observation_prefix": self.observation_prefix,
-        }
-
-    def save(self, file_path: Union[Path, str]) -> None:
-        """Save the agent.
-
-        Args:
-            file_path: Path to file to save the agent to.
-
-        Example:
-        .. code-block:: python
-
-            # If working with agent executor
-            agent.agent.save(file_path="path/agent.yaml")
-
-        """
-        # Convert file to Path object.
-        if isinstance(file_path, str):
-            save_path = Path(file_path)
-        else:
-            save_path = file_path
-
-        directory_path = save_path.parent
-        directory_path.mkdir(parents=True, exist_ok=True)
-
-        # Fetch dictionary to save
-        agent_dict = self.dict()
-
-        if save_path.suffix == ".json":
-            with open(save_path, "w") as f:
-                json.dump(agent_dict, f, indent=4)
-        elif save_path.suffix == ".yaml":
-            with open(save_path, "w") as f:
-                yaml.dump(agent_dict, f, default_flow_style=False)
-        else:
-            raise ValueError(f"{save_path} must be json or yaml")
diff --git a/agent_gateway/chains/llm_chain.py b/agent_gateway/chains/llm_chain.py
deleted file mode 100644
index 741e975..0000000
--- a/agent_gateway/chains/llm_chain.py
+++ /dev/null
@@ -1,405 +0,0 @@
-# Copyright 2024 Snowflake Inc.
-# SPDX-License-Identifier: Apache-2.0
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- -from __future__ import annotations - -import logging -import warnings -from collections.abc import Sequence -from typing import Any, Dict, List, Optional, Tuple, Union - -from langchain.callbacks.manager import ( - AsyncCallbackManager, - AsyncCallbackManagerForChainRun, - CallbackManager, - CallbackManagerForChainRun, - Callbacks, -) -from langchain.load.dump import dumpd -from langchain.prompts.prompt import PromptTemplate -from langchain.pydantic_v1 import Extra, Field -from langchain.schema import ( - BaseLLMOutputParser, - BasePromptTemplate, - LLMResult, - PromptValue, - StrOutputParser, -) -from langchain.schema.language_model import BaseLanguageModel -from langchain.utils.input import get_colored_text - -from agent_gateway.chains.chain import Chain - -logger = logging.getLogger(__name__) - -MAX_RETRY_GENERATE = 10 - - -class LLMChain(Chain): - """Chain to run queries against LLMs. - - Example: - .. code-block:: python - - from langchain import LLMChain, OpenAI, PromptTemplate - prompt_template = "Tell me a {adjective} joke" - prompt = PromptTemplate( - input_variables=["adjective"], template=prompt_template - ) - llm = LLMChain(llm=OpenAI(), prompt=prompt) - - """ - - @property - def lc_serializable(self) -> bool: - return True - - prompt: BasePromptTemplate - """Prompt object to use.""" - llm: BaseLanguageModel - """Language model to call.""" - output_key: str = "text" #: :meta private: - output_parser: BaseLLMOutputParser = Field(default_factory=StrOutputParser) - """Output parser to use. - Defaults to one that takes the most likely string but does not change it - otherwise.""" - return_final_only: bool = True - """Whether to return only the final parsed result. Defaults to True. - If false, will return a bunch of extra information about the generation.""" - llm_kwargs: dict = Field(default_factory=dict) - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True - - @property - def input_keys(self) -> List[str]: - """Will be whatever keys the prompt expects. - - :meta private: - """ - return self.prompt.input_variables - - @property - def output_keys(self) -> List[str]: - """Will always return text key. - - :meta private: - """ - if self.return_final_only: - return [self.output_key] - else: - return [self.output_key, "full_generation"] - - def _call( - self, - inputs: Dict[str, Any], - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> Dict[str, str]: - response = self.generate([inputs], run_manager=run_manager) - return self.create_outputs(response)[0] - - def generate( - self, - input_list: List[Dict[str, Any]], - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> LLMResult: - """Generate LLM result from inputs.""" - prompts, stop = self.prep_prompts(input_list, run_manager=run_manager) - # TODO: this is a hack to make sure that the stop token always contains EOR - if stop is not None: - stop += [""] - else: - stop = [""] - - # For the llama usage, generation sometimes runs into context length limit error. - # Add a retry logic to avoid this error. 
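-        # On failure, the prompt is shortened before the next attempt: the
-        # first in-context example block (the text between the first two
-        # "Question:" markers) is dropped; if no such block exists, the first
-        # 400 characters are cut instead. Note that if every retry fails, the
-        # loop falls through and this method implicitly returns None.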
- for _ in range(MAX_RETRY_GENERATE): - try: - return self.llm.generate_prompt( - prompts, - stop, - callbacks=run_manager.get_child() if run_manager else None, - **self.llm_kwargs, - ) - except Exception: - text = prompts[0].text - # Try removing in-context examples - first_index = text.find("Question:") - second_index = text.find("Question:", first_index + 1) - if first_index != -1 and second_index != -1: - prompts[0].text = text[:first_index] + text[second_index:] - else: - # otherwise, simply cut the context - prompts[0].text = text[400:] - print("Length shortened", len(prompts[0].text)) - - async def agenerate( - self, - input_list: List[Dict[str, Any]], - run_manager: Optional[AsyncCallbackManagerForChainRun] = None, - ) -> LLMResult: - """Generate LLM result from inputs.""" - prompts, stop = await self.aprep_prompts(input_list, run_manager=run_manager) - # TODO: this is a hack to make sure that the stop token always contains EOR - if stop is not None: - stop += [""] - else: - stop = [""] - # For the llama usage, generation sometimes runs into context length limit error. - # Add a retry logic to avoid this error. - for _ in range(MAX_RETRY_GENERATE): - try: - return await self.llm.agenerate_prompt( - prompts, - stop, - callbacks=run_manager.get_child() if run_manager else None, - **self.llm_kwargs, - ) - except Exception: - text = prompts[0].text - # Try removing in-context examples - first_index = text.find("Question:") - second_index = text.find("Question:", first_index + 1) - if first_index != -1 and second_index != -1: - prompts[0].text = text[:first_index] + text[second_index:] - else: - # otherwise, simply cut the context - prompts[0].text = text[400:] - print("Length shortened", len(prompts[0].text)) - - def prep_prompts( - self, - input_list: List[Dict[str, Any]], - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> Tuple[List[PromptValue], Optional[List[str]]]: - """Prepare prompts from inputs.""" - stop = None - if len(input_list) == 0: - return [], stop - if "stop" in input_list[0]: - stop = input_list[0]["stop"] - prompts = [] - for inputs in input_list: - selected_inputs = {k: inputs[k] for k in self.prompt.input_variables} - prompt = self.prompt.format_prompt(**selected_inputs) - _colored_text = get_colored_text(prompt.to_string(), "green") - _text = "Prompt after formatting:\n" + _colored_text - if run_manager: - run_manager.on_text(_text, end="\n", verbose=self.verbose) - if "stop" in inputs and inputs["stop"] != stop: - raise ValueError( - "If `stop` is present in any inputs, should be present in all." - ) - prompts.append(prompt) - return prompts, stop - - async def aprep_prompts( - self, - input_list: List[Dict[str, Any]], - run_manager: Optional[AsyncCallbackManagerForChainRun] = None, - ) -> Tuple[List[PromptValue], Optional[List[str]]]: - """Prepare prompts from inputs.""" - stop = None - if len(input_list) == 0: - return [], stop - if "stop" in input_list[0]: - stop = input_list[0]["stop"] - prompts = [] - for inputs in input_list: - selected_inputs = {k: inputs[k] for k in self.prompt.input_variables} - prompt = self.prompt.format_prompt(**selected_inputs) - _colored_text = get_colored_text(prompt.to_string(), "green") - _text = "Prompt after formatting:\n" + _colored_text - if run_manager: - await run_manager.on_text(_text, end="\n", verbose=self.verbose) - if "stop" in inputs and inputs["stop"] != stop: - raise ValueError( - "If `stop` is present in any inputs, should be present in all." 
- ) - prompts.append(prompt) - return prompts, stop - - def apply( - self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None - ) -> List[Dict[str, str]]: - """Utilize the LLM generate method for speed gains.""" - callback_manager = CallbackManager.configure( - callbacks, self.callbacks, self.verbose - ) - run_manager = callback_manager.on_chain_start( - dumpd(self), - {"input_list": input_list}, - ) - try: - response = self.generate(input_list, run_manager=run_manager) - except (KeyboardInterrupt, Exception) as e: - run_manager.on_chain_error(e) - raise e - outputs = self.create_outputs(response) - run_manager.on_chain_end({"outputs": outputs}) - return outputs - - async def aapply( - self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None - ) -> List[Dict[str, str]]: - """Utilize the LLM generate method for speed gains.""" - callback_manager = AsyncCallbackManager.configure( - callbacks, self.callbacks, self.verbose - ) - run_manager = await callback_manager.on_chain_start( - dumpd(self), - {"input_list": input_list}, - ) - try: - response = await self.agenerate(input_list, run_manager=run_manager) - except (KeyboardInterrupt, Exception) as e: - await run_manager.on_chain_error(e) - raise e - outputs = self.create_outputs(response) - await run_manager.on_chain_end({"outputs": outputs}) - return outputs - - @property - def _run_output_key(self) -> str: - return self.output_key - - def create_outputs(self, llm_result: LLMResult) -> List[Dict[str, Any]]: - """Create outputs from response.""" - result = [ - # Get the text of the top generated string. - { - self.output_key: self.output_parser.parse_result(generation), - "full_generation": generation, - } - for generation in llm_result.generations - ] - if self.return_final_only: - result = [{self.output_key: r[self.output_key]} for r in result] - return result - - async def _acall( - self, - inputs: Dict[str, Any], - run_manager: Optional[AsyncCallbackManagerForChainRun] = None, - ) -> Dict[str, str]: - response = await self.agenerate([inputs], run_manager=run_manager) - return self.create_outputs(response)[0] - - def predict(self, callbacks: Callbacks = None, **kwargs: Any) -> str: - """Format prompt with kwargs and pass to LLM. - - Args: - callbacks: Callbacks to pass to LLMChain - **kwargs: Keys to pass to prompt template. - - Returns: - Completion from LLM. - - Example: - .. code-block:: python - - completion = llm.predict(adjective="funny") - - """ - return self(kwargs, callbacks=callbacks)[self.output_key] - - async def apredict(self, callbacks: Callbacks = None, **kwargs: Any) -> str: - """Format prompt with kwargs and pass to LLM. - - Args: - callbacks: Callbacks to pass to LLMChain - **kwargs: Keys to pass to prompt template. - - Returns: - Completion from LLM. - - Example: - .. code-block:: python - - completion = llm.predict(adjective="funny") - - """ - return (await self.acall(kwargs, callbacks=callbacks))[self.output_key] - - def predict_and_parse( - self, callbacks: Callbacks = None, **kwargs: Any - ) -> Union[str, List[str], Dict[str, Any]]: - """Call predict and then parse the results.""" - warnings.warn( - "The predict_and_parse method is deprecated, " - "instead pass an output parser directly to LLMChain." 
- ) - result = self.predict(callbacks=callbacks, **kwargs) - if self.prompt.output_parser is not None: - return self.prompt.output_parser.parse(result) - else: - return result - - async def apredict_and_parse( - self, callbacks: Callbacks = None, **kwargs: Any - ) -> Union[str, List[str], Dict[str, str]]: - """Call apredict and then parse the results.""" - warnings.warn( - "The apredict_and_parse method is deprecated, " - "instead pass an output parser directly to LLMChain." - ) - result = await self.apredict(callbacks=callbacks, **kwargs) - if self.prompt.output_parser is not None: - return self.prompt.output_parser.parse(result) - else: - return result - - def apply_and_parse( - self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None - ) -> Sequence[Union[str, List[str], Dict[str, str]]]: - """Call apply and then parse the results.""" - warnings.warn( - "The apply_and_parse method is deprecated, " - "instead pass an output parser directly to LLMChain." - ) - result = self.apply(input_list, callbacks=callbacks) - return self._parse_generation(result) - - def _parse_generation( - self, generation: List[Dict[str, str]] - ) -> Sequence[Union[str, List[str], Dict[str, str]]]: - if self.prompt.output_parser is not None: - return [ - self.prompt.output_parser.parse(res[self.output_key]) - for res in generation - ] - else: - return generation - - async def aapply_and_parse( - self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None - ) -> Sequence[Union[str, List[str], Dict[str, str]]]: - """Call apply and then parse the results.""" - warnings.warn( - "The aapply_and_parse method is deprecated, " - "instead pass an output parser directly to LLMChain." - ) - result = await self.aapply(input_list, callbacks=callbacks) - return self._parse_generation(result) - - @property - def _chain_type(self) -> str: - return "llm_chain" - - @classmethod - def from_string(cls, llm: BaseLanguageModel, template: str) -> LLMChain: - """Create LLMChain from LLM and template.""" - prompt_template = PromptTemplate.from_template(template) - return cls(llm=llm, prompt=prompt_template) diff --git a/agent_gateway/executors/agent_executor.py b/agent_gateway/executors/agent_executor.py deleted file mode 100644 index 4a998ef..0000000 --- a/agent_gateway/executors/agent_executor.py +++ /dev/null @@ -1,580 +0,0 @@ -# Copyright 2024 Snowflake Inc. -# SPDX-License-Identifier: Apache-2.0 -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import annotations - -import asyncio -import time -from pathlib import Path -from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union - -from langchain.agents.agent import BaseMultiActionAgent, BaseSingleActionAgent -from langchain.agents.agent_iterator import AgentExecutorIterator -from langchain.agents.tools import InvalidTool -from langchain.callbacks.manager import ( - AsyncCallbackManagerForChainRun, - AsyncCallbackManagerForToolRun, - CallbackManagerForChainRun, - CallbackManagerForToolRun, - Callbacks, -) -from langchain.pydantic_v1 import root_validator -from langchain.schema import AgentAction, AgentFinish, OutputParserException -from langchain.utilities.asyncio import asyncio_timeout -from langchain.utils.input import get_color_mapping - -from agent_gateway.chains.chain import Chain -from agent_gateway.tools.base import BaseTool - - -class ExceptionTool(BaseTool): - """Tool that just returns the query.""" - - name: str = "_Exception" - """Name of the tool.""" - description: str = "Exception tool" - """Description of the tool.""" - - def _run( - self, - query: str, - run_manager: Optional[CallbackManagerForToolRun] = None, - ) -> str: - return query - - async def _arun( - self, - query: str, - run_manager: Optional[AsyncCallbackManagerForToolRun] = None, - ) -> str: - return query - - -class AgentExecutor(Chain): - """Agent that is using tools.""" - - agent: Union[BaseSingleActionAgent, BaseMultiActionAgent] - """The agent to run for creating a plan and determining actions - to take at each step of the execution loop.""" - tools: Sequence[BaseTool] - """The valid tools the agent can call.""" - return_intermediate_steps: bool = False - """Whether to return the agent's trajectory of intermediate steps - at the end in addition to the final output.""" - max_iterations: Optional[int] = 15 - """The maximum number of steps to take before ending the execution - loop. - - Setting to 'None' could lead to an infinite loop.""" - max_execution_time: Optional[float] = None - """The maximum amount of wall clock time to spend in the execution - loop. - """ - early_stopping_method: str = "force" - """The method to use for early stopping if the agent never - returns `AgentFinish`. Either 'force' or 'generate'. - - `"force"` returns a string saying that it stopped because it met a - time or iteration limit. - - `"generate"` calls the agent's LLM Chain one final time to generate - a final answer based on the previous steps. - """ - handle_parsing_errors: Union[bool, str, Callable[[OutputParserException], str]] = ( - False - ) - """How to handle errors raised by the agent's output parser. - Defaults to `False`, which raises the error. - - If `true`, the error will be sent back to the LLM as an observation. - If a string, the string itself will be sent to the LLM as an observation. - If a callable function, the function will be called with the exception - as an argument, and the result of that function will be passed to the agent - as an observation. 
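-
-    For example (a sketch; ``agent`` and ``tools`` are stand-ins for real
-    instances), a callable handler can turn the parse error into an
-    observation:
-
-    .. code-block:: python
-
-        executor = AgentExecutor.from_agent_and_tools(
-            agent=agent,
-            tools=tools,
-            handle_parsing_errors=lambda e: f"Could not parse LLM output: {e}",
-        )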
- """ - trim_intermediate_steps: Union[ - int, Callable[[List[Tuple[AgentAction, str]]], List[Tuple[AgentAction, str]]] - ] = -1 - - @classmethod - def from_agent_and_tools( - cls, - agent: Union[BaseSingleActionAgent, BaseMultiActionAgent], - tools: Sequence[BaseTool], - callbacks: Callbacks = None, - **kwargs: Any, - ) -> AgentExecutor: - """Create from agent and tools.""" - return cls( - agent=agent, - tools=tools, - callbacks=callbacks, - **kwargs, - ) - - @root_validator() - def validate_tools(cls, values: Dict) -> Dict: - """Validate that tools are compatible with agent.""" - agent = values["agent"] - tools = values["tools"] - allowed_tools = agent.get_allowed_tools() - if allowed_tools is not None: - if set(allowed_tools) != set([tool.name for tool in tools]): - raise ValueError( - f"Allowed tools ({allowed_tools}) different than " - f"provided tools ({[tool.name for tool in tools]})" - ) - return values - - @root_validator() - def validate_return_direct_tool(cls, values: Dict) -> Dict: - """Validate that tools are compatible with agent.""" - agent = values["agent"] - tools = values["tools"] - if isinstance(agent, BaseMultiActionAgent): - for tool in tools: - if tool.return_direct: - raise ValueError( - "Tools that have `return_direct=True` are not allowed " - "in multi-action agents" - ) - return values - - def save(self, file_path: Union[Path, str]) -> None: - """Raise error - saving not supported for Agent Executors.""" - raise ValueError( - "Saving not supported for agent executors. " - "If you are trying to save the agent, please use the " - "`.save_agent(...)`" - ) - - def save_agent(self, file_path: Union[Path, str]) -> None: - """Save the underlying agent.""" - return self.agent.save(file_path) - - def iter( - self, - inputs: Any, - callbacks: Callbacks = None, - *, - include_run_info: bool = False, - async_: bool = False, - ) -> AgentExecutorIterator: - """Enables iteration over steps taken to reach final output.""" - return AgentExecutorIterator( - self, - inputs, - callbacks, - tags=self.tags, - include_run_info=include_run_info, - async_=async_, - ) - - @property - def input_keys(self) -> List[str]: - """Return the input keys. - - :meta private: - """ - return self.agent.input_keys - - @property - def output_keys(self) -> List[str]: - """Return the singular output key. 
- - :meta private: - """ - if self.return_intermediate_steps: - return self.agent.return_values + ["intermediate_steps"] - else: - return self.agent.return_values - - def lookup_tool(self, name: str) -> BaseTool: - """Lookup tool by name.""" - return {tool.name: tool for tool in self.tools}[name] - - def _should_continue(self, iterations: int, time_elapsed: float) -> bool: - if self.max_iterations is not None and iterations >= self.max_iterations: - return False - if ( - self.max_execution_time is not None - and time_elapsed >= self.max_execution_time - ): - return False - - return True - - def _return( - self, - output: AgentFinish, - intermediate_steps: list, - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> Dict[str, Any]: - if run_manager: - run_manager.on_agent_finish(output, color="green", verbose=self.verbose) - final_output = output.return_values - if self.return_intermediate_steps: - final_output["intermediate_steps"] = intermediate_steps - return final_output - - async def _areturn( - self, - output: AgentFinish, - intermediate_steps: list, - run_manager: Optional[AsyncCallbackManagerForChainRun] = None, - ) -> Dict[str, Any]: - if run_manager: - await run_manager.on_agent_finish( - output, color="green", verbose=self.verbose - ) - final_output = output.return_values - if self.return_intermediate_steps: - final_output["intermediate_steps"] = intermediate_steps - return final_output - - def _take_next_step( - self, - name_to_tool_map: Dict[str, BaseTool], - color_mapping: Dict[str, str], - inputs: Dict[str, str], - intermediate_steps: List[Tuple[AgentAction, str]], - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]: - """Take a single step in the thought-action-observation loop. - - Override this to take control of how the agent makes and acts on choices. - """ - try: - intermediate_steps = self._prepare_intermediate_steps(intermediate_steps) - - # Call the LLM to see what to do. - output = self.agent.plan( - intermediate_steps, - callbacks=run_manager.get_child() if run_manager else None, - **inputs, - ) - except OutputParserException as e: - if isinstance(self.handle_parsing_errors, bool): - raise_error = not self.handle_parsing_errors - else: - raise_error = False - if raise_error: - raise e - text = str(e) - if isinstance(self.handle_parsing_errors, bool): - if e.send_to_llm: - observation = str(e.observation) - text = str(e.llm_output) - else: - observation = "Invalid or incomplete response" - elif isinstance(self.handle_parsing_errors, str): - observation = self.handle_parsing_errors - elif callable(self.handle_parsing_errors): - observation = self.handle_parsing_errors(e) - else: - raise ValueError("Got unexpected type of `handle_parsing_errors`") - output = AgentAction("_Exception", observation, text) - if run_manager: - run_manager.on_agent_action(output, color="green") - tool_run_kwargs = self.agent.tool_run_logging_kwargs() - observation = ExceptionTool().run( - output.tool_input, - verbose=self.verbose, - color=None, - callbacks=run_manager.get_child() if run_manager else None, - **tool_run_kwargs, - ) - return [(output, observation)] - # If the tool chosen is the finishing tool, then we end and return. 
- if isinstance(output, AgentFinish): - return output - actions: List[AgentAction] - if isinstance(output, AgentAction): - actions = [output] - else: - actions = output - result = [] - for agent_action in actions: - if run_manager: - run_manager.on_agent_action(agent_action, color="green") - # Otherwise we lookup the tool - if agent_action.tool in name_to_tool_map: - tool = name_to_tool_map[agent_action.tool] - return_direct = tool.return_direct - color = color_mapping[agent_action.tool] - tool_run_kwargs = self.agent.tool_run_logging_kwargs() - if return_direct: - tool_run_kwargs["llm_prefix"] = "" - # We then call the tool on the tool input to get an observation - observation = tool.run( - agent_action.tool_input, - verbose=self.verbose, - color=color, - callbacks=run_manager.get_child() if run_manager else None, - **tool_run_kwargs, - ) - else: - tool_run_kwargs = self.agent.tool_run_logging_kwargs() - observation = InvalidTool().run( - { - "requested_tool_name": agent_action.tool, - "available_tool_names": list(name_to_tool_map.keys()), - }, - verbose=self.verbose, - color=None, - callbacks=run_manager.get_child() if run_manager else None, - **tool_run_kwargs, - ) - result.append((agent_action, observation)) - return result - - async def _atake_next_step( - self, - name_to_tool_map: Dict[str, BaseTool], - color_mapping: Dict[str, str], - inputs: Dict[str, str], - intermediate_steps: List[Tuple[AgentAction, str]], - run_manager: Optional[AsyncCallbackManagerForChainRun] = None, - ) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]: - """Take a single step in the thought-action-observation loop. - - Override this to take control of how the agent makes and acts on choices. - """ - try: - intermediate_steps = self._prepare_intermediate_steps(intermediate_steps) - - # Call the LLM to see what to do. - output = await self.agent.aplan( - intermediate_steps, - callbacks=run_manager.get_child() if run_manager else None, - **inputs, - ) - except OutputParserException as e: - if isinstance(self.handle_parsing_errors, bool): - raise_error = not self.handle_parsing_errors - else: - raise_error = False - if raise_error: - raise e - text = str(e) - if isinstance(self.handle_parsing_errors, bool): - if e.send_to_llm: - observation = str(e.observation) - text = str(e.llm_output) - else: - observation = "Invalid or incomplete response" - elif isinstance(self.handle_parsing_errors, str): - observation = self.handle_parsing_errors - elif callable(self.handle_parsing_errors): - observation = self.handle_parsing_errors(e) - else: - raise ValueError("Got unexpected type of `handle_parsing_errors`") - output = AgentAction("_Exception", observation, text) - tool_run_kwargs = self.agent.tool_run_logging_kwargs() - observation = await ExceptionTool().arun( - output.tool_input, - verbose=self.verbose, - color=None, - callbacks=run_manager.get_child() if run_manager else None, - **tool_run_kwargs, - ) - return [(output, observation)] - # If the tool chosen is the finishing tool, then we end and return. 
- if isinstance(output, AgentFinish): - return output - actions: List[AgentAction] - if isinstance(output, AgentAction): - actions = [output] - else: - actions = output - - async def _aperform_agent_action( - agent_action: AgentAction, - ) -> Tuple[AgentAction, str]: - if run_manager: - await run_manager.on_agent_action( - agent_action, verbose=self.verbose, color="green" - ) - # Otherwise we lookup the tool - if agent_action.tool in name_to_tool_map: - tool = name_to_tool_map[agent_action.tool] - return_direct = tool.return_direct - color = color_mapping[agent_action.tool] - tool_run_kwargs = self.agent.tool_run_logging_kwargs() - if return_direct: - tool_run_kwargs["llm_prefix"] = "" - # We then call the tool on the tool input to get an observation - observation = await tool.arun( - agent_action.tool_input, - verbose=self.verbose, - color=color, - callbacks=run_manager.get_child() if run_manager else None, - **tool_run_kwargs, - ) - else: - tool_run_kwargs = self.agent.tool_run_logging_kwargs() - observation = await InvalidTool().arun( - { - "requested_tool_name": agent_action.tool, - "available_tool_names": list(name_to_tool_map.keys()), - }, - verbose=self.verbose, - color=None, - callbacks=run_manager.get_child() if run_manager else None, - **tool_run_kwargs, - ) - return agent_action, observation - - # Use asyncio.gather to run multiple tool.arun() calls concurrently - result = await asyncio.gather( - *[_aperform_agent_action(agent_action) for agent_action in actions], - return_exceptions=True, - ) - - return list(result) - - def _call( - self, - inputs: Dict[str, str], - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> Dict[str, Any]: - """Run text through and get agent response.""" - # Construct a mapping of tool name to tool for easy lookup - name_to_tool_map = {tool.name: tool for tool in self.tools} - # We construct a mapping from each tool to a color, used for logging. - color_mapping = get_color_mapping( - [tool.name for tool in self.tools], excluded_colors=["green", "red"] - ) - intermediate_steps: List[Tuple[AgentAction, str]] = [] - # Let's start tracking the number of iterations and time elapsed - iterations = 0 - time_elapsed = 0.0 - start_time = time.time() - - # We now enter the agent loop (until it returns something). 
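-        # Each iteration plans the next action(s), runs the chosen tool(s),
-        # appends the resulting (action, observation) pairs to
-        # intermediate_steps, and then re-checks the iteration/time budget.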
- while self._should_continue(iterations, time_elapsed): - next_step_output = self._take_next_step( - name_to_tool_map, - color_mapping, - inputs, - intermediate_steps, - run_manager=run_manager, - ) - if isinstance(next_step_output, AgentFinish): - return self._return( - next_step_output, intermediate_steps, run_manager=run_manager - ) - - intermediate_steps.extend(next_step_output) - if len(next_step_output) == 1: - next_step_action = next_step_output[0] - # See if tool should return directly - tool_return = self._get_tool_return(next_step_action) - if tool_return is not None: - return self._return( - tool_return, intermediate_steps, run_manager=run_manager - ) - iterations += 1 - time_elapsed = time.time() - start_time - output = self.agent.return_stopped_response( - self.early_stopping_method, intermediate_steps, **inputs - ) - return self._return(output, intermediate_steps, run_manager=run_manager) - - async def _acall( - self, - inputs: Dict[str, str], - run_manager: Optional[AsyncCallbackManagerForChainRun] = None, - ) -> Dict[str, str]: - """Run text through and get agent response.""" - # Construct a mapping of tool name to tool for easy lookup - name_to_tool_map = {tool.name: tool for tool in self.tools} - # We construct a mapping from each tool to a color, used for logging. - color_mapping = get_color_mapping( - [tool.name for tool in self.tools], excluded_colors=["green"] - ) - intermediate_steps: List[Tuple[AgentAction, str]] = [] - # Let's start tracking the number of iterations and time elapsed - iterations = 0 - time_elapsed = 0.0 - start_time = time.time() - # We now enter the agent loop (until it returns something). - async with asyncio_timeout(self.max_execution_time): - try: - while self._should_continue(iterations, time_elapsed): - next_step_output = await self._atake_next_step( - name_to_tool_map, - color_mapping, - inputs, - intermediate_steps, - run_manager=run_manager, - ) - if isinstance(next_step_output, AgentFinish): - return await self._areturn( - next_step_output, - intermediate_steps, - run_manager=run_manager, - ) - intermediate_steps.extend(next_step_output) - if len(next_step_output) == 1: - next_step_action = next_step_output[0] - # See if tool should return directly - tool_return = self._get_tool_return(next_step_action) - if tool_return is not None: - return await self._areturn( - tool_return, intermediate_steps, run_manager=run_manager - ) - - iterations += 1 - time_elapsed = time.time() - start_time - output = self.agent.return_stopped_response( - self.early_stopping_method, intermediate_steps, **inputs - ) - return await self._areturn( - output, intermediate_steps, run_manager=run_manager - ) - except TimeoutError: - # stop early when interrupted by the async timeout - output = self.agent.return_stopped_response( - self.early_stopping_method, intermediate_steps, **inputs - ) - return await self._areturn( - output, intermediate_steps, run_manager=run_manager - ) - - def _get_tool_return( - self, next_step_output: Tuple[AgentAction, str] - ) -> Optional[AgentFinish]: - """Check if the tool is a returning tool.""" - agent_action, observation = next_step_output - name_to_tool_map = {tool.name: tool for tool in self.tools} - # Invalid tools won't be in the map, so we return False. 
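-        # (i.e. fall through to the final `return None` below)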
- if agent_action.tool in name_to_tool_map: - if name_to_tool_map[agent_action.tool].return_direct: - return AgentFinish( - {self.agent.return_values[0]: observation}, - "", - ) - return None - - def _prepare_intermediate_steps( - self, intermediate_steps: List[Tuple[AgentAction, str]] - ) -> List[Tuple[AgentAction, str]]: - if ( - isinstance(self.trim_intermediate_steps, int) - and self.trim_intermediate_steps > 0 - ): - return intermediate_steps[-self.trim_intermediate_steps :] - elif callable(self.trim_intermediate_steps): - return self.trim_intermediate_steps(intermediate_steps) - else: - return intermediate_steps diff --git a/pyproject.toml b/pyproject.toml index f01b261..9afc4cd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,7 +19,8 @@ dependencies = [ dev = [ "pre-commit>=4.0.1,<5.0", "ruff>=0.6.9,<1.0", - "pytest>=8.3.3,<9.0" + "pytest>=8.3.3,<9.0", + "coverage>=7.6.9,<8.0" ] streamlit = ["streamlit>=1.39.0,<2.0"] diff --git a/uv.lock b/uv.lock index 09ec65c..ddd2e06 100644 --- a/uv.lock +++ b/uv.lock @@ -416,6 +416,75 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, ] +[[package]] +name = "coverage" +version = "7.6.9" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5b/d2/c25011f4d036cf7e8acbbee07a8e09e9018390aee25ba085596c4b83d510/coverage-7.6.9.tar.gz", hash = "sha256:4a8d8977b0c6ef5aeadcb644da9e69ae0dcfe66ec7f368c89c72e058bd71164d", size = 801710 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/49/f3/f830fb53bf7e4f1d5542756f61d9b740352a188f43854aab9409c8cdeb18/coverage-7.6.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:85d9636f72e8991a1706b2b55b06c27545448baf9f6dbf51c4004609aacd7dcb", size = 207024 }, + { url = "https://files.pythonhosted.org/packages/4e/e3/ea5632a3a6efd00ab0a791adc0f3e48512097a757ee7dcbee5505f57bafa/coverage-7.6.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:608a7fd78c67bee8936378299a6cb9f5149bb80238c7a566fc3e6717a4e68710", size = 207463 }, + { url = "https://files.pythonhosted.org/packages/e4/ae/18ff8b5580e27e62ebcc888082aa47694c2772782ea7011ddf58e377e98f/coverage-7.6.9-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:96d636c77af18b5cb664ddf12dab9b15a0cfe9c0bde715da38698c8cea748bfa", size = 235902 }, + { url = "https://files.pythonhosted.org/packages/6a/52/57030a8d15ab935624d298360f0a6704885578e39f7b4f68569e59f5902d/coverage-7.6.9-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d75cded8a3cff93da9edc31446872d2997e327921d8eed86641efafd350e1df1", size = 233806 }, + { url = "https://files.pythonhosted.org/packages/d0/c5/4466602195ecaced298d55af1e29abceb812addabefd5bd9116a204f7bab/coverage-7.6.9-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7b15f589593110ae767ce997775d645b47e5cbbf54fd322f8ebea6277466cec", size = 234966 }, + { url = "https://files.pythonhosted.org/packages/b0/1c/55552c3009b7bf96732e36548596ade771c87f89cf1f5a8e3975b33539b5/coverage-7.6.9-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:44349150f6811b44b25574839b39ae35291f6496eb795b7366fef3bd3cf112d3", size = 234029 }, + { url = 
"https://files.pythonhosted.org/packages/bb/7d/da3dca6878701182ea42c51df47a47c80eaef2a76f5aa3e891dc2a8cce3f/coverage-7.6.9-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:d891c136b5b310d0e702e186d70cd16d1119ea8927347045124cb286b29297e5", size = 232494 }, + { url = "https://files.pythonhosted.org/packages/28/cc/39de85ac1d5652bc34ff2bee39ae251b1fdcaae53fab4b44cab75a432bc0/coverage-7.6.9-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:db1dab894cc139f67822a92910466531de5ea6034ddfd2b11c0d4c6257168073", size = 233611 }, + { url = "https://files.pythonhosted.org/packages/d1/2b/7eb011a9378911088708f121825a71134d0c15fac96972a0ae7a8f5a4049/coverage-7.6.9-cp310-cp310-win32.whl", hash = "sha256:41ff7b0da5af71a51b53f501a3bac65fb0ec311ebed1632e58fc6107f03b9198", size = 209712 }, + { url = "https://files.pythonhosted.org/packages/5b/35/c3f40a2269b416db34ce1dedf682a7132c26f857e33596830fa4deebabf9/coverage-7.6.9-cp310-cp310-win_amd64.whl", hash = "sha256:35371f8438028fdccfaf3570b31d98e8d9eda8bb1d6ab9473f5a390969e98717", size = 210553 }, + { url = "https://files.pythonhosted.org/packages/b1/91/b3dc2f7f38b5cca1236ab6bbb03e84046dd887707b4ec1db2baa47493b3b/coverage-7.6.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:932fc826442132dde42ee52cf66d941f581c685a6313feebed358411238f60f9", size = 207133 }, + { url = "https://files.pythonhosted.org/packages/0d/2b/53fd6cb34d443429a92b3ec737f4953627e38b3bee2a67a3c03425ba8573/coverage-7.6.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:085161be5f3b30fd9b3e7b9a8c301f935c8313dcf928a07b116324abea2c1c2c", size = 207577 }, + { url = "https://files.pythonhosted.org/packages/74/f2/68edb1e6826f980a124f21ea5be0d324180bf11de6fd1defcf9604f76df0/coverage-7.6.9-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ccc660a77e1c2bf24ddbce969af9447a9474790160cfb23de6be4fa88e3951c7", size = 239524 }, + { url = "https://files.pythonhosted.org/packages/d3/83/8fec0ee68c2c4a5ab5f0f8527277f84ed6f2bd1310ae8a19d0c5532253ab/coverage-7.6.9-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c69e42c892c018cd3c8d90da61d845f50a8243062b19d228189b0224150018a9", size = 236925 }, + { url = "https://files.pythonhosted.org/packages/8b/20/8f50e7c7ad271144afbc2c1c6ec5541a8c81773f59352f8db544cad1a0ec/coverage-7.6.9-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0824a28ec542a0be22f60c6ac36d679e0e262e5353203bea81d44ee81fe9c6d4", size = 238792 }, + { url = "https://files.pythonhosted.org/packages/6f/62/4ac2e5ad9e7a5c9ec351f38947528e11541f1f00e8a0cdce56f1ba7ae301/coverage-7.6.9-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4401ae5fc52ad8d26d2a5d8a7428b0f0c72431683f8e63e42e70606374c311a1", size = 237682 }, + { url = "https://files.pythonhosted.org/packages/58/2f/9d2203f012f3b0533c73336c74134b608742be1ce475a5c72012573cfbb4/coverage-7.6.9-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:98caba4476a6c8d59ec1eb00c7dd862ba9beca34085642d46ed503cc2d440d4b", size = 236310 }, + { url = "https://files.pythonhosted.org/packages/33/6d/31f6ab0b4f0f781636075f757eb02141ea1b34466d9d1526dbc586ed7078/coverage-7.6.9-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ee5defd1733fd6ec08b168bd4f5387d5b322f45ca9e0e6c817ea6c4cd36313e3", size = 237096 }, + { url = "https://files.pythonhosted.org/packages/7d/fb/e14c38adebbda9ed8b5f7f8e03340ac05d68d27b24397f8d47478927a333/coverage-7.6.9-cp311-cp311-win32.whl", hash = 
"sha256:f2d1ec60d6d256bdf298cb86b78dd715980828f50c46701abc3b0a2b3f8a0dc0", size = 209682 }, + { url = "https://files.pythonhosted.org/packages/a4/11/a782af39b019066af83fdc0e8825faaccbe9d7b19a803ddb753114b429cc/coverage-7.6.9-cp311-cp311-win_amd64.whl", hash = "sha256:0d59fd927b1f04de57a2ba0137166d31c1a6dd9e764ad4af552912d70428c92b", size = 210542 }, + { url = "https://files.pythonhosted.org/packages/60/52/b16af8989a2daf0f80a88522bd8e8eed90b5fcbdecf02a6888f3e80f6ba7/coverage-7.6.9-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:99e266ae0b5d15f1ca8d278a668df6f51cc4b854513daab5cae695ed7b721cf8", size = 207325 }, + { url = "https://files.pythonhosted.org/packages/0f/79/6b7826fca8846c1216a113227b9f114ac3e6eacf168b4adcad0cb974aaca/coverage-7.6.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9901d36492009a0a9b94b20e52ebfc8453bf49bb2b27bca2c9706f8b4f5a554a", size = 207563 }, + { url = "https://files.pythonhosted.org/packages/a7/07/0bc73da0ccaf45d0d64ef86d33b7d7fdeef84b4c44bf6b85fb12c215c5a6/coverage-7.6.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:abd3e72dd5b97e3af4246cdada7738ef0e608168de952b837b8dd7e90341f015", size = 240580 }, + { url = "https://files.pythonhosted.org/packages/71/8a/9761f409910961647d892454687cedbaccb99aae828f49486734a82ede6e/coverage-7.6.9-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff74026a461eb0660366fb01c650c1d00f833a086b336bdad7ab00cc952072b3", size = 237613 }, + { url = "https://files.pythonhosted.org/packages/8b/10/ee7d696a17ac94f32f2dbda1e17e730bf798ae9931aec1fc01c1944cd4de/coverage-7.6.9-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65dad5a248823a4996724a88eb51d4b31587aa7aa428562dbe459c684e5787ae", size = 239684 }, + { url = "https://files.pythonhosted.org/packages/16/60/aa1066040d3c52fff051243c2d6ccda264da72dc6d199d047624d395b2b2/coverage-7.6.9-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:22be16571504c9ccea919fcedb459d5ab20d41172056206eb2994e2ff06118a4", size = 239112 }, + { url = "https://files.pythonhosted.org/packages/4e/e5/69f35344c6f932ba9028bf168d14a79fedb0dd4849b796d43c81ce75a3c9/coverage-7.6.9-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f957943bc718b87144ecaee70762bc2bc3f1a7a53c7b861103546d3a403f0a6", size = 237428 }, + { url = "https://files.pythonhosted.org/packages/32/20/adc895523c4a28f63441b8ac645abd74f9bdd499d2d175bef5b41fc7f92d/coverage-7.6.9-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0ae1387db4aecb1f485fb70a6c0148c6cdaebb6038f1d40089b1fc84a5db556f", size = 239098 }, + { url = "https://files.pythonhosted.org/packages/a9/a6/e0e74230c9bb3549ec8ffc137cfd16ea5d56e993d6bffed2218bff6187e3/coverage-7.6.9-cp312-cp312-win32.whl", hash = "sha256:1a330812d9cc7ac2182586f6d41b4d0fadf9be9049f350e0efb275c8ee8eb692", size = 209940 }, + { url = "https://files.pythonhosted.org/packages/3e/18/cb5b88349d4aa2f41ec78d65f92ea32572b30b3f55bc2b70e87578b8f434/coverage-7.6.9-cp312-cp312-win_amd64.whl", hash = "sha256:b12c6b18269ca471eedd41c1b6a1065b2f7827508edb9a7ed5555e9a56dcfc97", size = 210726 }, + { url = "https://files.pythonhosted.org/packages/35/26/9abab6539d2191dbda2ce8c97b67d74cbfc966cc5b25abb880ffc7c459bc/coverage-7.6.9-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:899b8cd4781c400454f2f64f7776a5d87bbd7b3e7f7bda0cb18f857bb1334664", size = 207356 }, + { url = 
"https://files.pythonhosted.org/packages/44/da/d49f19402240c93453f606e660a6676a2a1fbbaa6870cc23207790aa9697/coverage-7.6.9-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:61f70dc68bd36810972e55bbbe83674ea073dd1dcc121040a08cdf3416c5349c", size = 207614 }, + { url = "https://files.pythonhosted.org/packages/da/e6/93bb9bf85497816082ec8da6124c25efa2052bd4c887dd3b317b91990c9e/coverage-7.6.9-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a289d23d4c46f1a82d5db4abeb40b9b5be91731ee19a379d15790e53031c014", size = 240129 }, + { url = "https://files.pythonhosted.org/packages/df/65/6a824b9406fe066835c1274a9949e06f084d3e605eb1a602727a27ec2fe3/coverage-7.6.9-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e216d8044a356fc0337c7a2a0536d6de07888d7bcda76febcb8adc50bdbbd00", size = 237276 }, + { url = "https://files.pythonhosted.org/packages/9f/79/6c7a800913a9dd23ac8c8da133ebb556771a5a3d4df36b46767b1baffd35/coverage-7.6.9-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c026eb44f744acaa2bda7493dad903aa5bf5fc4f2554293a798d5606710055d", size = 239267 }, + { url = "https://files.pythonhosted.org/packages/57/e7/834d530293fdc8a63ba8ff70033d5182022e569eceb9aec7fc716b678a39/coverage-7.6.9-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e77363e8425325384f9d49272c54045bbed2f478e9dd698dbc65dbc37860eb0a", size = 238887 }, + { url = "https://files.pythonhosted.org/packages/15/05/ec9d6080852984f7163c96984444e7cd98b338fd045b191064f943ee1c08/coverage-7.6.9-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:777abfab476cf83b5177b84d7486497e034eb9eaea0d746ce0c1268c71652077", size = 236970 }, + { url = "https://files.pythonhosted.org/packages/0a/d8/775937670b93156aec29f694ce37f56214ed7597e1a75b4083ee4c32121c/coverage-7.6.9-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:447af20e25fdbe16f26e84eb714ba21d98868705cb138252d28bc400381f6ffb", size = 238831 }, + { url = "https://files.pythonhosted.org/packages/f4/58/88551cb7fdd5ec98cb6044e8814e38583436b14040a5ece15349c44c8f7c/coverage-7.6.9-cp313-cp313-win32.whl", hash = "sha256:d872ec5aeb086cbea771c573600d47944eea2dcba8be5f3ee649bfe3cb8dc9ba", size = 210000 }, + { url = "https://files.pythonhosted.org/packages/b7/12/cfbf49b95120872785ff8d56ab1c7fe3970a65e35010c311d7dd35c5fd00/coverage-7.6.9-cp313-cp313-win_amd64.whl", hash = "sha256:fd1213c86e48dfdc5a0cc676551db467495a95a662d2396ecd58e719191446e1", size = 210753 }, + { url = "https://files.pythonhosted.org/packages/7c/68/c1cb31445599b04bde21cbbaa6d21b47c5823cdfef99eae470dfce49c35a/coverage-7.6.9-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:ba9e7484d286cd5a43744e5f47b0b3fb457865baf07bafc6bee91896364e1419", size = 208091 }, + { url = "https://files.pythonhosted.org/packages/11/73/84b02c6b19c4a11eb2d5b5eabe926fb26c21c080e0852f5e5a4f01165f9e/coverage-7.6.9-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e5ea1cf0872ee455c03e5674b5bca5e3e68e159379c1af0903e89f5eba9ccc3a", size = 208369 }, + { url = "https://files.pythonhosted.org/packages/de/e0/ae5d878b72ff26df2e994a5c5b1c1f6a7507d976b23beecb1ed4c85411ef/coverage-7.6.9-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d10e07aa2b91835d6abec555ec8b2733347956991901eea6ffac295f83a30e4", size = 251089 }, + { url = 
"https://files.pythonhosted.org/packages/ab/9c/0aaac011aef95a93ef3cb2fba3fde30bc7e68a6635199ed469b1f5ea355a/coverage-7.6.9-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:13a9e2d3ee855db3dd6ea1ba5203316a1b1fd8eaeffc37c5b54987e61e4194ae", size = 246806 }, + { url = "https://files.pythonhosted.org/packages/f8/19/4d5d3ae66938a7dcb2f58cef3fa5386f838f469575b0bb568c8cc9e3a33d/coverage-7.6.9-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c38bf15a40ccf5619fa2fe8f26106c7e8e080d7760aeccb3722664c8656b030", size = 249164 }, + { url = "https://files.pythonhosted.org/packages/b3/0b/4ee8a7821f682af9ad440ae3c1e379da89a998883271f088102d7ca2473d/coverage-7.6.9-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:d5275455b3e4627c8e7154feaf7ee0743c2e7af82f6e3b561967b1cca755a0be", size = 248642 }, + { url = "https://files.pythonhosted.org/packages/8a/12/36ff1d52be18a16b4700f561852e7afd8df56363a5edcfb04cf26a0e19e0/coverage-7.6.9-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:8f8770dfc6e2c6a2d4569f411015c8d751c980d17a14b0530da2d7f27ffdd88e", size = 246516 }, + { url = "https://files.pythonhosted.org/packages/43/d0/8e258f6c3a527c1655602f4f576215e055ac704de2d101710a71a2affac2/coverage-7.6.9-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8d2dfa71665a29b153a9681edb1c8d9c1ea50dfc2375fb4dac99ea7e21a0bcd9", size = 247783 }, + { url = "https://files.pythonhosted.org/packages/a9/0d/1e4a48d289429d38aae3babdfcadbf35ca36bdcf3efc8f09b550a845bdb5/coverage-7.6.9-cp313-cp313t-win32.whl", hash = "sha256:5e6b86b5847a016d0fbd31ffe1001b63355ed309651851295315031ea7eb5a9b", size = 210646 }, + { url = "https://files.pythonhosted.org/packages/26/74/b0729f196f328ac55e42b1e22ec2f16d8bcafe4b8158a26ec9f1cdd1d93e/coverage-7.6.9-cp313-cp313t-win_amd64.whl", hash = "sha256:97ddc94d46088304772d21b060041c97fc16bdda13c6c7f9d8fcd8d5ae0d8611", size = 211815 }, + { url = "https://files.pythonhosted.org/packages/93/fe/8873d88999b8e4b0d8150df554d72d6943b3938bba328fcb5422572cfd84/coverage-7.6.9-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:adb697c0bd35100dc690de83154627fbab1f4f3c0386df266dded865fc50a902", size = 207022 }, + { url = "https://files.pythonhosted.org/packages/23/c1/5dc48dfe3714a6ae9d2cd128a9df39570e46d3831f19a9be84011e767209/coverage-7.6.9-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:be57b6d56e49c2739cdf776839a92330e933dd5e5d929966fbbd380c77f060be", size = 207458 }, + { url = "https://files.pythonhosted.org/packages/e8/08/5644e101c823f0b18aa5c408037c2438fad05e6eb9f9e6581459aa0bfb92/coverage-7.6.9-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1592791f8204ae9166de22ba7e6705fa4ebd02936c09436a1bb85aabca3e599", size = 235494 }, + { url = "https://files.pythonhosted.org/packages/b2/02/995c019c0a2d70188d4d8184a0376eb28fcfb759981bb0e9961b463344fd/coverage-7.6.9-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4e12ae8cc979cf83d258acb5e1f1cf2f3f83524d1564a49d20b8bec14b637f08", size = 233416 }, + { url = "https://files.pythonhosted.org/packages/eb/d3/48ce8c9a89c7013f89ec7e01402e7a136a2e849c8f8664ea7f17b225295c/coverage-7.6.9-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb5555cff66c4d3d6213a296b360f9e1a8e323e74e0426b6c10ed7f4d021e464", size = 234546 }, + { url = 
"https://files.pythonhosted.org/packages/20/d2/11ac147bd76cc5d8a6254c9a9b6beaab51c3532ba0abdfaf669bf48d2c67/coverage-7.6.9-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:b9389a429e0e5142e69d5bf4a435dd688c14478a19bb901735cdf75e57b13845", size = 233655 }, + { url = "https://files.pythonhosted.org/packages/18/cb/6e35c5766041737f14c31ad02b5404ae6ec05d4e17ccffd69f6d99431e0a/coverage-7.6.9-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:592ac539812e9b46046620341498caf09ca21023c41c893e1eb9dbda00a70cbf", size = 232145 }, + { url = "https://files.pythonhosted.org/packages/ff/62/5de767f225e09ce959b71d1f3efc9e86e1c3de1fded85886bf705248905d/coverage-7.6.9-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a27801adef24cc30871da98a105f77995e13a25a505a0161911f6aafbd66e678", size = 233131 }, + { url = "https://files.pythonhosted.org/packages/65/72/bacb4b4c9da226e2343aa4bfebcb2bc008eda2f28aa913474aef27bfc397/coverage-7.6.9-cp39-cp39-win32.whl", hash = "sha256:8e3c3e38930cfb729cb8137d7f055e5a473ddaf1217966aa6238c88bd9fd50e6", size = 209735 }, + { url = "https://files.pythonhosted.org/packages/f4/4d/096d19dbd8998c9aaf8798078dd884f65652eb891fe7b0e657b5ac07411d/coverage-7.6.9-cp39-cp39-win_amd64.whl", hash = "sha256:e28bf44afa2b187cc9f41749138a64435bf340adfcacb5b2290c070ce99839d4", size = 210517 }, + { url = "https://files.pythonhosted.org/packages/15/0e/4ac9035ee2ee08d2b703fdad2d84283ec0bad3b46eb4ad6affb150174cb6/coverage-7.6.9-pp39.pp310-none-any.whl", hash = "sha256:f3ca78518bc6bc92828cd11867b121891d75cae4ea9e908d72030609b996db1b", size = 199270 }, +] + [[package]] name = "cryptography" version = "44.0.0" @@ -1094,6 +1163,7 @@ dependencies = [ [package.optional-dependencies] dev = [ + { name = "coverage" }, { name = "pre-commit" }, { name = "pytest" }, { name = "ruff" }, @@ -1106,6 +1176,7 @@ streamlit = [ requires-dist = [ { name = "aiohttp", specifier = ">=3.10.9,<4.0" }, { name = "asyncio", specifier = ">=3.4.3" }, + { name = "coverage", marker = "extra == 'dev'", specifier = ">=7.6.9,<8.0" }, { name = "langchain", specifier = ">=0.3.2,<0.4" }, { name = "pre-commit", marker = "extra == 'dev'", specifier = ">=4.0.1,<5.0" }, { name = "pydantic", specifier = ">=2.9.2,<3.0" },