diff --git a/README.md b/README.md
index 90e78ec5e..1679b4713 100644
--- a/README.md
+++ b/README.md
@@ -220,6 +220,8 @@ Swarms framework is not just a tool but a robust, scalable, and secure partner i
 ## Community
 
 - [Join the Swarms community here on Discord!](https://discord.gg/AJazBmhKnr)
 
+# Discovery Call
+Book a discovery call with the Swarms team to learn how to optimize and scale your swarm! [Click here to book a time that works for you!](https://calendly.com/swarm-corp/30min?month=2023-11)
 # License
 MIT
diff --git a/docs/swarms/index.md b/docs/swarms/index.md
index 56e009ac9..fc5c8e3e3 100644
--- a/docs/swarms/index.md
+++ b/docs/swarms/index.md
@@ -4,7 +4,7 @@
 
 Swarms is a modular framework that enables reliable and useful multi-agent collaboration at scale to automate real-world tasks.
 
-
+
diff --git a/swarms/__init__.py b/swarms/__init__.py
index c89a55385..338cc8f95 100644
--- a/swarms/__init__.py
+++ b/swarms/__init__.py
@@ -4,12 +4,8 @@
 warnings.filterwarnings("ignore", category=UserWarning)
 
 # disable tensorflow warnings
-
 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
 
 from swarms.agents import *  # noqa: E402, F403
 from swarms.swarms import *  # noqa: E402, F403
 from swarms.structs import *  # noqa: E402, F403
-from swarms.models import *  # noqa: E402, F403
-
-# from swarms.chunkers import *  # noqa: E402, F403
-from swarms.workers import *  # noqa: E402, F403
+from swarms.models import *  # noqa: E402, F403
\ No newline at end of file
diff --git a/swarms/models/layoutlm_document_qa.py b/swarms/models/layoutlm_document_qa.py
index e2b8d1e45..1688a2319 100644
--- a/swarms/models/layoutlm_document_qa.py
+++ b/swarms/models/layoutlm_document_qa.py
@@ -26,7 +26,9 @@ def __init__(
         model_name: str = "impira/layoutlm-document-qa",
         task_type: str = "document-question-answering",
     ):
-        self.pipeline = pipeline(self.task_type, model=self.model_name)
+        self.model_name = model_name
+        self.task_type = task_type
+        self.pipeline = pipeline(task_type, model=self.model_name)
 
     def __call__(self, task: str, img_path: str):
         """Call for model"""
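A note on the `LayoutLMDocumentQA` hunk above: the old `__init__` dereferenced `self.task_type` and `self.model_name` before either attribute had been assigned, so constructing the class always raised `AttributeError`. Below is a minimal sketch of the corrected pattern, assuming the class is a thin wrapper over a HuggingFace pipeline; the commented-out call, file path, and question are illustrative, not from the repo:

```python
from transformers import pipeline


class LayoutLMDocumentQA:
    """Document QA via a HuggingFace pipeline (sketch of the fixed class)."""

    def __init__(
        self,
        model_name: str = "impira/layoutlm-document-qa",
        task_type: str = "document-question-answering",
    ):
        # Assign the attributes before building the pipeline; the old
        # code read self.task_type / self.model_name first, so the
        # constructor crashed before the pipeline was ever created.
        self.model_name = model_name
        self.task_type = task_type
        self.pipeline = pipeline(task_type, model=model_name)


# Hypothetical usage -- "invoice.png" is an illustrative file path:
# qa = LayoutLMDocumentQA()
# print(qa.pipeline(image="invoice.png", question="What is the invoice total?"))
```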
diff --git a/swarms/structs/flow.py b/swarms/structs/flow.py
index 207c9ab9a..a2711e20c 100644
--- a/swarms/structs/flow.py
+++ b/swarms/structs/flow.py
@@ -1,25 +1,13 @@
-"""
-TODO:
-- add a method that scrapes all the methods from the llm object and outputs them as a string
-- Add tools
-- Add open interpreter style conversation
-- Add memory vector database retrieval
-- add batch processing
-- add async processing for run and batch run
-- add plan module
-- concurrent
-- Add batched inputs
-"""
-
 import asyncio
-import re
+import inspect
 import json
 import logging
+import random
+import re
 import time
 from typing import Any, Callable, Dict, List, Optional, Tuple
-from termcolor import colored
-import inspect
-import random
+
+from termcolor import colored
 
 # Prompts
 DYNAMIC_STOP_PROMPT = """
diff --git a/swarms/swarms/autobloggen.py b/swarms/swarms/autobloggen.py
index 2756825b5..dec2620f7 100644
--- a/swarms/swarms/autobloggen.py
+++ b/swarms/swarms/autobloggen.py
@@ -1,6 +1,6 @@
 from termcolor import colored
 
-from swarms.prompts.autoblogen import (
+from swarms.prompts.autobloggen import (
     DRAFT_AGENT_SYSTEM_PROMPT,
     REVIEW_PROMPT,
     SOCIAL_MEDIA_SYSTEM_PROMPT_AGENT,
diff --git a/swarms/swarms/autoscaler.py b/swarms/swarms/autoscaler.py
index 0db8db898..48b3aa97c 100644
--- a/swarms/swarms/autoscaler.py
+++ b/swarms/swarms/autoscaler.py
@@ -2,20 +2,21 @@
 import queue
 import threading
 from time import sleep
-from swarms.utils.decorators import error_decorator, log_decorator, timing_decorator
-from swarms.structs.flow import Flow
-from typing import Dict, List, Callable
+from typing import Callable, Dict, List
+
 from termcolor import colored
 
+from swarms.structs.flow import Flow
+from swarms.utils.decorators import error_decorator, log_decorator, timing_decorator
+
 
 class AutoScaler:
     """
     The AutoScaler is like a Kubernetes pod that autoscales an agent, worker, or boss!
-    # TODO Handle task assignment and task delegation
-    # TODO: User task => decomposed into very small sub tasks => sub tasks assigned to workers => workers complete and update the swarm, can ask for help from other agents.
-    # TODO: Missing, Task Assignment, Task delegation, Task completion, Swarm level communication with vector db
-
+    Wraps around a structure like SequentialWorkflow or Flow and
+    parallelizes it across multiple threads, so work is split across
+    devices and runs concurrently.
 
     Args:
         initial_agents (int, optional): Number of initial agents. Defaults to 10.
@@ -35,12 +36,13 @@ class AutoScaler:
 
     Usage
    ```
-    # usage of usage
-    auto_scaler = AutoScaler(agent=YourCustomAgent)
-    auto_scaler.start()
+    from swarms.swarms import AutoScaler
+    from swarms.structs.flow import Flow
+
+    auto_scaler = AutoScaler(agent=Flow)
+    auto_scaler.start()
 
-    for i in range(100):
-        auto_scaler.add_task9f"task {I}})
+    auto_scaler.add_task("What is your name?")
    ```
     """
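To make the new docstring concrete ("wraps around a structure like SequentialWorkflow or Flow and parallelizes it across multiple threads"), here is an illustrative sketch of the general pattern such an autoscaler follows: a shared task queue, a pool of agents, and a monitor thread that grows the pool when the queue backs up. The class name, threshold, and monitor loop are assumptions for exposition, not `AutoScaler`'s actual internals:

```python
import queue
import threading
from time import sleep


class MiniAutoScaler:
    """Illustrative only: grow a worker pool when the task queue backs up."""

    def __init__(self, agent_factory, initial_agents=10, busy_threshold=5):
        self.task_queue = queue.Queue()
        self.agent_factory = agent_factory
        self.agents = [agent_factory() for _ in range(initial_agents)]
        self.busy_threshold = busy_threshold

    def add_task(self, task):
        # Producers enqueue work; agent consumers (not shown) drain it.
        self.task_queue.put(task)

    def start(self):
        # Monitor in a daemon thread so scaling never blocks callers.
        threading.Thread(target=self._monitor, daemon=True).start()

    def _monitor(self):
        while True:
            # Scale up when pending work outgrows the current pool.
            if self.task_queue.qsize() > self.busy_threshold * len(self.agents):
                self.agents.append(self.agent_factory())
            sleep(1)
```

A production version would also scale the pool back down when agents sit idle and drain the queue on shutdown; the TODOs removed above suggest task delegation was never finished here.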
diff --git a/swarms/workers/__init__.py b/swarms/workers/__init__.py
index dc72e5656..e69de29bb 100644
--- a/swarms/workers/__init__.py
+++ b/swarms/workers/__init__.py
@@ -1 +0,0 @@
-# from swarms.workers.worker import Worker
diff --git a/swarms/workers/worker.py b/swarms/workers/worker.py
deleted file mode 100644
index 9986666aa..000000000
--- a/swarms/workers/worker.py
+++ /dev/null
@@ -1,301 +0,0 @@
-import os
-import random
-from typing import Dict, Union
-
-import faiss
-from langchain.chains.qa_with_sources.loading import (
-    load_qa_with_sources_chain,
-)
-from langchain.docstore import InMemoryDocstore
-from langchain.embeddings import OpenAIEmbeddings
-from langchain.tools import ReadFileTool, WriteFileTool
-from langchain.tools.human.tool import HumanInputRun
-from langchain.vectorstores import FAISS
-from langchain_experimental.autonomous_agents import AutoGPT
-
-from swarms.agents.message import Message
-from swarms.tools.autogpt import (
-    WebpageQATool,
-    process_csv,
-)
-from swarms.utils.decorators import error_decorator, timing_decorator
-
-# cache
-ROOT_DIR = "./data/"
-
-
-class Worker:
-    """
-    Useful for when you need to spawn an autonomous agent instance as a worker to accomplish complex tasks;
-    it can search the internet or spawn child multi-modality models to process and generate images, text, audio, and so on.
-
-    Parameters:
-    - `model_name` (str): The name of the language model to be used (default: "gpt-4").
-    - `openai_api_key` (str): The OpenAI API key (optional).
-    - `ai_name` (str): The name of the AI worker.
-    - `ai_role` (str): The role of the AI worker.
-    - `external_tools` (list): List of external tools (optional).
-    - `human_in_the_loop` (bool): Enable human-in-the-loop interaction (default: False).
-    - `temperature` (float): The temperature parameter for response generation (default: 0.5).
-    - `llm` (ChatOpenAI): Pre-initialized ChatOpenAI model instance (optional).
-    - `openai` (bool): If True, use the OpenAI language model; otherwise, use `llm` (default: True).
-
-    Usage
-    ```
-    from swarms import Worker
-
-    node = Worker(
-        ai_name="Optimus Prime",
-    )
-
-    task = "What were the winning boston marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times."
-    response = node.run(task)
-    print(response)
-    ```
-
-    llm + tools + memory
-
-    """
-
-    def __init__(
-        self,
-        ai_name: str = "Autobot Swarm Worker",
-        ai_role: str = "Worker in a swarm",
-        external_tools=None,
-        human_in_the_loop=False,
-        temperature: float = 0.5,
-        llm=None,
-        openai_api_key: str = None,
-    ):
-        self.temperature = temperature
-        self.human_in_the_loop = human_in_the_loop
-        self.llm = llm
-        self.openai_api_key = openai_api_key
-        self.ai_name = ai_name
-        self.ai_role = ai_role
-        self.coordinates = (
-            random.randint(0, 100),
-            random.randint(0, 100),
-        )  # example coordinates for proximity
-
-        self.setup_tools(external_tools)
-        self.setup_memory()
-        self.setup_agent()
-
-    def reset(self):
-        """
-        Reset the message history.
-        """
-        self.message_history = ["Here is the conversation so far"]
-
-    @property
-    def name(self):
-        """Name of the agent"""
-        return self.ai_name
-
-    def receieve(self, name: str, message: str) -> None:
-        """
-        Receive a message and update the message history.
-
-        Parameters:
-        - `name` (str): The name of the sender.
-        - `message` (str): The received message.
-        """
-        self.message_history.append(f"{name}: {message}")
-
-    def send(self) -> str:
-        """Send message history."""
-        self.agent.run(task=self.message_history)
-
-    def add(self, task, priority=0):
-        """Add a task to the task queue."""
-        self.task_queue.append((priority, task))
-
-    def setup_tools(self, external_tools):
-        """
-        Set up tools for the worker.
-
-        Parameters:
-        - `external_tools` (list): List of external tools (optional).
-
-        Example:
-        ```
-        external_tools = [MyTool1(), MyTool2()]
-        worker = Worker(model_name="gpt-4",
-                openai_api_key="my_key",
-                ai_name="My Worker",
-                ai_role="Worker",
-                external_tools=external_tools,
-                human_in_the_loop=False,
-                temperature=0.5)
-        ```
-        """
-        query_website_tool = WebpageQATool(
-            qa_chain=load_qa_with_sources_chain(self.llm)
-        )
-
-        self.tools = [
-            WriteFileTool(root_dir=ROOT_DIR),
-            ReadFileTool(root_dir=ROOT_DIR),
-            process_csv,
-            query_website_tool,
-            HumanInputRun(),
-            # compile,
-            # VQAinference,
-        ]
-        if external_tools is not None:
-            self.tools.extend(external_tools)
-
-    def setup_memory(self):
-        """
-        Set up memory for the worker.
-        """
-        openai_api_key = os.getenv("OPENAI_API_KEY") or self.openai_api_key
-        try:
-            embeddings_model = OpenAIEmbeddings(openai_api_key=openai_api_key)
-            embedding_size = 1536
-            index = faiss.IndexFlatL2(embedding_size)
-
-            self.vectorstore = FAISS(
-                embeddings_model.embed_query, index, InMemoryDocstore({}), {}
-            )
-
-        except Exception as error:
-            raise RuntimeError(
-                "Error setting up memory; perhaps try tuning the embedding size:"
-                f" {error}"
-            )
-
-    def setup_agent(self):
-        """
-        Set up the autonomous agent.
-        """
-        try:
-            self.agent = AutoGPT.from_llm_and_tools(
-                ai_name=self.ai_name,
-                ai_role=self.ai_role,
-                tools=self.tools,
-                llm=self.llm,
-                memory=self.vectorstore.as_retriever(search_kwargs={"k": 8}),
-                human_in_the_loop=self.human_in_the_loop,
-            )
-
-        except Exception as error:
-            raise RuntimeError(f"Error setting up agent: {error}")
-
-    # @log_decorator
-    @error_decorator
-    @timing_decorator
-    def run(self, task: str = None):
-        """
-        Run the autonomous agent on a given task.
-
-        Parameters:
-        - `task`: The task to be processed.
-
-        Returns:
-        - `result`: The result of the agent's processing.
-        """
-        try:
-            result = self.agent.run([task])
-            return result
-        except Exception as error:
-            raise RuntimeError(f"Error while running agent: {error}")
-
-    # @log_decorator
-    @error_decorator
-    @timing_decorator
-    def __call__(self, task: str = None):
-        """
-        Make the worker callable to run the agent on a given task.
-
-        Parameters:
-        - `task`: The task to be processed.
-
-        Returns:
-        - `results`: The results of the agent's processing.
-        """
-        try:
-            results = self.agent.run([task])
-            return results
-        except Exception as error:
-            raise RuntimeError(f"Error while running agent: {error}")
-
-    def health_check(self):
-        pass
-
-    # @log_decorator
-    @error_decorator
-    @timing_decorator
-    def chat(self, msg: str = None, streaming: bool = False):
-        """
-        Run chat
-
-        Args:
-            msg (str, optional): Message to send to the agent. Defaults to None.
-            language (str, optional): Language to use. Defaults to None.
-            streaming (bool, optional): Whether to stream the response. Defaults to False.
-
-        Returns:
-            str: Response from the agent
-
-        Usage:
-        --------------
-        agent = MultiModalAgent()
-        agent.chat("Hello")
-
-        """
-
-        # add the user's message to the history
-        self.history.append(Message("User", msg))
-
-        # process msg
-        try:
-            response = self.agent.run(msg)
-
-            # add agent's response to the history
-            self.history.append(Message("Agent", response))
-
-            # if streaming is True
-            if streaming:
-                return self._stream_response(response)
-            else:
-                response
-
-        except Exception as error:
-            error_message = f"Error processing message: {str(error)}"
-
-            # add error to history
-            self.history.append(Message("Agent", error_message))
-
-            return error_message
-
-    def _stream_response(self, response: str = None):
-        """
-        Yield the response token by token (word by word)
-
-        Usage:
-        --------------
-        for token in _stream_response(response):
-            print(token)
-
-        """
-        for token in response.split():
-            yield token
-
-    @staticmethod
-    def _message_to_dict(message: Union[Dict, str]):
-        """Convert a message"""
-        if isinstance(message, str):
-            return {"content": message}
-        else:
-            return message
-
-    def is_within_proximity(self, other_worker):
-        """Using Euclidean distance for proximity check"""
-        distance = (
-            (self.coordinates[0] - other_worker.coordinates[0]) ** 2
-            + (self.coordinates[1] - other_worker.coordinates[1]) ** 2
-        ) ** 0.5
-        return distance < 10  # threshold for proximity
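For readers tracking what this deletion removes: `Worker` coupled an AutoGPT planning loop to FAISS-backed retrieval memory and a small toolbelt. A condensed sketch of that same wiring, reusing the calls from the deleted file; the LLM choice and the empty tool list are placeholders you must fill in, and the final `run` call is left commented:

```python
import os

import faiss
from langchain.chat_models import ChatOpenAI
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain_experimental.autonomous_agents import AutoGPT

# FAISS index sized for OpenAI's 1536-dimensional embeddings,
# exactly as the deleted setup_memory() built it.
embeddings = OpenAIEmbeddings(openai_api_key=os.getenv("OPENAI_API_KEY"))
index = faiss.IndexFlatL2(1536)
vectorstore = FAISS(embeddings.embed_query, index, InMemoryDocstore({}), {})

# The deleted setup_agent() wired the retriever into AutoGPT like this;
# supply real tools (e.g. ReadFileTool/WriteFileTool, as the old file did).
llm = ChatOpenAI(temperature=0.5)
agent = AutoGPT.from_llm_and_tools(
    ai_name="Autobot Swarm Worker",
    ai_role="Worker in a swarm",
    tools=[],  # placeholder: the old Worker registered file, CSV, and web QA tools
    llm=llm,
    memory=vectorstore.as_retriever(search_kwargs={"k": 8}),
)
# agent.run(["your task here"])  # illustrative task string
```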