diff --git a/.gitignore b/.gitignore
index 8d215a9b..4eb73496 100644
--- a/.gitignore
+++ b/.gitignore
@@ -149,6 +149,5 @@ private.sh
 api_docs/_build/
 api_docs/_static/
 cf
-locust_file.py
 demo/
 logs.py
diff --git a/api_docs/conf.py b/api_docs/conf.py
index 951d0c4b..51ff9249 100644
--- a/api_docs/conf.py
+++ b/api_docs/conf.py
@@ -14,7 +14,7 @@
 project = "ChainFury"
 copyright = "2023, NimbleBox Engineering"
 author = "NimbleBox Engineering"
-release = "1.6.0"
+release = "1.6.1"
 
 # -- General configuration ---------------------------------------------------
 # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
diff --git a/api_docs/examples/agent-theory.rst b/api_docs/examples/agent-theory.rst
new file mode 100644
index 00000000..2afc9cd9
--- /dev/null
+++ b/api_docs/examples/agent-theory.rst
@@ -0,0 +1,56 @@
+Fury Agents Manifesto
+=====================
+
+**Starting date: 21st October, 2023**
+
+
+ChainFury's `first commit`_ was on 7th April, 2023. It has been about six months since then, and it has seen a lot
+of production usage. With multiple API changes and engines, we are now at a stable place. This is also a good time
+to check up on the things that have been released in the wild till now.
+
+tl;dr
+-----
+
+Predictable, automated chains as agents that use tree-search algorithms to find a solution to a problem with a
+given set of actions, with the ability to create new actions and learn from feedback.
+
+Agents
+------
+
+Several "agent-like" systems have been released. Some can create code, others can perform advanced searching.
+Ultimately, all of them can be modelled as a Chain running different algorithms. ``chainfury`` can support all of
+these algorithms and has a robust, type-based chaining engine, which makes building agents the next logical step.
+There is a lot of theory and academic research on the topic of agents, each approach with its own tradeoffs. But
+first, let's start with the requirements of an agent:
+
+* Agent should be able to execute tasks without human intervention
+* Agent should stop when it can't proceed
+* Agent should be interruptible to take in feedback
+* Agent should take inputs from its environment
+* Agent should be able to remember things over time
+* Agent should be predictable in its behaviour, and debuggable
+
+Von-Neumann machine
+~~~~~~~~~~~~~~~~~~~
+
+We think of the agent as a `Von-Neumann machine`_, which means each chain has a complete I/O mechanism where each
+input and output can be accessed independently. ``chainfury`` can use different memory systems (VectorDB, etc.),
+meaning it can persist data over time. For the CPU analogy we have :py:class:`chainfury.base.Chain`, which models
+the execution as a DAG of :py:class:`chainfury.base.Node` objects, where each node contains one unit step of the
+chain. We can parallelise and speed up executions by using :py:func:`chainfury.utils.threaded_map`.
+
+``chainfury`` is already being used in production, and with the infrastructure layer sorted out we can think about
+what to build on top of it.
+
+Automated
+~~~~~~~~~
+
+One of the most important requirements is that these agents be automated and run without a human in the loop.
+
+Edits
+-----
+
+.. all the links here
+
+.. _first commit: https://github.com/NimbleBoxAI/ChainFury/commit/64a5f7b0fcf3d8bcce0cde6ee974b659ebe01b68
+.. _Von-Neumann machine: https://blog.nimblebox.ai/new-flow-engine-from-scratch
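To make the manifesto's Chain-as-DAG analogy concrete, here is a minimal sketch of a two-node chain. It mirrors the registry calls that appear in the ``stories/`` diffs later in this changeset (``ai_actions_registry.register``, ``Edge``, ``Chain``); the node ids, prompts, and model parameters are illustrative only:

.. code-block:: python

    from chainfury import Chain, Edge, ai_actions_registry

    # node 1: figure out who said a quote
    find_quote = ai_actions_registry.register(
        node_id="find-quote",
        model_id="openai-chat",
        model_params={"model": "gpt-3.5-turbo"},
        fn={"messages": [{"role": "user", "content": "Who said '{{ quote }}'? Reply in under 10 words."}]},
        outputs={"chat_reply": ("choices", 0, "message", "content")},
    )

    # node 2: tell a short story about that character
    char_story = ai_actions_registry.register(
        node_id="tell-character-story",
        model_id="openai-chat",
        model_params={"model": "gpt-3.5-turbo"},
        fn={"messages": [{"role": "user", "content": "Tell a 2 line story about '{{ character_name }}'"}]},
        outputs={"characters_story": ("choices", 0, "message", "content")},
    )

    # wire the DAG: output "chat_reply" of node 1 feeds input "character_name" of node 2
    chain = Chain(
        [find_quote, char_story],
        [Edge(find_quote.id, "chat_reply", char_story.id, "character_name")],
        sample={"quote": ""},
        main_in="quote",
        main_out=f"{char_story.id}/characters_story",
    )

    # running it returns the main output and the full intermediate representation
    out, full_ir = chain({"quote": "hello there nice world", "openai_api_key": "..."})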
diff --git a/api_docs/examples/qa-rag.rst b/api_docs/examples/qa-rag.rst
index 39edf118..b7b0d288 100644
--- a/api_docs/examples/qa-rag.rst
+++ b/api_docs/examples/qa-rag.rst
@@ -1,5 +1,5 @@
-Question Answering using ChainFury
-==================================
+(RAG) Q/A with ChainFury
+========================
 
 One of the first use cases of LLM powered apps is question answering. This is how you should think about this problem:
diff --git a/api_docs/index.rst b/api_docs/index.rst
index 33a9b758..03277e28 100644
--- a/api_docs/index.rst
+++ b/api_docs/index.rst
@@ -66,6 +66,12 @@ Read the latest blog posts:
    source/chainfury.components
    examples/components-list
 
+.. toctree::
+   :maxdepth: 2
+   :caption: Research
+
+   examples/agent-theory
+
 .. toctree::
    :maxdepth: 2
    :caption: Server
diff --git a/cf_internal b/cf_internal
index 6d40af97..ef45bcff 160000
--- a/cf_internal
+++ b/cf_internal
@@ -1 +1 @@
-Subproject commit 6d40af97e2e48d7a84f95b5e3a8ca426314a3e45
+Subproject commit ef45bcff3e9b3e20d942ff739820f83db0957e55
diff --git a/chainfury/agent.py b/chainfury/agent.py
index a082e976..842e1be7 100644
--- a/chainfury/agent.py
+++ b/chainfury/agent.py
@@ -21,6 +21,7 @@
     Node,
     Model,
     Var,
+    Chain,
 )
 
 # Models
diff --git a/chainfury/components/nbx/__init__.py b/chainfury/components/nbx/__init__.py
deleted file mode 100644
index 83054167..00000000
--- a/chainfury/components/nbx/__init__.py
+++ /dev/null
@@ -1,113 +0,0 @@
-import random
-import requests
-from typing import Any, List, Optional
-
-from chainfury import Secret, model_registry, exponential_backoff, Model, UnAuthException
-from chainfury.components.const import Env
-
-
-def nbx_chat_api(
-    inputs: str,
-    nbx_deploy_url: str = "",
-    nbx_header_token: Secret = Secret(""),
-    best_of: int = 1,
-    decoder_input_details: bool = True,
-    details: bool = True,
-    do_sample: bool = True,
-    max_new_tokens: int = 20,
-    repetition_penalty: float = 1.03,
-    return_full_text: bool = False,
-    seed: int = None,  # type: ignore # see components README.md
-    stop: List[str] = [],
-    temperature: float = 0.5,
-    top_k: int = 10,
-    top_p: float = 0.95,
-    truncate: int = None,  # type: ignore # see components README.md
-    typical_p: float = 0.95,
-    watermark: bool = True,
-    *,
-    retry_count: int = 3,
-    retry_delay: int = 1,
-) -> Any:
-    """
-    Returns a JSON object containing the OpenAI's API chat response.
-
-    Args:
-        inputs (str): The prompt to send to the API.
-        nbx_deploy_url (str): The NBX deploy URL. Defaults to the value of NBX_DEPLOY_URL environment variable.
-        nbx_header_token (Secret): The NBX header token. Defaults to the value of NBX_DEPLOY_KEY environment variable.
-        best_of (int): The number of outputs to generate and return. Defaults to 1.
-        decoder_input_details (bool): Whether to return the decoder input details. Defaults to True.
-        details (bool): Whether to return the details. Defaults to True.
-        do_sample (bool): Whether to use sampling. Defaults to True.
-        max_new_tokens (int): The maximum number of tokens to generate. Defaults to 20.
-        repetition_penalty (float): The repetition penalty. Defaults to 1.03.
-        return_full_text (bool): Whether to return the full text. Defaults to False.
-        seed (int): The seed to use for random number generation. Defaults to a random integer between 0 and 2^32 - 1.
-        stop (List[str]): The stop tokens. Defaults to an empty list.
-        temperature (float): The temperature. Defaults to 0.5.
-        top_k (int): The top k. Defaults to 10.
- top_p (float): The top p. Defaults to 0.95. - truncate (int): The truncate. Defaults to None. - typical_p (float): The typical p. Defaults to 0.95. - watermark (bool): Whether to include the watermark. Defaults to True. - retry_count (int): The number of times to retry the API call. Defaults to 3. - retry_delay (int): The number of seconds to wait between retries. Defaults to 1. - - Returns: - Any: The JSON object containing the OpenAI's API chat response. - """ - if not nbx_deploy_url: - nbx_deploy_url = Env.NBX_DEPLOY_URL("") - if not nbx_deploy_url: - raise Exception("NBX_DEPLOY_URL not set, please set it in your environment or pass it as an argument") - - if not nbx_header_token: - nbx_header_token = Secret(Env.NBX_DEPLOY_KEY("")).value # type: ignore - if not nbx_header_token: - raise Exception("NBX_DEPLOY_KEY not set, please set it in your environment or pass it as an argument") - - seed = seed or random.randint(0, 2**32 - 1) - - def _fn(): - r = requests.post( - nbx_deploy_url + "/generate", - headers={"NBX-KEY": nbx_header_token}, - json={ - "inputs": inputs, - "parameters": { - "best_of": best_of, - "decoder_input_details": decoder_input_details, - "details": details, - "do_sample": do_sample, - "max_new_tokens": max_new_tokens, - "repetition_penalty": repetition_penalty, - "return_full_text": return_full_text, - "seed": seed, - "stop": stop, - "temperature": temperature, - "top_k": top_k, - "top_p": top_p, - "truncate": truncate, - "typical_p": typical_p, - "watermark": watermark, - }, - }, - ) - if r.status_code == 401: - raise UnAuthException(r.text) - if r.status_code != 200: - raise Exception(f"OpenAI API returned status code {r.status_code}: {r.text}") - return r.json() - - return exponential_backoff(_fn, max_retries=retry_count, retry_delay=retry_delay) - - -model_registry.register( - model=Model( - collection_name="nbx", - id="nbx-deploy", - fn=nbx_chat_api, - description="Call NimbleBox LLMOps deploy API", - ), -) diff --git a/chainfury/components/stability/__init__.py b/chainfury/components/stability/__init__.py index c57d9575..ace15242 100644 --- a/chainfury/components/stability/__init__.py +++ b/chainfury/components/stability/__init__.py @@ -5,7 +5,7 @@ You need to have `stability_sdk` installed to use this component. You can install it with: .. 
code-block:: bash - + pip install chainfury[stability] # or to install all the components, note this will keep on growing pip install chainfury[all] diff --git a/chainfury/utils.py b/chainfury/utils.py index dee3f3d8..7c59d0e6 100644 --- a/chainfury/utils.py +++ b/chainfury/utils.py @@ -144,7 +144,10 @@ def get_logger() -> logging.Logger: logger.setLevel(getattr(logging, lvl)) log_handler = logging.StreamHandler() log_handler.setFormatter( - logging.Formatter("[%(asctime)s] [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s", datefmt="%Y-%m-%dT%H:%M:%S%z") + logging.Formatter( + "[%(asctime)s] [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s", + datefmt="%Y-%m-%dT%H:%M:%S%z", + ) ) logger.addHandler(log_handler) return logger @@ -208,7 +211,7 @@ def exponential_backoff(foo, *args, max_retries=2, retry_delay=1, **kwargs) -> D def get_files_in_folder( folder, - ext=["*"], + ext="*", ig_pat: str = "", abs_path: bool = True, followlinks: bool = False, @@ -216,6 +219,7 @@ def get_files_in_folder( """Get files with `ext` in `folder`""" # this method is faster than glob all_paths = [] + ext = [ext] if isinstance(ext, str) else ext _all = "*" in ext # wildcard means everything so speed up ignore_pat = re.compile(ig_pat) @@ -252,7 +256,9 @@ def joinp(x: str, *args) -> str: """ -def threaded_map(fn, inputs: List[Tuple[Any]], wait: bool = True, max_threads=20, _name: str = "") -> Union[Dict[Future, int], List[Any]]: +def threaded_map( + fn, inputs: List[Tuple], wait: bool = True, max_threads=20, post_fn=None, _name: str = "" +) -> Union[Dict[Future, int], List[Any]]: """ inputs is a list of tuples, each tuple is the input for single invocation of fn. order is preserved. @@ -261,6 +267,7 @@ def threaded_map(fn, inputs: List[Tuple[Any]], wait: bool = True, max_threads=20 inputs (List[Tuple[Any]]): All the inputs to the function, can be a generator wait (bool, optional): If true, wait for all the threads to finish, otherwise return a dict of futures. Defaults to True. max_threads (int, optional): The maximum number of threads to use. Defaults to 20. + post_fn (function, optional): A function to call with the result. Defaults to None. _name (str, optional): The name of the thread pool. Defaults to "". """ _name = _name or str(uuid4()) @@ -273,12 +280,48 @@ def threaded_map(fn, inputs: List[Tuple[Any]], wait: bool = True, max_threads=20 for future in as_completed(futures): try: i, res = future.result() + if post_fn: + res = post_fn(res) results[i] = res except Exception as e: raise e return results +def batched(iterable, n): + """Convert any ``iterable`` to a generator of batches of size ``n``, last one may be smaller. + Python 3.12 has ``itertools.batched`` which does the same thing. + + Example: + >>> for x in batched(range(10), 3): + ... print(x) + [0, 1, 2] + [3, 4, 5] + [6, 7, 8] + [9] + + Args: + iterable (Iterable): The iterable to convert to batches + n (int): The batch size + + Yields: + Iterator: The batched iterator + """ + done = False + buffer = [] + _iter = iter(iterable) + while not done: + try: + buffer.append(next(_iter)) + if len(buffer) == n: + yield buffer + buffer = [] + except StopIteration: + done = True + if buffer: + yield buffer + + """ Ser/Deser """ @@ -326,6 +369,11 @@ def from_json(fp: str = "") -> Dict[str, Any]: return json.loads(fp) +""" +Time management should be dead easy. +""" + + class SimplerTimes: """ A class that provides a simpler interface to datetime and time modules. 
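Three of the ``chainfury/utils.py`` changes above compose naturally: ``get_files_in_folder`` now also accepts a plain string for ``ext``, ``threaded_map`` gains a ``post_fn`` hook that runs on every result, and ``batched`` chunks any iterable. A minimal sketch of them working together — the folder, extension, and worker function are illustrative assumptions:

.. code-block:: python

    from chainfury.utils import batched, get_files_in_folder, threaded_map

    # ``ext`` may now be a single string instead of a list of patterns
    files = get_files_in_folder("api_docs", ext=".rst")  # hypothetical folder/extension

    def word_count(fp: str) -> int:
        # unit of work: each tuple in ``inputs`` becomes the *args of one call
        with open(fp) as f:
            return len(f.read().split())

    # process files in batches of 8; results preserve input order, and
    # ``post_fn`` is applied to each result before it is collected
    for chunk in batched(files, 8):
        counts = threaded_map(
            word_count,
            inputs=[(fp,) for fp in chunk],
            post_fn=lambda n: {"words": n},
        )
        print(counts)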
diff --git a/chainfury/version.py b/chainfury/version.py index 7831c4b2..00777212 100644 --- a/chainfury/version.py +++ b/chainfury/version.py @@ -1,4 +1,4 @@ -__version__ = "1.6.0" +__version__ = "1.6.1" _major, _minor, _patch = __version__.split(".") _major = int(_major) _minor = int(_minor) diff --git a/pyproject.toml b/pyproject.toml index 942211e2..fa651942 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "chainfury" -version = "1.6.0" +version = "1.6.1" description = "ChainFury is a powerful tool that simplifies the creation and management of chains of prompts, making it easier to build complex chat applications using LLMs." authors = ["NimbleBox Engineering "] license = "Apache 2.0" diff --git a/stories/fury.json b/stories/fury.json deleted file mode 100644 index 302d3a3f..00000000 --- a/stories/fury.json +++ /dev/null @@ -1,481 +0,0 @@ -{ - "nodes": [ - { - "id": "find-quote", - "type": "ai-powered", - "fn": { - "node_id": "find-quote", - "model": { - "collection_name": "openai", - "model_id": "openai-chat", - "description": "Given a list of messages describing a conversation, the model will return a response.", - "tags": [], - "vars": [] - }, - "model_params": { - "model": "gpt-3.5-turbo" - }, - "fn": { - "messages": [ - { - "role": "user", - "content": "'{{ quote }}' \nWho said this quote, if you don't know then reply with a random character from history world? Give reply in less than 10 words." - } - ] - }, - "action_source": "jinja-template" - }, - "description": "", - "fields": [ - { - "type": "string", - "required": true, - "name": "quote" - }, - { - "type": "string", - "password": true, - "required": true, - "show": true, - "name": "openai_api_key" - }, - { - "type": "string", - "required": true, - "show": true, - "name": "model" - }, - { - "type": "array", - "items": [ - { - "type": "object", - "additionalProperties": { - "type": "string" - } - } - ], - "required": true, - "show": true, - "name": "messages" - }, - { - "type": "number", - "placeholder": "1.0", - "show": true, - "name": "temperature" - }, - { - "type": "number", - "placeholder": "1.0", - "show": true, - "name": "top_p" - }, - { - "type": "number", - "placeholder": "1", - "show": true, - "name": "n" - }, - { - "type": [ - { - "type": "string" - }, - { - "type": "array", - "items": [ - { - "type": "string" - } - ] - } - ], - "show": true, - "name": "stop" - }, - { - "type": "number", - "placeholder": "1024", - "show": true, - "name": "max_tokens" - }, - { - "type": "number", - "placeholder": "0.0", - "show": true, - "name": "presence_penalty" - }, - { - "type": "number", - "placeholder": "0.0", - "show": true, - "name": "frequency_penalty" - }, - { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "placeholder": "{}", - "show": true, - "name": "logit_bias" - }, - { - "type": "string", - "show": true, - "name": "user" - }, - { - "type": "boolean", - "placeholder": "False", - "show": true, - "name": "raw" - } - ], - "outputs": [ - { - "type": "any", - "name": "chat_reply", - "loc": [ - "choices", - 0, - "message", - "content" - ] - } - ] - }, - { - "id": "tell-character-story", - "type": "ai-powered", - "fn": { - "node_id": "tell-character-story", - "model": { - "collection_name": "openai", - "model_id": "openai-chat", - "description": "Given a list of messages describing a conversation, the model will return a response.", - "tags": [], - "vars": [] - }, - "model_params": { - "model": "gpt-3.5-turbo" - }, - "fn": { - "messages": [ - { - "role": "user", 
- "content": "Tell a small {{ story_size }} line story about '{{ character_name }}'" - } - ] - }, - "action_source": "jinja-template" - }, - "description": "", - "fields": [ - { - "type": "string", - "required": true, - "name": "story_size" - }, - { - "type": "string", - "required": true, - "name": "character_name" - }, - { - "type": "string", - "password": true, - "required": true, - "show": true, - "name": "openai_api_key" - }, - { - "type": "string", - "required": true, - "show": true, - "name": "model" - }, - { - "type": "array", - "items": [ - { - "type": "object", - "additionalProperties": { - "type": "string" - } - } - ], - "required": true, - "show": true, - "name": "messages" - }, - { - "type": "number", - "placeholder": "1.0", - "show": true, - "name": "temperature" - }, - { - "type": "number", - "placeholder": "1.0", - "show": true, - "name": "top_p" - }, - { - "type": "number", - "placeholder": "1", - "show": true, - "name": "n" - }, - { - "type": [ - { - "type": "string" - }, - { - "type": "array", - "items": [ - { - "type": "string" - } - ] - } - ], - "show": true, - "name": "stop" - }, - { - "type": "number", - "placeholder": "1024", - "show": true, - "name": "max_tokens" - }, - { - "type": "number", - "placeholder": "0.0", - "show": true, - "name": "presence_penalty" - }, - { - "type": "number", - "placeholder": "0.0", - "show": true, - "name": "frequency_penalty" - }, - { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "placeholder": "{}", - "show": true, - "name": "logit_bias" - }, - { - "type": "string", - "show": true, - "name": "user" - }, - { - "type": "boolean", - "placeholder": "False", - "show": true, - "name": "raw" - } - ], - "outputs": [ - { - "type": "any", - "name": "characters_story", - "loc": [ - "choices", - 0, - "message", - "content" - ] - } - ] - }, - { - "id": "deep-rap-quote", - "type": "ai-powered", - "fn": { - "node_id": "deep-rap-quote", - "model": { - "collection_name": "openai", - "model_id": "openai-chat", - "description": "Given a list of messages describing a conversation, the model will return a response.", - "tags": [], - "vars": [] - }, - "model_params": { - "model": "gpt-3.5-turbo" - }, - "fn": { - "messages": [ - { - "role": "user", - "content": "give a deep 8 line rap quote on life in the style of {{ character }}." 
- } - ] - }, - "action_source": "jinja-template" - }, - "description": "AI will tell a joke on any topic you tell it to talk about", - "fields": [ - { - "type": "string", - "required": true, - "name": "character" - }, - { - "type": "string", - "password": true, - "required": true, - "show": true, - "name": "openai_api_key" - }, - { - "type": "string", - "required": true, - "show": true, - "name": "model" - }, - { - "type": "array", - "items": [ - { - "type": "object", - "additionalProperties": { - "type": "string" - } - } - ], - "required": true, - "show": true, - "name": "messages" - }, - { - "type": "number", - "placeholder": "1.0", - "show": true, - "name": "temperature" - }, - { - "type": "number", - "placeholder": "1.0", - "show": true, - "name": "top_p" - }, - { - "type": "number", - "placeholder": "1", - "show": true, - "name": "n" - }, - { - "type": [ - { - "type": "string" - }, - { - "type": "array", - "items": [ - { - "type": "string" - } - ] - } - ], - "show": true, - "name": "stop" - }, - { - "type": "number", - "placeholder": "1024", - "show": true, - "name": "max_tokens" - }, - { - "type": "number", - "placeholder": "0.0", - "show": true, - "name": "presence_penalty" - }, - { - "type": "number", - "placeholder": "0.0", - "show": true, - "name": "frequency_penalty" - }, - { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "placeholder": "{}", - "show": true, - "name": "logit_bias" - }, - { - "type": "string", - "show": true, - "name": "user" - }, - { - "type": "boolean", - "placeholder": "False", - "show": true, - "name": "raw" - } - ], - "outputs": [ - { - "type": "any", - "name": "chat_reply", - "loc": [ - "choices", - 0, - "message", - "content" - ] - } - ] - } - ], - "edges": [ - { - "src_node_id": "find-quote", - "trg_node_id": "tell-character-story", - "connections": [ - [ - "chat_reply", - "character_name" - ] - ] - }, - { - "src_node_id": "tell-character-story", - "trg_node_id": "deep-rap-quote", - "connections": [ - [ - "characters_story", - "character" - ] - ] - } - ], - "topo_order": [ - "find-quote", - "tell-character-story", - "deep-rap-quote" - ], - "sample": { - "openai_api_key": "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", - "quote": "hello there nice world", - "story_size": 2 - }, - "main_in": "quote", - "main_out": "deep-rap-quote/chat_reply" -} \ No newline at end of file diff --git a/stories/fury_algo.py b/stories/fury_algo.py index 84c702c5..3c4f985a 100644 --- a/stories/fury_algo.py +++ b/stories/fury_algo.py @@ -1,37 +1,10 @@ # what are some interesting algorithms that we can build using fury? 
-import os -import json +import re import fire from pprint import pformat -from requests import Session -from typing import Dict, Any -from chainfury import ( - Chain, - programatic_actions_registry, - model_registry, - Node, - ai_actions_registry, - Edge, -) - - -def _get_openai_token() -> str: - openai_token = os.environ.get("OPENAI_TOKEN", "") - if not openai_token: - raise ValueError("OpenAI token not found") - return openai_token - - -def _get_nbx_token() -> Dict[str, str]: - nbx_token = os.environ.get("NBX_DEPLOY_KEY", "") - if not nbx_token: - raise ValueError("NBX token not found") - nbx_url = os.environ.get("NBX_DEPLOY_URL", "") - if not nbx_url: - raise ValueError("NBX url not found") - return {"nbx_deploy_url": nbx_url, "nbx_header_token": nbx_token} +from chainfury import Chain, ai_actions_registry, Edge class Actions: @@ -203,21 +176,21 @@ class Actions: class Chains: story = Chain( [Actions.sensational_story], - sample={"openai_api_key": _get_openai_token(), "scene": ""}, + sample={"scene": ""}, main_in="scene", main_out=f"{Actions.sensational_story.id}/story", ) # type: ignore story_nbx = Chain( [Actions.sensational_story_nbx], - sample={"scene": "", **_get_nbx_token()}, + sample={"scene": ""}, main_in="scene", main_out=f"{Actions.sensational_story_nbx.id}/story", ) # type: ignore feedback = Chain( [Actions.people_feedback], - sample={"openai_api_key": _get_openai_token(), "story": ""}, + sample={"story": ""}, main_in="story", main_out=f"{Actions.people_feedback.id}/story_accepted", ) # type: ignore @@ -227,7 +200,7 @@ class Chains: [ Edge(Actions.sensational_story.id, "story", Actions.catchy_headline.id, "story"), ], - sample={"openai_api_key": _get_openai_token(), "scene": ""}, + sample={"scene": ""}, main_in="scene", main_out=f"{Actions.catchy_headline.id}/headline", ) @@ -238,7 +211,7 @@ class Chains: Edge(Actions.topic_to_synopsis.id, "synopsis", Actions.sensational_story.id, "scene"), Edge(Actions.sensational_story.id, "story", Actions.catchy_headline.id, "story"), ], - sample={"openai_api_key": _get_openai_token(), "topics": ""}, + sample={"topics": ""}, main_in="topics", main_out=f"{Actions.catchy_headline.id}/headline", ) @@ -256,7 +229,7 @@ class Chains: Edge(Actions.catchy_headline.id, "headline", Actions.sensational_story_generator.id, "headline"), Edge(Actions.topic_to_synopsis.id, "synopsis", Actions.sensational_story_generator.id, "sub_headline"), ], - sample={"openai_api_key": _get_openai_token(), "topics": ""}, + sample={"topics": ""}, main_in="topics", main_out=f"{Actions.catchy_headline.id}/headline", ) @@ -264,7 +237,7 @@ class Chains: good_story = Chain( [Actions.sensational_story, Actions.corrupt_editor_check], [Edge(Actions.sensational_story.id, "story", Actions.corrupt_editor_check.id, "story")], # type: ignore - sample={"openai_api_key": _get_openai_token(), "scene": ""}, + sample={"scene": ""}, main_in="scene", main_out=f"{Actions.corrupt_editor_check.id}/story_accepted", ) # type: ignore @@ -289,16 +262,6 @@ def chain_of_thought(scene: str, v: bool = False): return out -def chain_of_thought_topic(topics: str, v: bool = False): - if isinstance(topics, tuple): - topics = ", ".join(topics) - out, thoughts = Chains.topic_to_story(topics) # type: ignore - if v: - print("BUFF:", pformat(thoughts)) - print(" OUT:", out) - return out - - # self consistency with CoT (CoT-SC) # https://arxiv.org/pdf/2203.11171.pdf def cot_consistency(scene, n: int = 3, v: bool = False, pb: bool = False): @@ -334,8 +297,6 @@ def __init__(self, max_search_space: int = 5): 
self.value_fn = Chains.feedback def __call__(self, topics: str, v: bool = False): - import re - done = False total_searches = 0 result = None @@ -387,14 +348,8 @@ def tree_of_thought(topics: str, max_search_space: int = 5, v: bool = False): "algo": { "io": io_prompting, "cot": chain_of_thought, - "cot_t": chain_of_thought_topic, "cot-sc": cot_consistency, "tot": tree_of_thought, }, - "chains": Chains, - "print": { - "action": lambda x: print(getattr(Actions, x).to_json()), - "chain": lambda x: print(getattr(Chains, x).to_json()), - }, } ) diff --git a/stories/fury_core.py b/stories/fury_core.py deleted file mode 100644 index 4895f8f4..00000000 --- a/stories/fury_core.py +++ /dev/null @@ -1,314 +0,0 @@ -import os -import json -import fire -from pprint import pformat -from requests import Session -from typing import Dict, Any - -from chainfury import ( - Chain, - programatic_actions_registry, - model_registry, - Node, - ai_actions_registry, - Edge, -) - - -def _get_openai_token() -> str: - openai_token = os.environ.get("OPENAI_TOKEN", "") - if not openai_token: - raise ValueError("OpenAI token not found") - return openai_token - - -def _get_cf_token() -> str: - cf_token = os.environ.get("CF_TOKEN", "") - if not cf_token: - raise ValueError("CF_TOKEN token not found") - return cf_token - - -class _Nodes: - def callp(self, fail: bool = False): - """Call a programatic action""" - node = programatic_actions_registry.get("call_api_requests") - print("NODE:", node) - data = { - "method": "get", - "url": "http://127.0.0.1:8000/api/v1/fury/components/", - "headers": {"token": _get_cf_token()}, - } - if fail: - data["some-key"] = "some-value" - out, err = node(data) - if err: - print("ERROR:", err) - print("TRACE:", out) - return - print("OUT:", out) - - def callm(self, fail: bool = False): - """Call a model""" - model = model_registry.get("openai-completion") - print("Found model:", model) - data = { - "openai_api_key": _get_openai_token(), - "model": "text-curie-001", - "prompt": "What comes after 0,1,1,2?", - } - if fail: - data["model"] = "this-does-not-exist" - out, err = model(data) - if err: - print("ERROR:", err) - print("TRACE:", out) - return - print("OUT:", out) - - def callai(self, fail: bool = False): - """Call the AI action""" - if fail: - action_id = "write-a-poem" - else: - action_id = "hello-world" - action = ai_actions_registry.get(action_id) - # print(action) - - out, err = action( - { - "openai_api_key": _get_openai_token(), - "message": "hello world", - "temperature": 0.12, - # "style": "snoop dogg", # uncomment to get the fail version running correctly - } - ) - if err: - print("ERROR:", err) - print("TRACE:", out) - return - print("OUT:", out) - - def callai_chat(self, character: str = "a mexican taco"): - """Call the AI action""" - action_id = "deep-rap-quote" - action = ai_actions_registry.get(action_id) - print("ACTION:", action) - - out, err = action( - { - "openai_api_key": _get_openai_token(), - "character": character, - }, - ) # type: ignore - if err: - print("ERROR:", err) - print("TRACE:", out) - return - print("OUT:", out) - - -class _Chain: - def callpp(self): - p1 = programatic_actions_registry.get("call_api_requests") - p2 = programatic_actions_registry.get("regex_substitute") - e = Edge(p1.id, "text", p2.id, "text") # type: ignore - c = Chain([p1, p2], [e], sample={"url": ""}, main_in="url", main_out=f"{p2.id}/text") # type: ignore - print("CHAIN:", c) - - # run the chain - out, full_ir = c( - { - "method": "GET", - "url": "http://127.0.0.1:8000/api/v1/fury/", - 
"headers": {"token": _get_cf_token()}, - "pattern": "JWT", - "repl": "booboo-hooooo", - }, - ) - print("BUFF:", pformat(full_ir)) - print("OUT:", pformat(out)) - - def callpj(self, fail: bool = False): - p = programatic_actions_registry.get("call_api_requests") - - # create a new ai action to build a poem - NODE_ID = "sarcastic-agent" - j = ai_actions_registry.register( - node_id=NODE_ID, - description="AI will add two numbers and give a sarscastic response. J-type action", - model_id="openai-chat", - model_params={ - "model": "gpt-3.5-turbo", - }, - fn={ - "messages": [ - { - "role": "user", - "content": "Hello there, can you add these two numbers for me? 1023, 97. Be super witty in all responses.", - }, - { - "role": "assistant", - "content": "It is 1110. WTF I mean I am a powerful AI, I have better things to do!", - }, - { - "role": "user", - "content": "Can you explain this json to me? {{ json_thingy }}", - }, - ], - }, - outputs={ - "chat_reply": ("choices", 0, "message", "content"), - }, - ) - print("ACTION:", j) - - e = Edge(p.id, "text", j.id, "json_thingy") - - c = Chain( - [p, j], - [e], - sample={ - "method": "GET", - }, - main_in="url", - main_out=f"{j.id}/chat_reply", - ) - print("CHAIN:", c) - - # run the chain - out, full_ir = c( - { - "method": "get", - "url": "http://127.0.0.1:8000/api/v1/fury/", - "headers": {"token": _get_cf_token()}, - "openai_api_key": _get_openai_token(), - } - ) - print("BUFF:", pformat(full_ir)) - print("OUT:", pformat(out)) - - def calljj(self): - j1 = ai_actions_registry.get("hello-world") - print("ACTION:", j1) - j2 = ai_actions_registry.get("deep-rap-quote") - print("ACTION:", j2) - e = Edge(j1.id, "generations", j2.id, "character") - c = Chain([j1, j2], [e], sample={"message": "hello world"}, main_in="message", main_out=f"{j2.id}/chat_reply") - print("CHAIN:", c) - - # run the chain - out, full_ir = c( - { - "openai_api_key": _get_openai_token(), - "message": "hello world", - } - ) - print("BUFF:", pformat(full_ir)) - print("OUT:", pformat(out)) - - def callj3(self, quote: str, n: int = 4, thoughts: bool = False, to_json: bool = False): - findQuote = ai_actions_registry.register( - node_id="find-quote", - model_id="openai-chat", - model_params={ - "model": "gpt-3.5-turbo", - }, - fn={ - "messages": [ - { - "role": "user", - "content": "'{{ quote }}' \nWho said this quote, if you don't know then reply with a random character from history world? 
Give reply in less than 10 words.", - }, - ], - }, - outputs={ - "chat_reply": ("choices", 0, "message", "content"), - }, - ) - - charStory = ai_actions_registry.register( - node_id="tell-character-story", - model_id="openai-chat", - model_params={ - "model": "gpt-3.5-turbo", - }, - fn={ - "messages": [ - {"role": "user", "content": "Tell a small {{ story_size }} line story about '{{ character_name }}'"}, - ], - }, - outputs={ - "characters_story": ("choices", 0, "message", "content"), - }, - ) - rapMaker = ai_actions_registry.get("deep-rap-quote") - e1 = Edge(findQuote.id, "chat_reply", charStory.id, "character_name") - e2 = Edge(charStory.id, "characters_story", rapMaker.id, "character") - c = Chain( - [findQuote, charStory, rapMaker], - [e1, e2], - sample={"quote": quote}, - main_in="quote", - main_out=f"{rapMaker.id}/chat_reply", - ) - print("CHAIN:", c) - - sample_input = {"openai_api_key": _get_openai_token(), "quote": quote, "story_size": n} # these will also act like defaults - # sample_input = {"quote": quote, "story_size": n} # these will also act like defaults - - if to_json: - print(json.dumps(c.to_dict("quote", f"{rapMaker.id}/chat_reply", sample_input), indent=2)) - return - - # run the chain - sample_input["openai_api_key"] = _get_openai_token() - out, full_ir = c( - sample_input, - print_thoughts=thoughts, - ) - - print("BUFF:", pformat(full_ir)) - print("OUT:", pformat(out)) - - def from_json(self, quote: str = "", n: int = 4, mainline: bool = False, thoughts: bool = False, path: str = "./stories/fury.json"): - with open(path) as f: - dag = json.load(f) - c = Chain.from_dict(dag) - print("CHAIN:", c) - - if mainline: - input = quote - else: - # run the chain - input = {"openai_api_key": _get_openai_token()} - if quote: - input["quote"] = quote - if n: - input["story_size"] = n - out, full_ir = c( - input, - print_thoughts=thoughts, - ) - print("BUFF:", pformat(full_ir)) - print("OUT:", pformat(out)) - - -if __name__ == "__main__": - - def help(): - return """ -Fury Story -========== - -python3 -m stories.fury nodes callp [--fail] -python3 -m stories.fury nodes callai [--jtype --fail] -python3 -m stories.fury nodes callai_chat [--jtype --fail] - -python3 -m stories.fury chain callpp -python3 -m stories.fury chain callpj -python3 -m stories.fury chain calljj -python3 -m stories.fury chain callj3 --quote QUOTE -""".strip() - - fire.Fire({"nodes": _Nodes, "chain": _Chain, "help": help}) diff --git a/stories/fury_to_db.py b/stories/fury_to_db.py deleted file mode 100644 index 93b939b0..00000000 --- a/stories/fury_to_db.py +++ /dev/null @@ -1,25 +0,0 @@ -# from chainfury import ai_actions_registry, cf_client - -# make an action -sensational_story = ai_actions_registry.to_action( - name="sensational_story", - model_id="openai-chat", - model_params={ - "model": "gpt-3.5-turbo", - }, - fn={ - "messages": [ - { - "role": "user", - "content": "You are a Los Santos correspondent and saw '{{ scene }}'. 
Make it into a small 6 line witty, sarcastic, funny sensational story as if you are on Radio Mirror Park.", - }, - ], - }, - outputs={ - "story": ("choices", 0, "message", "content"), - }, -) - -# -# sensational_story = cf_client.get_or_create_node(sensational_story) -out = sensational_story("there are times, when I don't know what to do!") diff --git a/stories/test.sh b/stories/test.sh deleted file mode 100644 index b72a39ac..00000000 --- a/stories/test.sh +++ /dev/null @@ -1,21 +0,0 @@ -echo "######\n> python3 -m stories.test_core\n######" && python3 -m stories.test_core --verbose - -echo "######\n> python3 -m stories.fury_core nodes callm\n######" && python3 -m stories.fury_core nodes callm -echo "######\n> python3 -m stories.fury_core nodes callai\n######" && python3 -m stories.fury_core nodes callai -echo "######\n> python3 -m stories.fury_core nodes callai_chat\n######" && python3 -m stories.fury_core nodes callai_chat - -QUOTE="to great men and women who defined a new era for humanity!" - -echo "######\n> python3 -m stories.fury_core chain callpp\n######" && python3 -m stories.fury_core chain callpp -echo "######\n> python3 -m stories.fury_core chain callpj\n######" && python3 -m stories.fury_core chain callpj -echo "######\n> python3 -m stories.fury_core chain calljj\n######" && python3 -m stories.fury_core chain calljj -echo "######\n> python3 -m stories.fury_core chain callj3\n######" && python3 -m stories.fury_core chain callj3 --quote "$QUOTE" - -SCENE="ufo attacked a crow and stole 5 year olds ice cream" -TOPICS='dolphins, redbull, 5 year old' - -echo "######\n> python3 -m stories.fury_algo algo io\n######" && python3 -m stories.fury_algo algo io "$SCENE" -echo "######\n> python3 -m stories.fury_algo algo cot\n######" && python3 -m stories.fury_algo algo cot "$SCENE" -echo "######\n> python3 -m stories.fury_algo algo cot_t\n######" && python3 -m stories.fury_algo algo cot_t "$TOPICS" -echo "######\n> python3 -m stories.fury_algo algo cot-sc\n######" && python3 -m stories.fury_algo algo cot-sc "$SCENE" --n 3 -echo "######\n> python3 -m stories.fury_algo algo tot\n######" && python3 -m stories.fury_algo algo tot "$TOPICS" --max_search_space 2 diff --git a/stories/test_core.py b/stories/test_getkv.py similarity index 100% rename from stories/test_core.py rename to stories/test_getkv.py
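One recurring edit across the ``stories/`` files is that chains no longer bake credentials into their ``sample`` dicts (the ``_get_openai_token`` and ``_get_nbx_token`` helpers are gone). A minimal sketch of invoking one of the remaining chains under the new convention — assuming, per the deleted helper, that the OpenAI key is read from the ``OPENAI_TOKEN`` environment variable when it is not passed explicitly:

.. code-block:: python

    import os

    from stories.fury_algo import Chains

    # the key now comes from the environment (or can still be passed in the
    # input dict as ``openai_api_key``); ``OPENAI_TOKEN`` mirrors the env var
    # that the deleted ``_get_openai_token`` helper used to read
    os.environ.setdefault("OPENAI_TOKEN", "sk-...")

    # calling a Chain with a plain string routes it through ``main_in`` and
    # returns the ``main_out`` value plus the full intermediate state
    out, thoughts = Chains.story("ufo attacked a crow and stole a kid's ice cream")
    print(out)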