Move more app files to app package (Significant-Gravitas#5036)
Co-authored-by: Nicholas Tindle <[email protected]>
collijk and ntindle authored Aug 1, 2023
1 parent 7cd407b commit a593c32
Show file tree
Hide file tree
Showing 10 changed files with 186 additions and 201 deletions.
16 changes: 8 additions & 8 deletions autogpt/app/main.py
@@ -13,6 +13,14 @@
from autogpt.agents import Agent, AgentThoughts, CommandArgs, CommandName
from autogpt.app.configurator import create_config
from autogpt.app.setup import prompt_user
from autogpt.app.spinner import Spinner
from autogpt.app.utils import (
    clean_input,
    get_current_git_branch,
    get_latest_bulletin,
    get_legal_warning,
    markdown_to_ansi_style,
)
from autogpt.commands import COMMAND_CATEGORIES
from autogpt.config import AIConfig, Config, ConfigBuilder, check_openai_api_key
from autogpt.llm.api_manager import ApiManager
@@ -22,14 +30,6 @@
from autogpt.plugins import scan_plugins
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
from autogpt.speech import say_text
from autogpt.spinner import Spinner
from autogpt.utils import (
    clean_input,
    get_current_git_branch,
    get_latest_bulletin,
    get_legal_warning,
    markdown_to_ansi_style,
)
from autogpt.workspace import Workspace
from scripts.install_plugin_deps import install_plugin_dependencies

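For downstream code, this file's change is purely an import-path move; a minimal before/after sketch (names taken from the diff above, signatures unchanged in this commit):

    # before: the helpers lived at the package root
    from autogpt.spinner import Spinner
    from autogpt.utils import clean_input, get_latest_bulletin

    # after: they live in the autogpt.app package
    from autogpt.app.spinner import Spinner
    from autogpt.app.utils import clean_input, get_latest_bulletin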
2 changes: 1 addition & 1 deletion autogpt/app/setup.py
@@ -5,7 +5,7 @@
from colorama import Fore, Style
from jinja2 import Template

from autogpt import utils
from autogpt.app import utils
from autogpt.config import Config
from autogpt.config.ai_config import AIConfig
from autogpt.llm.base import ChatSequence, Message
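Since setup.py imports the module itself (and this file's diff touches only that one line), its call sites need no edits; a hedged sketch of the pattern (the clean_input call is illustrative, not taken from setup.py):

    from autogpt.app import utils  # was: from autogpt import utils

    user_input = utils.clean_input(config, "Continue? (y/n): ")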
File renamed without changes.
147 changes: 147 additions & 0 deletions autogpt/app/utils.py
@@ -0,0 +1,147 @@
import os
import re

import requests
from colorama import Fore, Style
from git.repo import Repo
from prompt_toolkit import ANSI, PromptSession
from prompt_toolkit.history import InMemoryHistory

from autogpt.config import Config
from autogpt.logs import logger

session = PromptSession(history=InMemoryHistory())


def clean_input(config: Config, prompt: str = "", talk=False):
    try:
        if config.chat_messages_enabled:
            for plugin in config.plugins:
                if not hasattr(plugin, "can_handle_user_input"):
                    continue
                if not plugin.can_handle_user_input(user_input=prompt):
                    continue
                plugin_response = plugin.user_input(user_input=prompt)
                if not plugin_response:
                    continue
                if plugin_response.lower() in [
                    "yes",
                    "yeah",
                    "y",
                    "ok",
                    "okay",
                    "sure",
                    "alright",
                ]:
                    return config.authorise_key
                elif plugin_response.lower() in [
                    "no",
                    "nope",
                    "n",
                    "negative",
                ]:
                    return config.exit_key
                return plugin_response

        # ask for input, default when just pressing Enter is y
        logger.info("Asking user via keyboard...")

        # handle_sigint must be set to False so that the signal handler in
        # autogpt/main.py can take effect properly. This refers to
        # https://github.com/Significant-Gravitas/Auto-GPT/pull/4799/files/3966cdfd694c2a80c0333823c3bc3da090f85ed3#r1264278776
        answer = session.prompt(ANSI(prompt), handle_sigint=False)
        return answer
    except KeyboardInterrupt:
        logger.info("You interrupted Auto-GPT")
        logger.info("Quitting...")
        exit(0)


def get_bulletin_from_web():
    try:
        response = requests.get(
            "https://raw.githubusercontent.com/Significant-Gravitas/Auto-GPT/master/BULLETIN.md"
        )
        if response.status_code == 200:
            return response.text
    except requests.exceptions.RequestException:
        pass

    return ""


def get_current_git_branch() -> str:
    try:
        repo = Repo(search_parent_directories=True)
        branch = repo.active_branch
        return branch.name
    except:
        return ""


def get_latest_bulletin() -> tuple[str, bool]:
    exists = os.path.exists("data/CURRENT_BULLETIN.md")
    current_bulletin = ""
    if exists:
        current_bulletin = open(
            "data/CURRENT_BULLETIN.md", "r", encoding="utf-8"
        ).read()
    new_bulletin = get_bulletin_from_web()
    is_new_news = new_bulletin != "" and new_bulletin != current_bulletin

    news_header = Fore.YELLOW + "Welcome to Auto-GPT!\n"
    if new_bulletin or current_bulletin:
        news_header += (
            "Below you'll find the latest Auto-GPT News and updates regarding features!\n"
            "If you don't wish to see this message, you "
            "can run Auto-GPT with the *--skip-news* flag.\n"
        )

    if new_bulletin and is_new_news:
        open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8").write(new_bulletin)
        current_bulletin = f"{Fore.RED}::NEW BULLETIN::{Fore.RESET}\n\n{new_bulletin}"

    return f"{news_header}\n{current_bulletin}", is_new_news


def markdown_to_ansi_style(markdown: str):
    ansi_lines: list[str] = []
    for line in markdown.split("\n"):
        line_style = ""

        if line.startswith("# "):
            line_style += Style.BRIGHT
        else:
            line = re.sub(
                r"(?<!\*)\*(\*?[^*]+\*?)\*(?!\*)",
                rf"{Style.BRIGHT}\1{Style.NORMAL}",
                line,
            )

        if re.match(r"^#+ ", line) is not None:
            line_style += Fore.CYAN
            line = re.sub(r"^#+ ", "", line)

        ansi_lines.append(f"{line_style}{line}{Style.RESET_ALL}")
    return "\n".join(ansi_lines)


def get_legal_warning() -> str:
    legal_text = """
## DISCLAIMER AND INDEMNIFICATION AGREEMENT
### PLEASE READ THIS DISCLAIMER AND INDEMNIFICATION AGREEMENT CAREFULLY BEFORE USING THE AUTOGPT SYSTEM. BY USING THE AUTOGPT SYSTEM, YOU AGREE TO BE BOUND BY THIS AGREEMENT.
## Introduction
AutoGPT (the "System") is a project that connects a GPT-like artificial intelligence system to the internet and allows it to automate tasks. While the System is designed to be useful and efficient, there may be instances where the System could perform actions that may cause harm or have unintended consequences.
## No Liability for Actions of the System
The developers, contributors, and maintainers of the AutoGPT project (collectively, the "Project Parties") make no warranties or representations, express or implied, about the System's performance, accuracy, reliability, or safety. By using the System, you understand and agree that the Project Parties shall not be liable for any actions taken by the System or any consequences resulting from such actions.
## User Responsibility and Respondeat Superior Liability
As a user of the System, you are responsible for supervising and monitoring the actions of the System while it is operating on your
behalf. You acknowledge that using the System could expose you to potential liability including but not limited to respondeat superior and you agree to assume all risks and liabilities associated with such potential liability.
## Indemnification
By using the System, you agree to indemnify, defend, and hold harmless the Project Parties from and against any and all claims, liabilities, damages, losses, or expenses (including reasonable attorneys' fees and costs) arising out of or in connection with your use of the System, including, without limitation, any actions taken by the System on your behalf, any failure to properly supervise or monitor the System, and any resulting harm or unintended consequences.
"""
    return legal_text
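
A rough sketch of what the markdown_to_ansi_style helper above produces, reading the behaviour off the function (colorama constants shown symbolically, not as raw escape codes):

    from autogpt.app.utils import markdown_to_ansi_style

    styled = markdown_to_ansi_style("# Heading\nplain *emphasis* text")
    # line 1 -> Style.BRIGHT + Fore.CYAN + "Heading" + Style.RESET_ALL
    # line 2 -> "plain " + Style.BRIGHT + "emphasis" + Style.NORMAL + " text" + Style.RESET_ALL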
10 changes: 9 additions & 1 deletion autogpt/processing/text.py
@@ -10,7 +10,15 @@
from autogpt.llm.providers.openai import OPEN_AI_MODELS
from autogpt.llm.utils import count_string_tokens, create_chat_completion
from autogpt.logs import logger
from autogpt.utils import batch


def batch(iterable, max_batch_length: int, overlap: int = 0):
    """Batch data from iterable into slices of length N. The last batch may be shorter."""
    # batched('ABCDEFG', 3) --> ABC DEF G
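    # With overlap > 0, each slice re-yields the tail of the previous one
    # (assumes overlap < max_batch_length), e.g.:
    # batch('ABCDEFG', 3, overlap=1) --> ABC CDE EFG G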
    if max_batch_length < 1:
        raise ValueError("max_batch_length must be at least one")
    for i in range(0, len(iterable), max_batch_length - overlap):
        yield iterable[i : i + max_batch_length]


def _max_chunk_length(model: str, max: Optional[int] = None) -> int:
170 changes: 1 addition & 169 deletions autogpt/utils.py
@@ -1,70 +1,5 @@
import os
import re

import requests
import yaml
from colorama import Fore, Style
from git.repo import Repo
from prompt_toolkit import ANSI, PromptSession
from prompt_toolkit.history import InMemoryHistory

from autogpt.config import Config
from autogpt.logs import logger

session = PromptSession(history=InMemoryHistory())


def batch(iterable, max_batch_length: int, overlap: int = 0):
    """Batch data from iterable into slices of length N. The last batch may be shorter."""
    # batched('ABCDEFG', 3) --> ABC DEF G
    if max_batch_length < 1:
        raise ValueError("max_batch_length must be at least one")
    for i in range(0, len(iterable), max_batch_length - overlap):
        yield iterable[i : i + max_batch_length]


def clean_input(config: Config, prompt: str = "", talk=False):
    try:
        if config.chat_messages_enabled:
            for plugin in config.plugins:
                if not hasattr(plugin, "can_handle_user_input"):
                    continue
                if not plugin.can_handle_user_input(user_input=prompt):
                    continue
                plugin_response = plugin.user_input(user_input=prompt)
                if not plugin_response:
                    continue
                if plugin_response.lower() in [
                    "yes",
                    "yeah",
                    "y",
                    "ok",
                    "okay",
                    "sure",
                    "alright",
                ]:
                    return config.authorise_key
                elif plugin_response.lower() in [
                    "no",
                    "nope",
                    "n",
                    "negative",
                ]:
                    return config.exit_key
                return plugin_response

        # ask for input, default when just pressing Enter is y
        logger.info("Asking user via keyboard...")

        # handle_sigint must be set to False so that the signal handler in
        # autogpt/main.py can take effect properly. This refers to
        # https://github.com/Significant-Gravitas/Auto-GPT/pull/4799/files/3966cdfd694c2a80c0333823c3bc3da090f85ed3#r1264278776
        answer = session.prompt(ANSI(prompt), handle_sigint=False)
        return answer
    except KeyboardInterrupt:
        logger.info("You interrupted Auto-GPT")
        logger.info("Quitting...")
        exit(0)
from colorama import Fore


def validate_yaml_file(file: str):
@@ -80,106 +15,3 @@ def validate_yaml_file(file: str):
    )

    return (True, f"Successfully validated {Fore.CYAN}`{file}`{Fore.RESET}!")


def readable_file_size(size, decimal_places=2):
    """Converts the given size in bytes to a readable format.
    Args:
        size: Size in bytes
        decimal_places (int): Number of decimal places to display
    """
    for unit in ["B", "KB", "MB", "GB", "TB"]:
        if size < 1024.0:
            break
        size /= 1024.0
    return f"{size:.{decimal_places}f} {unit}"


def get_bulletin_from_web():
    try:
        response = requests.get(
            "https://raw.githubusercontent.com/Significant-Gravitas/Auto-GPT/master/BULLETIN.md"
        )
        if response.status_code == 200:
            return response.text
    except requests.exceptions.RequestException:
        pass

    return ""


def get_current_git_branch() -> str:
    try:
        repo = Repo(search_parent_directories=True)
        branch = repo.active_branch
        return branch.name
    except:
        return ""


def get_latest_bulletin() -> tuple[str, bool]:
    exists = os.path.exists("data/CURRENT_BULLETIN.md")
    current_bulletin = ""
    if exists:
        current_bulletin = open(
            "data/CURRENT_BULLETIN.md", "r", encoding="utf-8"
        ).read()
    new_bulletin = get_bulletin_from_web()
    is_new_news = new_bulletin != "" and new_bulletin != current_bulletin

    news_header = Fore.YELLOW + "Welcome to Auto-GPT!\n"
    if new_bulletin or current_bulletin:
        news_header += (
            "Below you'll find the latest Auto-GPT News and updates regarding features!\n"
            "If you don't wish to see this message, you "
            "can run Auto-GPT with the *--skip-news* flag.\n"
        )

    if new_bulletin and is_new_news:
        open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8").write(new_bulletin)
        current_bulletin = f"{Fore.RED}::NEW BULLETIN::{Fore.RESET}\n\n{new_bulletin}"

    return f"{news_header}\n{current_bulletin}", is_new_news


def markdown_to_ansi_style(markdown: str):
    ansi_lines: list[str] = []
    for line in markdown.split("\n"):
        line_style = ""

        if line.startswith("# "):
            line_style += Style.BRIGHT
        else:
            line = re.sub(
                r"(?<!\*)\*(\*?[^*]+\*?)\*(?!\*)",
                rf"{Style.BRIGHT}\1{Style.NORMAL}",
                line,
            )

        if re.match(r"^#+ ", line) is not None:
            line_style += Fore.CYAN
            line = re.sub(r"^#+ ", "", line)

        ansi_lines.append(f"{line_style}{line}{Style.RESET_ALL}")
    return "\n".join(ansi_lines)


def get_legal_warning() -> str:
    legal_text = """
## DISCLAIMER AND INDEMNIFICATION AGREEMENT
### PLEASE READ THIS DISCLAIMER AND INDEMNIFICATION AGREEMENT CAREFULLY BEFORE USING THE AUTOGPT SYSTEM. BY USING THE AUTOGPT SYSTEM, YOU AGREE TO BE BOUND BY THIS AGREEMENT.
## Introduction
AutoGPT (the "System") is a project that connects a GPT-like artificial intelligence system to the internet and allows it to automate tasks. While the System is designed to be useful and efficient, there may be instances where the System could perform actions that may cause harm or have unintended consequences.
## No Liability for Actions of the System
The developers, contributors, and maintainers of the AutoGPT project (collectively, the "Project Parties") make no warranties or representations, express or implied, about the System's performance, accuracy, reliability, or safety. By using the System, you understand and agree that the Project Parties shall not be liable for any actions taken by the System or any consequences resulting from such actions.
## User Responsibility and Respondeat Superior Liability
As a user of the System, you are responsible for supervising and monitoring the actions of the System while it is operating on your
behalf. You acknowledge that using the System could expose you to potential liability including but not limited to respondeat superior and you agree to assume all risks and liabilities associated with such potential liability.
## Indemnification
By using the System, you agree to indemnify, defend, and hold harmless the Project Parties from and against any and all claims, liabilities, damages, losses, or expenses (including reasonable attorneys' fees and costs) arising out of or in connection with your use of the System, including, without limitation, any actions taken by the System on your behalf, any failure to properly supervise or monitor the System, and any resulting harm or unintended consequences.
"""
    return legal_text
4 changes: 3 additions & 1 deletion tests/challenges/utils.py
@@ -38,7 +38,9 @@ def input_generator() -> Generator[str, None, None]:
        yield from input_sequence

    gen = input_generator()
    monkeypatch.setattr(
        "autogpt.app.utils.session.prompt", lambda _, **kwargs: next(gen)
    )


def setup_mock_log_cycle_agent_name(
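Other test suites that stub user input must follow the module move when choosing a patch target; a minimal self-contained sketch (use_scripted_input is an illustrative name mirroring the hunk above, not a helper from the repository):

    from typing import Generator

    import pytest

    def use_scripted_input(
        monkeypatch: pytest.MonkeyPatch, input_sequence: list[str]
    ) -> None:
        # Feed canned answers to the prompt session, which now lives in
        # autogpt.app.utils rather than autogpt.utils.
        def input_generator() -> Generator[str, None, None]:
            yield from input_sequence

        gen = input_generator()
        monkeypatch.setattr(
            "autogpt.app.utils.session.prompt", lambda _, **kwargs: next(gen)
        )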