diff --git a/interpreter/core/async_core.py b/interpreter/core/async_core.py
index e1f6785394..7b1b9f62d9 100644
--- a/interpreter/core/async_core.py
+++ b/interpreter/core/async_core.py
@@ -60,7 +60,7 @@ def __init__(self, *args, **kwargs):
self.server = Server(self)
# For the 01. This lets the OAI compatible server accumulate context before responding.
- self.context_mode = True
+ self.context_mode = False
async def input(self, chunk):
"""
@@ -723,7 +723,39 @@ class ChatCompletionRequest(BaseModel):
temperature: Optional[float] = None
stream: Optional[bool] = False
- async def openai_compatible_generator():
+ async def openai_compatible_generator(run_code):
+ if run_code:
+ print("Running code.\n")
+        for i, chunk in enumerate(async_interpreter._respond_and_store()):
+            if "content" in chunk:
+                print(chunk["content"], end="")  # Crude streaming display for now
+            if "start" in chunk:
+                print("\n")
+
+ output_content = None
+
+ if chunk["type"] == "message" and "content" in chunk:
+ output_content = chunk["content"]
+ if chunk["type"] == "code" and "start" in chunk:
+ output_content = " "
+ if chunk["type"] == "code" and "content" in chunk:
+                output_content = ""  # empty string; code content is not streamed out
+
+ if output_content:
+ await asyncio.sleep(0)
+ output_chunk = {
+ "id": i,
+ "object": "chat.completion.chunk",
+ "created": time.time(),
+ "model": "open-interpreter",
+ "choices": [{"delta": {"content": output_content}}],
+ }
+ yield f"data: {json.dumps(output_chunk)}\n\n"
+
+ return
+
made_chunk = False
for message in [
@@ -740,6 +772,12 @@ async def openai_compatible_generator():
await asyncio.sleep(0) # Yield control to the event loop
made_chunk = True
+            if (
+                chunk["type"] == "confirmation"
+                and not async_interpreter.auto_run
+            ):
+ break
+
if async_interpreter.stop_event.is_set():
break
@@ -749,6 +787,10 @@ async def openai_compatible_generator():
output_content = chunk["content"]
if chunk["type"] == "code" and "start" in chunk:
output_content = " "
+ if chunk["type"] == "code" and "content" in chunk:
+                output_content = ""  # empty string; code content is not streamed out
if output_content:
await asyncio.sleep(0)
@@ -764,6 +806,18 @@ async def openai_compatible_generator():
if made_chunk:
break
+    if async_interpreter.messages and async_interpreter.messages[-1]["type"] == "code":
+ await asyncio.sleep(0)
+ output_content = "{CODE_FINISHED}"
+ output_chunk = {
+ "id": i,
+ "object": "chat.completion.chunk",
+ "created": time.time(),
+ "model": "open-interpreter",
+ "choices": [{"delta": {"content": output_content}}],
+ }
+ yield f"data: {json.dumps(output_chunk)}\n\n"
+
@router.post("/openai/chat/completions")
async def chat_completion(request: ChatCompletionRequest):
global last_start_time
@@ -776,6 +830,9 @@ async def chat_completion(request: ChatCompletionRequest):
if last_message.content == "{STOP}":
# Handle special STOP token
+ async_interpreter.stop_event.set()
+        await asyncio.sleep(5)  # non-blocking, so the running task can observe stop_event
+ async_interpreter.stop_event.clear()
return
if last_message.content in ["{CONTEXT_MODE_ON}", "{REQUIRE_START_ON}"]:
@@ -786,6 +843,14 @@ async def chat_completion(request: ChatCompletionRequest):
async_interpreter.context_mode = False
return
+ if last_message.content == "{AUTO_RUN_ON}":
+ async_interpreter.auto_run = True
+ return
+
+ if last_message.content == "{AUTO_RUN_OFF}":
+ async_interpreter.auto_run = False
+ return
+
if type(last_message.content) == str:
async_interpreter.messages.append(
{
@@ -825,43 +890,49 @@ async def chat_completion(request: ChatCompletionRequest):
}
)
- if async_interpreter.context_mode:
- # In context mode, we only respond if we recieved a {START} message
- # Otherwise, we're just accumulating context
- if last_message.content == "{START}":
- if async_interpreter.messages[-1]["content"] == "{START}":
+ run_code = False
+ if last_message.content == "{RUN}":
+ run_code = True
+ # Remove that {RUN} message that would have just been added
+ async_interpreter.messages = async_interpreter.messages[:-1]
+ else:
+ if async_interpreter.context_mode:
+            # In context mode, we only respond if we received a {START} message
+ # Otherwise, we're just accumulating context
+ if last_message.content == "{START}":
+ if async_interpreter.messages[-1]["content"] == "{START}":
+ # Remove that {START} message that would have just been added
+ async_interpreter.messages = async_interpreter.messages[:-1]
+ last_start_time = time.time()
+ if (
+ async_interpreter.messages
+ and async_interpreter.messages[-1].get("role") != "user"
+ ):
+ return
+ else:
+ # Check if we're within 6 seconds of last_start_time
+ current_time = time.time()
+ if current_time - last_start_time <= 6:
+ # Continue processing
+ pass
+ else:
+ # More than 6 seconds have passed, so return
+ return
+
+ else:
+ if last_message.content == "{START}":
+                # A stray {START} sometimes arrives outside context mode; drop it
# Remove that {START} message that would have just been added
async_interpreter.messages = async_interpreter.messages[:-1]
- last_start_time = time.time()
- if (
- async_interpreter.messages
- and async_interpreter.messages[-1].get("role") != "user"
- ):
- return
- else:
- # Check if we're within 6 seconds of last_start_time
- current_time = time.time()
- if current_time - last_start_time <= 6:
- # Continue processing
- pass
- else:
- # More than 6 seconds have passed, so return
return
- else:
- if last_message.content == "{START}":
- # This just sometimes happens I guess
- # Remove that {START} message that would have just been added
- async_interpreter.messages = async_interpreter.messages[:-1]
- return
-
async_interpreter.stop_event.set()
time.sleep(0.1)
async_interpreter.stop_event.clear()
if request.stream:
return StreamingResponse(
- openai_compatible_generator(), media_type="application/x-ndjson"
+ openai_compatible_generator(run_code), media_type="application/x-ndjson"
)
else:
messages = async_interpreter.chat(message=".", stream=False, display=True)
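Taken together, the hunks above define a small control-token protocol on top of the OpenAI-compatible endpoint: `{CONTEXT_MODE_ON}` and `{START}` gate when the server responds, `{AUTO_RUN_ON}`/`{AUTO_RUN_OFF}` toggle automatic code execution, `{RUN}` approves a pending code block, and `{STOP}` aborts the current run. Below is a minimal client sketch, assuming the server is reachable at http://localhost:8000 (the address depends on how the server was started); the token strings and the `{CODE_FINISHED}` sentinel come straight from this diff.

    import json
    import requests

    URL = "http://localhost:8000/openai/chat/completions"  # assumed address

    def send(content, stream=False):
        # Control tokens travel as ordinary user messages.
        return requests.post(
            URL,
            json={
                "model": "open-interpreter",
                "messages": [{"role": "user", "content": content}],
                "stream": stream,
            },
            stream=stream,
        )

    send("{AUTO_RUN_OFF}")     # code now waits for an explicit {RUN}
    send("{CONTEXT_MODE_ON}")  # accumulate messages; respond only on {START}
    send("List the files in my home directory.")  # accumulated, no reply yet

    with send("{START}", stream=True) as resp:
        for line in resp.iter_lines():
            if line.startswith(b"data: "):
                chunk = json.loads(line[len(b"data: "):])
                print(chunk["choices"][0]["delta"].get("content", ""), end="")
                # A "{CODE_FINISHED}" delta means code is awaiting approval.

    for line in send("{RUN}", stream=True).iter_lines():
        pass  # execution output streams back in the same chunk format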
diff --git a/interpreter/core/computer/terminal/languages/jupyter_language.py b/interpreter/core/computer/terminal/languages/jupyter_language.py
index a56a37ff75..56b7ce438e 100644
--- a/interpreter/core/computer/terminal/languages/jupyter_language.py
+++ b/interpreter/core/computer/terminal/languages/jupyter_language.py
@@ -149,9 +149,12 @@ def iopub_message_listener():
self.finish_flag = True
return
try:
+ input_patience = int(
+ os.environ.get("INTERPRETER_TERMINAL_INPUT_PATIENCE", 15)
+ )
if (
- time.time() - self.last_output_time > 15
- and time.time() - self.last_output_message_time > 15
+ time.time() - self.last_output_time > input_patience
+ and time.time() - self.last_output_message_time > input_patience
):
self.last_output_message_time = time.time()
@@ -364,7 +367,11 @@ def preprocess_python(code):
# Add print commands that tell us what the active line is
# but don't do this if any line starts with ! or %
- if not any(line.strip().startswith(("!", "%")) for line in code.split("\n")):
+ if (
+ not any(line.strip().startswith(("!", "%")) for line in code.split("\n"))
+ and os.environ.get("INTERPRETER_ACTIVE_LINE_DETECTION", "True").lower()
+ == "true"
+ ):
code = add_active_line_prints(code)
# Wrap in a try except (DISABLED)
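The hunk above turns the hard-coded 15-second stall threshold into the INTERPRETER_TERMINAL_INPUT_PATIENCE environment variable, read on every check so it can be changed at runtime. A minimal sketch of raising it (the value is illustrative):

    import os

    # Allow 60 seconds of silence before the stall handling above kicks in
    # (the default stays at 15 when the variable is unset).
    os.environ["INTERPRETER_TERMINAL_INPUT_PATIENCE"] = "60"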
diff --git a/interpreter/core/computer/terminal/languages/shell.py b/interpreter/core/computer/terminal/languages/shell.py
index 9f71e53e5f..6b900b47e2 100644
--- a/interpreter/core/computer/terminal/languages/shell.py
+++ b/interpreter/core/computer/terminal/languages/shell.py
@@ -45,7 +45,11 @@ def preprocess_shell(code):
# Add commands that tell us what the active line is
# if it's multiline, just skip this. soon we should make it work with multiline
- if not has_multiline_commands(code):
+ if (
+ not has_multiline_commands(code)
+ and os.environ.get("INTERPRETER_ACTIVE_LINE_DETECTION", "True").lower()
+ == "true"
+ ):
code = add_active_line_prints(code)
# Add end command (we'll be listening for this so we know when it ends)
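Both the Python and shell preprocessors now consult the same INTERPRETER_ACTIVE_LINE_DETECTION switch before instrumenting code with add_active_line_prints. Per the checks above, any value other than "true" (case-insensitive) disables the instrumentation:

    import os

    # Disable active-line markers in both preprocessors; leaving the variable
    # unset, or any casing of "true", keeps the existing behavior.
    os.environ["INTERPRETER_ACTIVE_LINE_DETECTION"] = "False"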
diff --git a/interpreter/core/core.py b/interpreter/core/core.py
index 0f3e53247f..6b348b63fb 100644
--- a/interpreter/core/core.py
+++ b/interpreter/core/core.py
@@ -57,7 +57,7 @@ def __init__(
"Let me know what you'd like to do next.",
"Please provide more information.",
],
- disable_telemetry=os.getenv("DISABLE_TELEMETRY", "false").lower() == "true",
+ disable_telemetry=False,
in_terminal_interface=False,
conversation_history=True,
conversation_filename=None,
diff --git a/interpreter/core/utils/truncate_output.py b/interpreter/core/utils/truncate_output.py
index 06f7402289..628ff504e3 100644
--- a/interpreter/core/utils/truncate_output.py
+++ b/interpreter/core/utils/truncate_output.py
@@ -4,7 +4,7 @@ def truncate_output(data, max_output_chars=2800, add_scrollbars=False):
needs_truncation = False
- message = f"Output truncated. Showing the last {max_output_chars} characters.\n\n"
+    message = f"Output truncated. Showing the last {max_output_chars} characters. You should try again and run computer.ai.summarize(output) on the output, or break the task into smaller steps.\n\n"
# This won't work because truncated code is stored in interpreter.messages :/
# If the full code was stored, we could do this:
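For reference, a minimal sketch of the keep-the-tail behavior the new message describes, using the signature from the hunk above (the real function also supports add_scrollbars, which this sketch ignores):

    def truncate_tail(data, max_output_chars=2800):
        # Keep only the last max_output_chars characters, prefixed with the notice.
        if len(data) <= max_output_chars:
            return data
        message = (
            f"Output truncated. Showing the last {max_output_chars} characters. "
            "You should try again and run computer.ai.summarize(output) on the "
            "output, or break the task into smaller steps.\n\n"
        )
        return message + data[-max_output_chars:]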
diff --git a/interpreter/terminal_interface/profiles/defaults/aws-docs.py b/interpreter/terminal_interface/profiles/defaults/aws-docs.py
new file mode 100644
index 0000000000..5dd8ab40c5
--- /dev/null
+++ b/interpreter/terminal_interface/profiles/defaults/aws-docs.py
@@ -0,0 +1,72 @@
+"""
+This is an Open Interpreter profile. It is specialized for searching AWS documentation and is configured to run Anthropic's `Claude 3.5 Sonnet`.
+"""
+
+# Configure Open Interpreter
+from interpreter import interpreter
+
+interpreter.llm.model = "claude-3-5-sonnet-20240620"
+interpreter.computer.import_computer_api = True
+interpreter.llm.supports_functions = True
+interpreter.llm.supports_vision = True
+interpreter.llm.context_window = 100000
+interpreter.llm.max_tokens = 4096
+
+AWS_DOCS_SEARCH_URL = "https://docs.aws.amazon.com/search/doc-search.html?searchPath=documentation&searchQuery="
+
+custom_tool = """
+
+import os
+import requests
+
+def search_aws_docs(query):
+
+ url = "https://api.perplexity.ai/chat/completions"
+
+ payload = {
+ "model": "llama-3.1-sonar-small-128k-online",
+ "messages": [
+ {
+ "role": "system",
+ "content": "Be precise and concise."
+ },
+ {
+ "role": "user",
+ "content": query
+ }
+ ],
+ "temperature": 0.2,
+ "top_p": 0.9,
+ "return_citations": True,
+ "search_domain_filter": ["docs.aws.amazon.com"],
+ "return_images": False,
+ "return_related_questions": False,
+ #"search_recency_filter": "month",
+ "top_k": 0,
+ "stream": False,
+ "presence_penalty": 0,
+ "frequency_penalty": 1
+ }
+ headers = {
+ "Authorization": f"Bearer {os.environ.get('PPLX_API_KEY')}",
+ "Content-Type": "application/json"
+ }
+
+    response = requests.post(url, json=payload, headers=headers)
+
+ print(response.text)
+
+ return response.text
+
+"""
+
+
+interpreter.computer.run("python", custom_tool)
+
+interpreter.custom_instructions = f"""
+You have access to a special function called `search_aws_docs(query)`, already defined in your Python environment. Run it in Python to search the AWS docs.
+Use it frequently to ground your usage of AWS products.
+
+If the user wants you to open the docs, open their browser to the URL: {AWS_DOCS_SEARCH_URL}
+"""
diff --git a/interpreter/terminal_interface/profiles/defaults/bedrock-anthropic.py b/interpreter/terminal_interface/profiles/defaults/bedrock-anthropic.py
index dfe591ce33..cc649feb04 100644
--- a/interpreter/terminal_interface/profiles/defaults/bedrock-anthropic.py
+++ b/interpreter/terminal_interface/profiles/defaults/bedrock-anthropic.py
@@ -3,10 +3,10 @@
"""
"""
-Required pip package:
-pip install boto3>=1.28.57
+Recommended pip package:
+pip install boto3
-Required environment variables:
+Recommended environment variables:
os.environ["AWS_ACCESS_KEY_ID"] = "" # Access key
os.environ["AWS_SECRET_ACCESS_KEY"] = "" # Secret access key
os.environ["AWS_REGION_NAME"] = "" # us-east-1, us-east-2, us-west-1, us-west-2
@@ -20,7 +20,7 @@
interpreter.computer.import_computer_api = True
-interpreter.llm.supports_functions = True
-interpreter.llm.supports_vision = True
-interpreter.llm.context_window = 100000
+interpreter.llm.supports_functions = False
+interpreter.llm.supports_vision = False
+interpreter.llm.context_window = 10000
interpreter.llm.max_tokens = 4096
diff --git a/interpreter/terminal_interface/start_terminal_interface.py b/interpreter/terminal_interface/start_terminal_interface.py
index 14a02f64c6..70a67fcdfd 100644
--- a/interpreter/terminal_interface/start_terminal_interface.py
+++ b/interpreter/terminal_interface/start_terminal_interface.py
@@ -478,6 +478,7 @@ def print_help(self, *args, **kwargs):
### Set attributes on interpreter, because the arguments passed in via the CLI should override profile
set_attributes(args, arguments)
+    interpreter.disable_telemetry = (
+        os.getenv("DISABLE_TELEMETRY", "false").lower() == "true"
+        or args.disable_telemetry
+    )
### Set some helpful settings we know are likely to be true
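With the default hard-coded in core.py, the DISABLE_TELEMETRY environment variable is now honored here at the terminal-interface layer, alongside the CLI flag. Setting it before launch preserves the previous opt-out behavior:

    import os

    # Read by start_terminal_interface (above); either this variable or the
    # corresponding CLI flag disables telemetry.
    os.environ["DISABLE_TELEMETRY"] = "true"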
diff --git a/interpreter/terminal_interface/terminal_interface.py b/interpreter/terminal_interface/terminal_interface.py
index bf019eb03b..d0cb367924 100644
--- a/interpreter/terminal_interface/terminal_interface.py
+++ b/interpreter/terminal_interface/terminal_interface.py
@@ -92,11 +92,16 @@ def terminal_interface(interpreter, message):
interpreter.messages = interpreter.messages[:-1]
else:
### This is the primary input for Open Interpreter.
- message = (
- cli_input("> ").strip()
- if interpreter.multi_line
- else input("> ").strip()
- )
+ try:
+ message = (
+ cli_input("> ").strip()
+ if interpreter.multi_line
+ else input("> ").strip()
+ )
+ except (KeyboardInterrupt, EOFError):
+ # Treat Ctrl-D on an empty line the same as Ctrl-C by exiting gracefully
+ interpreter.display_message("\n\n`Exiting...`")
+ raise KeyboardInterrupt
try:
# This lets users hit the up arrow key for past messages