Merge pull request OpenInterpreter#1465 from OpenInterpreter/development
Development
KillianLucas authored Sep 26, 2024
2 parents c81d910 + 547b180 commit df9293a
Showing 9 changed files with 206 additions and 46 deletions.
129 changes: 100 additions & 29 deletions interpreter/core/async_core.py
@@ -60,7 +60,7 @@ def __init__(self, *args, **kwargs):
self.server = Server(self)

# For the 01. This lets the OAI compatible server accumulate context before responding.
self.context_mode = True
self.context_mode = False

async def input(self, chunk):
"""
@@ -723,7 +723,39 @@ class ChatCompletionRequest(BaseModel):
temperature: Optional[float] = None
stream: Optional[bool] = False

async def openai_compatible_generator():
async def openai_compatible_generator(run_code):
if run_code:
print("Running code.\n")
for i, chunk in enumerate(async_interpreter._respond_and_store()):
if "content" in chunk:
print(chunk["content"], end="") # Sorry! Shitty display for now
if "start" in chunk:
print("\n")

output_content = None

if chunk["type"] == "message" and "content" in chunk:
output_content = chunk["content"]
if chunk["type"] == "code" and "start" in chunk:
output_content = " "
if chunk["type"] == "code" and "content" in chunk:
output_content = (
f"""<unvoiced code="{chunk["content"]}"></unvoiced>"""
)

if output_content:
await asyncio.sleep(0)
output_chunk = {
"id": i,
"object": "chat.completion.chunk",
"created": time.time(),
"model": "open-interpreter",
"choices": [{"delta": {"content": output_content}}],
}
yield f"data: {json.dumps(output_chunk)}\n\n"

return

made_chunk = False

for message in [
@@ -740,6 +772,12 @@ async def openai_compatible_generator():
await asyncio.sleep(0) # Yield control to the event loop
made_chunk = True

if (
chunk["type"] == "confirmation"
and async_interpreter.auto_run == False
):
break

if async_interpreter.stop_event.is_set():
break

@@ -749,6 +787,10 @@
output_content = chunk["content"]
if chunk["type"] == "code" and "start" in chunk:
output_content = " "
if chunk["type"] == "code" and "content" in chunk:
output_content = (
f"""<unvoiced code="{chunk["content"]}"></unvoiced>"""
)

if output_content:
await asyncio.sleep(0)
@@ -764,6 +806,18 @@
if made_chunk:
break

if async_interpreter.messages[-1]["type"] == "code":
await asyncio.sleep(0)
output_content = "{CODE_FINISHED}"
output_chunk = {
"id": i,
"object": "chat.completion.chunk",
"created": time.time(),
"model": "open-interpreter",
"choices": [{"delta": {"content": output_content}}],
}
yield f"data: {json.dumps(output_chunk)}\n\n"

@router.post("/openai/chat/completions")
async def chat_completion(request: ChatCompletionRequest):
global last_start_time
@@ -776,6 +830,9 @@ async def chat_completion(request: ChatCompletionRequest):

if last_message.content == "{STOP}":
# Handle special STOP token
async_interpreter.stop_event.set()
time.sleep(5)
async_interpreter.stop_event.clear()
return

if last_message.content in ["{CONTEXT_MODE_ON}", "{REQUIRE_START_ON}"]:
@@ -786,6 +843,14 @@
async_interpreter.context_mode = False
return

if last_message.content == "{AUTO_RUN_ON}":
async_interpreter.auto_run = True
return

if last_message.content == "{AUTO_RUN_OFF}":
async_interpreter.auto_run = False
return

if type(last_message.content) == str:
async_interpreter.messages.append(
{
@@ -825,43 +890,49 @@ async def chat_completion(request: ChatCompletionRequest):
}
)

if async_interpreter.context_mode:
# In context mode, we only respond if we received a {START} message
# Otherwise, we're just accumulating context
if last_message.content == "{START}":
if async_interpreter.messages[-1]["content"] == "{START}":
run_code = False
if last_message.content == "{RUN}":
run_code = True
# Remove that {RUN} message that would have just been added
async_interpreter.messages = async_interpreter.messages[:-1]
else:
if async_interpreter.context_mode:
# In context mode, we only respond if we received a {START} message
# Otherwise, we're just accumulating context
if last_message.content == "{START}":
if async_interpreter.messages[-1]["content"] == "{START}":
# Remove that {START} message that would have just been added
async_interpreter.messages = async_interpreter.messages[:-1]
last_start_time = time.time()
if (
async_interpreter.messages
and async_interpreter.messages[-1].get("role") != "user"
):
return
else:
# Check if we're within 6 seconds of last_start_time
current_time = time.time()
if current_time - last_start_time <= 6:
# Continue processing
pass
else:
# More than 6 seconds have passed, so return
return

else:
if last_message.content == "{START}":
# This just sometimes happens I guess
# Remove that {START} message that would have just been added
async_interpreter.messages = async_interpreter.messages[:-1]
last_start_time = time.time()
if (
async_interpreter.messages
and async_interpreter.messages[-1].get("role") != "user"
):
return
else:
# Check if we're within 6 seconds of last_start_time
current_time = time.time()
if current_time - last_start_time <= 6:
# Continue processing
pass
else:
# More than 6 seconds have passed, so return
return

else:
if last_message.content == "{START}":
# This just sometimes happens I guess
# Remove that {START} message that would have just been added
async_interpreter.messages = async_interpreter.messages[:-1]
return

async_interpreter.stop_event.set()
time.sleep(0.1)
async_interpreter.stop_event.clear()

if request.stream:
return StreamingResponse(
openai_compatible_generator(), media_type="application/x-ndjson"
openai_compatible_generator(run_code), media_type="application/x-ndjson"
)
else:
messages = async_interpreter.chat(message=".", stream=False, display=True)
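The hunks above add several control tokens to the OpenAI-compatible endpoint: `{RUN}` executes the pending code block, `{STOP}` interrupts a running response, `{AUTO_RUN_ON}`/`{AUTO_RUN_OFF}` toggle confirmation-free execution, `{CONTEXT_MODE_ON}`/`{CONTEXT_MODE_OFF}` toggle context accumulation, and generated code now streams back wrapped in `<unvoiced code="...">` chunks. A minimal client sketch follows, assuming the async server is reachable at `http://localhost:8000` (host, port, and the `model` value are placeholders, not part of this diff):

```python
import json
import requests

BASE_URL = "http://localhost:8000"  # assumption: wherever the async server is exposed

def send(content: str) -> None:
    """POST one user message to the OpenAI-compatible route and print streamed deltas."""
    response = requests.post(
        f"{BASE_URL}/openai/chat/completions",
        json={
            "model": "open-interpreter",
            "messages": [{"role": "user", "content": content}],
            "stream": True,
        },
        stream=True,
    )
    for line in response.iter_lines():
        if line.startswith(b"data: "):
            chunk = json.loads(line[len(b"data: "):])
            print(chunk["choices"][0]["delta"].get("content", ""), end="", flush=True)
    print()

send("{AUTO_RUN_OFF}")                         # require an explicit {RUN} before executing code
send("List the files in my home directory.")   # reply streams; code arrives as <unvoiced code="..."> chunks
send("{RUN}")                                  # confirm and execute the pending code block
send("{STOP}")                                 # interrupt whatever is currently running
```

With `auto_run` off, the generator breaks at the `confirmation` chunk, so the pending code only executes once the `{RUN}` message arrives.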
13 changes: 10 additions & 3 deletions interpreter/core/computer/terminal/languages/jupyter_language.py
@@ -149,9 +149,12 @@ def iopub_message_listener():
self.finish_flag = True
return
try:
input_patience = int(
os.environ.get("INTERPRETER_TERMINAL_INPUT_PATIENCE", 15)
)
if (
time.time() - self.last_output_time > 15
and time.time() - self.last_output_message_time > 15
time.time() - self.last_output_time > input_patience
and time.time() - self.last_output_message_time > input_patience
):
self.last_output_message_time = time.time()

@@ -364,7 +367,11 @@ def preprocess_python(code):

# Add print commands that tell us what the active line is
# but don't do this if any line starts with ! or %
if not any(line.strip().startswith(("!", "%")) for line in code.split("\n")):
if (
not any(line.strip().startswith(("!", "%")) for line in code.split("\n"))
and os.environ.get("INTERPRETER_ACTIVE_LINE_DETECTION", "True").lower()
== "true"
):
code = add_active_line_prints(code)

# Wrap in a try except (DISABLED)
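Both new environment variables are read at execution time, so they can be set anywhere before code runs. A short sketch, assuming a plain Python session drives the interpreter (the values are illustrative; the committed defaults are a 15-second patience and active-line detection on):

```python
import os

# Wait 60 seconds of output silence before treating the running cell as blocked
# on input (the committed default above is 15 seconds).
os.environ["INTERPRETER_TERMINAL_INPUT_PATIENCE"] = "60"

# Disable the injected "active line" print statements entirely; the same flag
# is also checked by the shell preprocessor in the next file.
os.environ["INTERPRETER_ACTIVE_LINE_DETECTION"] = "False"

from interpreter import interpreter

interpreter.chat("Run a long training loop and report progress as you go.")
```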
6 changes: 5 additions & 1 deletion interpreter/core/computer/terminal/languages/shell.py
@@ -45,7 +45,11 @@ def preprocess_shell(code):

# Add commands that tell us what the active line is
# if it's multiline, just skip this. soon we should make it work with multiline
if not has_multiline_commands(code):
if (
not has_multiline_commands(code)
and os.environ.get("INTERPRETER_ACTIVE_LINE_DETECTION", "True").lower()
== "true"
):
code = add_active_line_prints(code)

# Add end command (we'll be listening for this so we know when it ends)
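The same flag gates `preprocess_shell`, so its effect can be inspected directly; a sketch, assuming the module is importable from an installed checkout (the sample command is arbitrary):

```python
import os
from interpreter.core.computer.terminal.languages.shell import preprocess_shell

os.environ["INTERPRETER_ACTIVE_LINE_DETECTION"] = "False"

# With detection off, the returned script should contain no injected
# "active line" echo statements, only the original command plus the
# end-of-run marker appended by the preprocessor.
print(preprocess_shell('echo "hello"'))
```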
2 changes: 1 addition & 1 deletion interpreter/core/core.py
@@ -57,7 +57,7 @@ def __init__(
"Let me know what you'd like to do next.",
"Please provide more information.",
],
disable_telemetry=os.getenv("DISABLE_TELEMETRY", "false").lower() == "true",
disable_telemetry=False,
in_terminal_interface=False,
conversation_history=True,
conversation_filename=None,
2 changes: 1 addition & 1 deletion interpreter/core/utils/truncate_output.py
@@ -4,7 +4,7 @@ def truncate_output(data, max_output_chars=2800, add_scrollbars=False):

needs_truncation = False

message = f"Output truncated. Showing the last {max_output_chars} characters.\n\n"
message = f"Output truncated. Showing the last {max_output_chars} characters. You should try again and use computer.ai.summarize(output) over the output, or break it down into smaller steps.\n\n"

# This won't work because truncated code is stored in interpreter.messages :/
# If the full code was stored, we could do this:
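A quick sketch of how the reworded truncation hint surfaces, assuming the helper is called the same way elsewhere in the codebase (the sample data is arbitrary):

```python
from interpreter.core.utils.truncate_output import truncate_output

long_output = "x" * 10_000  # anything longer than max_output_chars
truncated = truncate_output(long_output, max_output_chars=2800)

# Per the message above, the result should lead with the new hint about
# computer.ai.summarize(output) / breaking the task into smaller steps,
# followed by the last 2800 characters of the original data.
print(truncated[:300])
```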
72 changes: 72 additions & 0 deletions interpreter/terminal_interface/profiles/defaults/aws-docs.py
@@ -0,0 +1,72 @@
"""
This is an Open Interpreter profile. It is specialized for searching AWS documentation and is configured to run Anthropic's `Claude 3.5 Sonnet`.
"""

# Configure Open Interpreter
from interpreter import interpreter

interpreter.llm.model = "claude-3-5-sonnet-20240620"
interpreter.computer.import_computer_api = True
interpreter.llm.supports_functions = True
interpreter.llm.supports_vision = True
interpreter.llm.context_window = 100000
interpreter.llm.max_tokens = 4096

AWS_DOCS_SEARCH_URL = "https://docs.aws.amazon.com/search/doc-search.html?searchPath=documentation&searchQuery=<query>"

custom_tool = """
import os
import requests
def search_aws_docs(query):
url = "https://api.perplexity.ai/chat/completions"
payload = {
"model": "llama-3.1-sonar-small-128k-online",
"messages": [
{
"role": "system",
"content": "Be precise and concise."
},
{
"role": "user",
"content": query
}
],
"temperature": 0.2,
"top_p": 0.9,
"return_citations": True,
"search_domain_filter": ["docs.aws.amazon.com"],
"return_images": False,
"return_related_questions": False,
#"search_recency_filter": "month",
"top_k": 0,
"stream": False,
"presence_penalty": 0,
"frequency_penalty": 1
}
headers = {
"Authorization": f"Bearer {os.environ.get('PPLX_API_KEY')}",
"Content-Type": "application/json"
}
response = requests.request("POST", url, json=payload, headers=headers)
print(response.text)
return response.text
"""


interpreter.computer.run("python", custom_tool)

interpreter.custom_instructions = f"""
You have access to a special function imported inside your python environment, to be executed in python, called `search_aws_docs(query)` which lets you search the AWS docs.
Use it frequently to ground your usage of AWS products.
Use it often!
If the user wants you to open the docs, open their browser to the URL: {AWS_DOCS_SEARCH_URL}
"""
@@ -3,10 +3,10 @@
"""

"""
Required pip package:
pip install boto3>=1.28.57
Recommended pip package:
pip install boto3
Required environment variables:
Recommended environment variables:
os.environ["AWS_ACCESS_KEY_ID"] = "" # Access key
os.environ["AWS_SECRET_ACCESS_KEY"] = "" # Secret access key
os.environ["AWS_REGION_NAME"] = "" # us-east-1, us-east-2, us-west-1, us-west-2
@@ -20,7 +20,7 @@

interpreter.computer.import_computer_api = True

interpreter.llm.supports_functions = True
interpreter.llm.supports_vision = True
interpreter.llm.context_window = 100000
interpreter.llm.supports_functions = False
interpreter.llm.supports_vision = False
interpreter.llm.context_window = 10000
interpreter.llm.max_tokens = 4096
1 change: 1 addition & 0 deletions interpreter/terminal_interface/start_terminal_interface.py
@@ -478,6 +478,7 @@ def print_help(self, *args, **kwargs):
### Set attributes on interpreter, because the arguments passed in via the CLI should override profile

set_attributes(args, arguments)
interpreter.disable_telemetry=os.getenv("DISABLE_TELEMETRY", "false").lower() == "true" or args.disable_telemetry

### Set some helpful settings we know are likely to be true

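Taken together with the `core.py` hunk above, the `DISABLE_TELEMETRY` environment variable is now only consulted when the terminal interface starts, so Python-API users set the attribute directly. A short sketch (the attribute and variable names come from this diff; everything else is illustrative):

```python
import os

# CLI path: honored by start_terminal_interface at startup
os.environ["DISABLE_TELEMETRY"] = "true"

# Python-API path: core.py no longer reads the environment variable itself,
# so set the attribute on the interpreter object explicitly.
from interpreter import interpreter

interpreter.disable_telemetry = True
interpreter.chat("hello")
```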
15 changes: 10 additions & 5 deletions interpreter/terminal_interface/terminal_interface.py
@@ -92,11 +92,16 @@ def terminal_interface(interpreter, message):
interpreter.messages = interpreter.messages[:-1]
else:
### This is the primary input for Open Interpreter.
message = (
cli_input("> ").strip()
if interpreter.multi_line
else input("> ").strip()
)
try:
message = (
cli_input("> ").strip()
if interpreter.multi_line
else input("> ").strip()
)
except (KeyboardInterrupt, EOFError):
# Treat Ctrl-D on an empty line the same as Ctrl-C by exiting gracefully
interpreter.display_message("\n\n`Exiting...`")
raise KeyboardInterrupt

try:
# This lets users hit the up arrow key for past messages
