diff --git a/docs/ROADMAP.md b/docs/ROADMAP.md index f1311dd1d8..a4aca7ef18 100644 --- a/docs/ROADMAP.md +++ b/docs/ROADMAP.md @@ -9,7 +9,7 @@ - [ ] Figure out how to get OI to answer to user input requests like python's `input()`. Do we somehow detect a delay in the output..? Is there some universal flag that TUIs emit when they expect user input? Should we do this semantically with embeddings, then ask OI to review it and respond..? - [ ] Placeholder text that gives a compelling example OI request. Probably use `textual` - [ ] Everything else `textual` offers, like could we make it easier to select text? Copy paste in and out? Code editing interface? -- [ ] Let people turn off the active line highlighting +- [x] Let people turn off the active line highlighting - [ ] Add a --plain flag which doesn't use rich, just prints stuff in plain text - [ ] Use iPython stuff to track the active line, instead of inserting print statements, which makes debugging weird (From ChatGPT: For deeper insights into what's happening behind the scenes, including which line of code is being executed, you can increase the logging level of the IPython kernel. You can configure the kernel's logger to a more verbose setting, which logs each execution request. However, this requires modifying the kernel's startup settings, which might involve changing logging configurations in the IPython kernel source or when launching the kernel.) - [ ] Let people edit the code OI writes. Could just open it in the user's preferred editor. Simple. 
[Full description of how to implement this here.](https://github.com/KillianLucas/open-interpreter/pull/830#issuecomment-1854989795) diff --git a/docs/guides/advanced-terminal-usage.mdx b/docs/guides/advanced-terminal-usage.mdx index d8baad05db..0a46dea8e1 100644 --- a/docs/guides/advanced-terminal-usage.mdx +++ b/docs/guides/advanced-terminal-usage.mdx @@ -13,4 +13,5 @@ Magic commands can be used to control the interpreter's behavior in interactive - `%tokens [prompt]`: EXPERIMENTAL: Calculate the tokens used by the next request based on the current conversation's messages and estimate the cost of that request; optionally provide a prompt to also calculate the tokens used by that prompt and the total amount of tokens that will be sent with the next request. - `%info`: Show system and interpreter information. - `%help`: Show this help message. -- `%jupyter`: Export the current session to a Jupyter notebook file (.ipynb) to the Downloads folder. \ No newline at end of file +- `%jupyter`: Export the current session to a Jupyter notebook file (.ipynb) to the Downloads folder. +- `%markdown [path]`: Export the conversation to a specified Markdown path. If no path is provided, it will be saved to the Downloads folder with a generated conversation name. \ No newline at end of file diff --git a/docs/usage/terminal/magic-commands.mdx b/docs/usage/terminal/magic-commands.mdx index 31fb9ab695..98a7fd7b16 100644 --- a/docs/usage/terminal/magic-commands.mdx +++ b/docs/usage/terminal/magic-commands.mdx @@ -13,3 +13,4 @@ Magic commands can be used to control the interpreter's behavior in interactive - `%tokens [prompt]`: EXPERIMENTAL: Calculate the tokens used by the next request based on the current conversation's messages and estimate the cost of that request; optionally provide a prompt to also calculate the tokens used by that prompt and the total amount of tokens that will be sent with the next request. - `%info`: Show system and interpreter information. 
- `%help`: Show this help message. +- `%markdown [path]`: Export the conversation to a specified Markdown path. If no path is provided, it will be saved to the Downloads folder with a generated conversation name. diff --git a/interpreter/core/computer/terminal/languages/jupyter_language.py b/interpreter/core/computer/terminal/languages/jupyter_language.py index 3a144a5d7e..b8f4d7aa34 100644 --- a/interpreter/core/computer/terminal/languages/jupyter_language.py +++ b/interpreter/core/computer/terminal/languages/jupyter_language.py @@ -5,6 +5,7 @@ import ast import logging +import sys import os import queue import re @@ -18,6 +19,17 @@ DEBUG_MODE = False +# When running from an executable, ipykernel calls itself infinitely +# This is a workaround to detect it and launch it manually +if 'ipykernel_launcher' in sys.argv: + if sys.path[0] == '': + del sys.path[0] + + from ipykernel import kernelapp as app + + app.launch_new_instance() + sys.exit(0) + class JupyterLanguage(BaseLanguage): file_extension = "py" diff --git a/interpreter/core/core.py b/interpreter/core/core.py index 26bef75942..431168725e 100644 --- a/interpreter/core/core.py +++ b/interpreter/core/core.py @@ -99,6 +99,7 @@ def __init__( self.multi_line = multi_line self.contribute_conversation = contribute_conversation self.plain_text_display = plain_text_display + self.highlight_active_line = True # additional setting to toggle active line highlighting. 
Defaults to True # Loop messages self.loop = loop diff --git a/interpreter/core/llm/run_tool_calling_llm.py b/interpreter/core/llm/run_tool_calling_llm.py index b39c3928ac..51581b16e6 100644 --- a/interpreter/core/llm/run_tool_calling_llm.py +++ b/interpreter/core/llm/run_tool_calling_llm.py @@ -104,14 +104,6 @@ def run_tool_calling_llm(llm, request_params): ] request_params["tools"] = [tool_schema] - import pprint - - pprint.pprint( - [str(m)[:600] if len(str(m)) > 1000 else m for m in request_params["messages"]] - ) - - print("PROCESSING") - request_params["messages"] = process_messages(request_params["messages"]) # # This makes any role: tool have the ID of the last tool call @@ -165,12 +157,6 @@ def run_tool_calling_llm(llm, request_params): # del messages[i] # request_params["messages"] = messages - import pprint - - pprint.pprint( - [str(m)[:600] if len(str(m)) > 1000 else m for m in request_params["messages"]] - ) - # Add OpenAI's recommended function message # request_params["messages"][0][ # "content" diff --git a/interpreter/terminal_interface/components/code_block.py b/interpreter/terminal_interface/components/code_block.py index 8506b9cb36..a286047e14 100644 --- a/interpreter/terminal_interface/components/code_block.py +++ b/interpreter/terminal_interface/components/code_block.py @@ -12,10 +12,13 @@ class CodeBlock(BaseBlock): Code Blocks display code and outputs in different languages. You can also set the active_line! 
""" - def __init__(self): + def __init__(self, interpreter=None): super().__init__() self.type = "code" + self.highlight_active_line = ( + interpreter.highlight_active_line if interpreter else None + ) # Define these for IDE auto-completion self.language = "" @@ -42,14 +45,22 @@ def refresh(self, cursor=True): ) code_table.add_column() - # Add cursor - if cursor: + # Add cursor only if active line highliting is true + if cursor and ( + self.highlight_active_line + if self.highlight_active_line is not None + else True + ): code += "●" # Add each line of code to the table code_lines = code.strip().split("\n") for i, line in enumerate(code_lines, start=1): - if i == self.active_line: + if i == self.active_line and ( + self.highlight_active_line + if self.highlight_active_line is not None + else True + ): # This is the active line, print it with a white background syntax = Syntax( line, self.language, theme="bw", line_numbers=False, word_wrap=True diff --git a/interpreter/terminal_interface/magic_commands.py b/interpreter/terminal_interface/magic_commands.py index 2801344110..fc1c6a9cae 100644 --- a/interpreter/terminal_interface/magic_commands.py +++ b/interpreter/terminal_interface/magic_commands.py @@ -8,6 +8,7 @@ from ..core.utils.system_debug_info import system_info from .utils.count_tokens import count_messages_tokens from .utils.display_markdown_message import display_markdown_message +from .utils.export_to_markdown import export_to_markdown def handle_undo(self, arguments): @@ -58,6 +59,7 @@ def handle_help(self, arguments): "%help": "Show this help message.", "%info": "Show system and interpreter information", "%jupyter": "Export the conversation to a Jupyter notebook file", + "%markdown [path]": "Export the conversation to a specified Markdown path. 
If no path is provided, it will be saved to the Downloads folder with a generated conversation name.", } base_message = ["> **Available Commands:**\n\n"] @@ -220,6 +222,9 @@ def get_downloads_path(): else: # For MacOS and Linux downloads = os.path.join(os.path.expanduser("~"), "Downloads") + # For some GNU/Linux distros, there's no '~/Downloads' dir by default + if not os.path.exists(downloads): + os.makedirs(downloads) return downloads @@ -295,6 +300,19 @@ def jupyter(self, arguments): ) +def markdown(self, export_path: str): + # If the conversation is empty + if len(self.messages) == 0: + print("No messages to export.") + return + + # If user doesn't specify the export path, then save the exported Markdown in '~/Downloads' + if not export_path: + export_path = get_downloads_path() + f"/{self.conversation_filename[:-4]}md" + + export_to_markdown(self.messages, export_path) + + def handle_magic_command(self, user_input): # Handle shell if user_input.startswith("%%"): @@ -316,6 +334,7 @@ def handle_magic_command(self, user_input): "tokens": handle_count_tokens, "info": handle_info, "jupyter": jupyter, + "markdown": markdown, } user_input = user_input[1:].strip() # Capture the part after the `%` diff --git a/interpreter/terminal_interface/start_terminal_interface.py b/interpreter/terminal_interface/start_terminal_interface.py index 87191f200d..6a1645bbba 100644 --- a/interpreter/terminal_interface/start_terminal_interface.py +++ b/interpreter/terminal_interface/start_terminal_interface.py @@ -50,6 +50,14 @@ def start_terminal_interface(interpreter): "type": bool, "attribute": {"object": interpreter, "attr_name": "auto_run"}, }, + { + "name": "no_highlight_active_line", + "nickname": "nhl", + "help_text": "turn off active line highlighting in code blocks", + "type": bool, + "action": "store_true", + "default": False, # Default to False, meaning highlighting is on by default + }, { "name": "verbose", "nickname": "v", @@ -381,6 +389,9 @@ def print_help(self, *args, 
**kwargs): print(f"Open Interpreter {version} {update_name}") return + if args.no_highlight_active_line: + interpreter.highlight_active_line = False + # if safe_mode and auto_run are enabled, safe_mode disables auto_run if interpreter.auto_run and ( interpreter.safe_mode == "ask" or interpreter.safe_mode == "auto" diff --git a/interpreter/terminal_interface/terminal_interface.py b/interpreter/terminal_interface/terminal_interface.py index d8dd566d98..9b9f6fd7e3 100644 --- a/interpreter/terminal_interface/terminal_interface.py +++ b/interpreter/terminal_interface/terminal_interface.py @@ -221,7 +221,7 @@ def terminal_interface(interpreter, message): if response.strip().lower() == "y": # Create a new, identical block where the code will actually be run # Conveniently, the chunk includes everything we need to do this: - active_block = CodeBlock() + active_block = CodeBlock(interpreter) active_block.margin_top = False # <- Aesthetic choice active_block.language = language active_block.code = code diff --git a/interpreter/terminal_interface/utils/export_to_markdown.py b/interpreter/terminal_interface/utils/export_to_markdown.py new file mode 100644 index 0000000000..638e39db95 --- /dev/null +++ b/interpreter/terminal_interface/utils/export_to_markdown.py @@ -0,0 +1,37 @@ +def export_to_markdown(messages: list[dict], export_path: str): + markdown = messages_to_markdown(messages) + with open(export_path, 'w') as f: + f.write(markdown) + print(f"Exported current conversation to {export_path}") + + +def messages_to_markdown(messages: list[dict]) -> str: + # Convert interpreter.messages to Markdown text + markdown_content = "" + previous_role = None + for chunk in messages: + current_role = chunk["role"] + if current_role == previous_role: + rendered_chunk = "" + else: + rendered_chunk = f"## {current_role}\n\n" + previous_role = current_role + + # User query message + if chunk["role"] == "user": + rendered_chunk += chunk["content"] + "\n\n" + markdown_content += 
rendered_chunk + continue + + # Message + if chunk["type"] == "message": + rendered_chunk += chunk["content"] + "\n\n" + + # Code + if chunk["type"] == "code" or chunk["type"] == "console": + code_format = chunk.get("format", "") + rendered_chunk += f"```{code_format}\n{chunk['content']}\n```\n\n" + + markdown_content += rendered_chunk + + return markdown_content