Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Commands #17

Open
diegozea opened this issue Mar 20, 2023 · 2 comments
Open

Commands #17

diegozea opened this issue Mar 20, 2023 · 2 comments

Comments

@diegozea
Copy link

Hi!

Thanks a lot for this package. I think it would be great to add commands into this REPL mode.
I would suggest using the commands I am using in the following Python script:

image

#!/usr/bin/env python3
import openai
import pyperclip
import os
import platform

# Render text in bold using the ANSI SGR escape sequence (code 1).
def bold(string):
    start, reset = "\033[1m", "\033[0m"
    return start + string + reset

# Render text in italics using the ANSI SGR escape sequence (code 3).
def italic(string):
    start, reset = "\033[3m", "\033[0m"
    return start + string + reset

# Centre a string horizontally in the current terminal window.
def centre(string):
    """Return *string* left-padded so it appears centred in the terminal.

    Strings at least as wide as the terminal are returned unchanged.
    Note: only left padding is added; the right side is left open.
    """
    columns = os.get_terminal_size().columns
    overflow = len(string) >= columns
    if overflow:
        return string
    left_pad = (columns - len(string)) // 2
    return string.rjust(len(string) + left_pad)

# Bold banner labels printed before each side of the conversation.
user_prompt = bold('You:') + '\n'
ai_prompt = bold('AI assistant:') + '\n'

# Set up OpenAI API key
# The key is read from the first line of a private file.
# NOTE(review): this path is user-specific ("llave" is Spanish for "key");
# consider an environment variable or a path under the user's home instead.
with open("/home/diego/.llave") as fh:
    api_key = fh.readline().strip()
openai.api_key = api_key

# Function to send a message to the OpenAI chatbot model and return its response
def send_message(message_log):
    """Send the conversation history to the model and return the reply text.

    ``message_log`` is a list of ``{"role": ..., "content": ...}`` dicts in
    chat order; the full list is sent on every call.
    """
    # Use OpenAI's ChatCompletion API to get the chatbot's response
    # NOTE(review): openai.ChatCompletion is the pre-1.0 client interface, and
    # the pinned "gpt-3.5-turbo-0301" snapshot is dated — confirm both still
    # work with the installed openai package version.
    response = openai.ChatCompletion.create(
        model=
        "gpt-3.5-turbo-0301",  # The name of the OpenAI chatbot model to use
        messages=
        message_log,  # The conversation history up to this point, as a list of dictionaries
        stop=
        None,  # The stopping sequence for the generated response, if any (not used here)
        temperature=
        0.7,  # The "creativity" of the generated response (higher temperature = more creative)
    )

    # Find the first response from the chatbot that has text in it (some responses may not have text)
    # NOTE(review): chat-completion choices carry a `message`, not `text`
    # (that field belongs to the legacy Completion API), so this loop is
    # presumably never taken — verify and consider removing.
    for choice in response.choices:
        if "text" in choice:
            return choice.text

    # If no response with text is found, return the first response's content (which may be empty)
    return response.choices[0].message.content

# Detect a quit command; prints a farewell and reports True when found.
def quit_check(user_input):
    """Return True (after printing a goodbye) if the user asked to exit."""
    wants_exit = user_input in ("q", "quit")
    if wants_exit:
        print(f"{bold('Goodbye!')}\n\n")
    return wants_exit

# Serialize the conversation (minus system messages) as Setext-style markdown.
def create_markdown(message_log):
    """Return the chat history as markdown, one headed section per message."""
    headings = {
        "user": "You\n---\n\n",
        "assistant": "ChatGPT\n-------\n\n",
    }
    parts = []
    for entry in message_log:
        role = entry["role"]
        if role == "system":
            continue  # the hidden system prompt is not part of the transcript
        parts.append(headings.get(role, ""))
        parts.append(f"{entry['content']}\n\n")
    return "".join(parts)

def interaction(message_log, memory):
    """Run one REPL turn: read input, dispatch a command, or query the model.

    Returns False when the user asked to quit, True otherwise (keep looping).
    NOTE(review): `memory` is rebound locally by the "m"/"memory" command, so
    the toggle never reaches the caller — fixing that needs a return-value or
    signature change; flagged rather than changed to keep the interface stable.
    """
    user_input = multi_input(f"{user_prompt} ")
    if not user_input:
        return True

    if quit_check(user_input):
        return False

    if user_input in ["m", "memory"]:
        memory = not memory
        if memory:
            print(italic("( memory enabled )\n"))
        else:
            print(italic("( memory disabled )\n"))
        return True

    if user_input in ["d", "delete"]:
        # Explicit request to wipe the history back to just the system prompt.
        message_log.clear()
        message_log.extend(initialize_conversation())
        return True

    if user_input in ["c", "copy"]:
        markdown = create_markdown(message_log)
        pyperclip.copy(markdown)
        print(italic("( markdown copied to clipboard )\n"))
        return True

    if user_input in ["l", "last", "copy last"]:
        # BUGFIX: copy the last message's text. The original passed a list of
        # dicts to pyperclip.copy, which only accepts str/int/float/bool.
        pyperclip.copy(message_log[-1]["content"])
        print(italic("( last response copied to clipboard )\n"))
        return True

    if user_input.startswith("save ") or user_input.startswith("s "):
        if user_input.startswith("save "):
            user_input = user_input.replace("save ", "s ")
        filename = user_input[2:].strip()
        if not filename.endswith(".md"):
            filename += ".md"
        # Use the bare name (without extension) as the document title.
        basename = filename.replace(".md", "")
        markdown = f"{basename}\n{'=' * len(basename)}\n\n"
        markdown += create_markdown(message_log)
        filename = filename.replace(" ", "_")
        with open(filename, 'w') as fh:
            fh.write(markdown)
        # BUGFIX: report the actual filename instead of a literal placeholder.
        print(italic(f"( markdown saved to {filename} )\n"))
        return True

    if user_input in ["r", "regenerate"]:
        # Drop the last assistant reply so the model produces a new one.
        # Guard so regenerating with no reply yet cannot pop the system prompt.
        if message_log and message_log[-1]["role"] == "assistant":
            message_log.pop()
    else:
        if not memory:
            # BUGFIX: with memory disabled the original cleared the log and
            # returned immediately, silently discarding the user's message.
            # Instead, reset the history and still send this message alone.
            message_log.clear()
            message_log.extend(initialize_conversation())
        message_log.append({"role": "user", "content": user_input})

    print(ai_prompt)
    # Send the conversation history to the chatbot and get its response
    response = send_message(message_log)

    # Add the chatbot's response to the conversation history and print it
    message_log.append({"role": "assistant", "content": response})
    print(f"{response}\n")

    return True

# Function to replace input() that accepts multiple lines (the user ends the
# message with `n_lines` consecutive blank lines instead of a single Enter).
def multi_input(prompt, n_lines=1):
    """Read lines from stdin until ``n_lines`` consecutive blank lines follow
    at least one non-empty line; return the collected text.

    BUGFIX: lines are joined with '\\n'. The original joined with '' and fused
    the last word of each line onto the first word of the next.
    """
    lines = []
    empty_lines = 0
    print(prompt)
    while True:
        line = input('')
        if line:
            lines.append(line)
        else:
            # Blank lines only terminate input once something was typed.
            empty_lines += 1
            if empty_lines == n_lines and lines:
                break
    return '\n'.join(lines)

def show_header():
    """Print the centred title banner and the list of available commands."""
    header_lines = (
        "",
        centre(bold('ChatGPT')),
        "",
        f'Type {bold("m")} or {bold("memory")} to toggle memory (enabled by default).',
        f'Type {bold("r")} or {bold("regenerate")} to regenerate the last response.',
        f'Type {bold("d")} or {bold("delete")} to delete the history.',
        f'Type {bold("c")} or {bold("copy")} to copy the conversation history.',
        f'Type {bold("l")} or {bold("last")} or {bold("copy last")} to copy the last response.',
        f'Type {bold("s")} or {bold("save")} {italic("<filename.md>")} to save the conversation.',
        f'Type {bold("q")} or {bold("quit")} to exit.',
        f'Type enter two times to send the message.',
        "",
    )
    for header_line in header_lines:
        print(header_line)

def initialize_conversation():
    """Return a fresh message log seeded with only the system prompt."""
    system_message = {
        "role": "system",
        "content": "You are a helpful assistant."
    }
    return [system_message]

# Main function that runs the chatbot
def main():
    """Entry point: show the header, then loop turns until the user quits."""
    show_header()

    # Seed the history with the system prompt.
    message_log = initialize_conversation()

    # NOTE(review): `memory` is passed by value, so toggles made inside
    # interaction() never persist across turns — confirm whether intended.
    memory = True

    # interaction() returns False once the user types "q"/"quit".
    while interaction(message_log, memory):
        pass


# Call the main function if this file is executed directly (not imported as a module)
if __name__ == "__main__":
    main()
@diegozea
Copy link
Author

#12 (comment)

@FedeClaudi
Copy link

@diegozea I missed your comment in #12 (comment)

I think having these commands would be very useful indeed!
Term.jl could be used to print out a nicely styled header message when the REPL mode is activated outlining the instructions 😁

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

No branches or pull requests

2 participants