Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Work on multiline input #12

Merged
merged 2 commits into from
Nov 1, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,9 @@ python cli.py

The CLI client will prompt you to enter your input. The response from ChatGPT will be printed in the console.

You can also enable multiline mode with the `-m` or `--multiline` option. In this mode, you can enter multiple
lines and type "SEND" on a line by itself when you are done.

Quit with either `q`, `x`, `exit` or `quit` as the input.

### GUI Client (WIP)
Expand Down
39 changes: 37 additions & 2 deletions cli.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,34 @@
#!/usr/bin/env python3
import argparse
import os
import readline

import core


def check_exit(user_input):
    """Return True when the user asked to quit the session."""
    exit_commands = ('q', 'x', 'quit', 'exit')
    return user_input in exit_commands


def cli_input():
    """Prompt for a single line of user input.

    Returns
    -------
    str or None
        The entered line, or None when the user entered a quit command
        (signals the caller to stop the main loop).
    """
    user_input = input("> ")
    if check_exit(user_input):
        return None
    return user_input


def cli_input_multiline():
    """Collect input lines until the sentinel "SEND" is entered.

    Returns
    -------
    str or None
        All collected lines joined with newlines, or None when the
        combined input is a quit command.
    """
    collected = []
    line = input("> ")
    while line != "SEND":
        collected.append(line)
        line = input("> ")
    text = '\n'.join(collected)
    return None if check_exit(text) else text


Expand All @@ -17,4 +37,19 @@ def cli_output(msg, info):
print(info)


def main():
    """Parse command-line options and run the interactive GPT loop.

    The -m/--multiline flag selects the multiline input function; the
    chosen input callback and the CLI output callback are handed to
    core.GptCore, which drives the conversation until the input
    callback returns None.
    """
    parser = argparse.ArgumentParser(description="Interact with OpenAI's GPT-4 model.")
    parser.add_argument('-m', '--multiline', action='store_true',
                        help='Enable multiline input mode. Input "SEND" when you are done.')
    args = parser.parse_args()

    # Pick the input strategy once, up front, based on the flag.
    input_f = cli_input_multiline if args.multiline else cli_input

    core.GptCore(input_f, cli_output).main()


if __name__ == "__main__":
    main()
28 changes: 28 additions & 0 deletions core.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,22 @@


class GptCore:
"""
A class to interact with OpenAI's GPT-4 model.

Attributes
----------
input : function
a function to get user input, takes no arguments, returns str or None
output : function
a function to output the model's response and info, takes str and Info
object, returns None

Methods
-------
main():
The main loop to interact with the model.
"""
    def __init__(self, input, output):
        """Store the I/O callbacks used by the main loop.

        Parameters
        ----------
        input : function
            called with no arguments; returns str or None (None quits)
        output : function
            called with the model's response (str) and an Info object
        """
        self.input = input
        self.output = output
Expand Down Expand Up @@ -42,6 +58,18 @@ def main(self):

@dataclass
class Info:
    """Usage and cost metadata for one interaction with the model.

    Attributes
    ----------
    prompt_tokens : int
        number of tokens consumed by the prompt
    completion_tokens : int
        number of tokens produced in the completion
    price : float
        total price of the interaction
    """
    prompt_tokens: int
    completion_tokens: int
    price: float
Expand Down
28 changes: 28 additions & 0 deletions test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
#!/usr/bin/env python3
"""One-shot probe script: send the command-line arguments as a single
prompt to the OpenAI chat API and print the model's reply."""
import openai
import os
import sys

# Model name passed to the chat completion endpoint.
MODEL = "gpt-4"

# Work relative to this file's directory so the key file is found
# regardless of the current working directory.
os.chdir(os.path.dirname(__file__))

# The API key is kept out of source control in a local .api_key file.
with open('.api_key', 'r') as f:
    openai.api_key = f.read().strip()

# All positional arguments joined together form the prompt.
prompt = " ".join(sys.argv[1:])

# A system message can provide further control over tone and task, and a
# multi-turn conversation can be sent by listing more messages in order.
messages = [
    # {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": prompt},
    # Example of a multi-turn payload:
    # {"role": "user", "content": "Knock knock."},
    # {"role": "assistant", "content": "Who's there?"},
    # {"role": "user", "content": "Orange."},  # And model would proceed with "Orange who?"
]

# Low temperature keeps the answer close to deterministic.
response = openai.ChatCompletion.create(
    model=MODEL, messages=messages, temperature=0.1)

print(response.choices[0]["message"]["content"].strip())
Loading