Merge pull request #110 from mraniki/dev
♻️ Refactor default_settings.toml and main.py
mraniki authored Sep 24, 2023
2 parents 716b8fb + a5757d2 commit c1e464a
Showing 2 changed files with 19 additions and 25 deletions.
myllm/default_settings.toml (6 additions, 9 deletions)

@@ -17,16 +17,20 @@
 VALUE = "On default"

 # Module Enable/Disable
-llm_enabled = true
+myllm_enabled = true

 # LLM Model to use
 llm_model = "gpt-3.5-turbo"

 # LLM Provider
 # Refer to https://github.com/xtekky/gpt4free
 # for the list of supported provider
 # llm_provider = "g4f.Provider.ChatgptAi"
 llm_provider = "g4f.Provider.Bing"

+# Number of conversation history
+# between user and ai
+max_memory = 5

 # help message listing the commands
 # available
 llm_commands = """
@@ -35,14 +39,7 @@ llm_commands = """
 ➰ /aimode\n
 🧽 /clearai\n
 """
-# command for topic/conversation
-bot_command_question = "qq"
-# command for reset chat
-bot_command_clearai = "clearai"

-# command to activate or deactivate aimode
-# not implemented
-bot_command_aimode = "aimode"
 llm_ai_mode = false

 # template prompt context
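For context: main.py consumes these values through a `settings` object (e.g. `settings.myllm_enabled`, `settings.max_memory`). The sketch below loads the changed keys with only the standard library; it is illustrative, and both the loader and the possible [default] table layout are assumptions rather than the project's actual settings mechanism.

# Illustrative sketch only: read the keys touched by this commit with
# the standard-library tomllib (Python 3.11+). The project itself
# resolves these through its own `settings` object instead.
import tomllib

with open("myllm/default_settings.toml", "rb") as f:
    config = tomllib.load(f)

# Assumption: the keys may sit under a [default] table (dynaconf-style
# layouts do this); fall back to the top-level mapping otherwise.
section = config.get("default", config)

print(section["myllm_enabled"])  # renamed from llm_enabled
print(section["llm_provider"])   # "g4f.Provider.Bing"
print(section["max_memory"])     # new key: conversation turns to keep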
myllm/main.py (13 additions, 16 deletions)

@@ -45,7 +45,7 @@ def __init__(self):
             None
         """

-        self.enabled = settings.llm_enabled
+        self.enabled = settings.myllm_enabled
         if not self.enabled:
             return
         self.commands = settings.llm_commands
@@ -56,6 +56,15 @@ def __init__(self):
         self.provider = provider_class()
         self.conversation = Conversation()

+    async def get_myllm_help(self):
+        """
+        Get the help message for MyLLM.
+        Returns:
+            str: The help message for the `myllm` command.
+        """
+        return f"{self.commands}\n"
+
     async def get_myllm_info(self):
         """
         Get MyLLM information.
@@ -67,14 +76,6 @@ async def get_myllm_info(self):
f"ℹ️ MyLLM v{__version__}\n {settings.llm_model}\n{settings.llm_provider}"
)

async def get_myllm_help(self):
"""
Get the help message for MyLLM.
Returns:
str: The help message for the `myllm` command.
"""
return f"{self.commands}\n"

async def chat(self, prompt):
"""
@@ -84,9 +85,8 @@ async def chat(self, prompt):
             prompt (str): The prompt message from the user.
         Returns:
-            str: The predicted response from the conversation model.
+            str: The response from the conversation model.
         """
-        logger.debug("chat {}", prompt)
         self.conversation.add_message("user", prompt)
         logger.debug("conversation {}", self.conversation.get_messages())
         response = await self.provider.create_async(
@@ -95,13 +95,11 @@
         )
         logger.debug("response {}", response)
         self.conversation.add_message("ai", response)
-        logger.debug("conversation {}", self.conversation.get_messages())
         return response

     async def clear_chat_history(self):
         """
-        Clears the chat history by setting the `conversation`
-        attribute to an empty string.
+        Clears the chat history
         """
         self.conversation = Conversation()

@@ -112,7 +110,7 @@ async def switch_continous_mode(self):


 class Conversation:
-    def __init__(self, max_memory=5):
+    def __init__(self, max_memory=settings.max_memory):
         self.messages = []
         self.max_memory = max_memory

@@ -122,5 +120,4 @@ def add_message(self, role: str, content: str):
         self.messages.append({"role": role, "content": content})

     def get_messages(self):
-        logger.debug("messages {}", self.messages)
         return self.messages
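In sum, the main.py refactor renames the enable flag to `myllm_enabled`, moves `get_myllm_help` ahead of `get_myllm_info`, drops several debug log calls, and lets `Conversation` default its memory window to `settings.max_memory`. A minimal usage sketch of the resulting class follows; the import path `from myllm import MyLLM` and an enabled configuration are assumptions based on this diff, not verified against the repository.

# Minimal usage sketch based on the methods visible in this diff.
# Assumptions: MyLLM is importable from the myllm package and
# myllm_enabled = true in the active settings.
import asyncio

from myllm import MyLLM


async def main():
    ai = MyLLM()
    if not ai.enabled:  # set from settings.myllm_enabled in __init__
        return
    print(await ai.get_myllm_help())  # the llm_commands help text
    reply = await ai.chat("What model are you running?")
    print(reply)
    await ai.clear_chat_history()  # swaps in a fresh Conversation


asyncio.run(main())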
