diff --git a/myllm/default_settings.toml b/myllm/default_settings.toml
index e11d47e3..b2876ef7 100644
--- a/myllm/default_settings.toml
+++ b/myllm/default_settings.toml
@@ -17,7 +17,7 @@
 VALUE = "On default"
 
 # Module Enable/Disable
-llm_enabled = true
+myllm_enabled = true
 
 # LLM Model to use
 llm_model = "gpt-3.5-turbo"
@@ -25,8 +25,12 @@ llm_model = "gpt-3.5-turbo"
 # LLM Provider
 # Refer to https://github.com/xtekky/gpt4free
 # for the list of supported provider
-# llm_provider = "g4f.Provider.ChatgptAi"
 llm_provider = "g4f.Provider.Bing"
+
+# Number of conversation exchanges kept
+# between the user and the AI
+max_memory = 5
+
 # help message listing the commands
 # available
 llm_commands = """
@@ -35,14 +39,7 @@ llm_commands = """
 ➰ /aimode\n
 🧽 /clearai\n
 """
-# command for topic/conversation
-bot_command_question = "qq"
-# command for reset chat
-bot_command_clearai = "clearai"
-# command to activate or deactivate aimode
-# not implemented
-bot_command_aimode = "aimode"
 
 llm_ai_mode = false
 
 # template prompt context
diff --git a/myllm/main.py b/myllm/main.py
index be685dd0..4c06cf4d 100644
--- a/myllm/main.py
+++ b/myllm/main.py
@@ -45,7 +45,7 @@ def __init__(self):
             None
 
         """
-        self.enabled = settings.llm_enabled
+        self.enabled = settings.myllm_enabled
         if not self.enabled:
             return
         self.commands = settings.llm_commands
@@ -56,6 +56,15 @@ def __init__(self):
         self.provider = provider_class()
         self.conversation = Conversation()
 
+    async def get_myllm_help(self):
+        """
+        Get the help message for MyLLM.
+
+        Returns:
+            str: The help message for the `myllm` command.
+        """
+        return f"{self.commands}\n"
+
     async def get_myllm_info(self):
         """
         Get MyLLM information.
@@ -67,14 +76,6 @@ async def get_myllm_info(self):
             f"ℹ️ MyLLM v{__version__}\n {settings.llm_model}\n{settings.llm_provider}"
         )
 
-    async def get_myllm_help(self):
-        """
-        Get the help message for MyLLM.
-
-        Returns:
-            str: The help message for the `myllm` command.
-        """
-        return f"{self.commands}\n"
 
     async def chat(self, prompt):
         """
@@ -84,9 +85,8 @@ async def chat(self, prompt):
             prompt (str): The prompt message from the user.
 
         Returns:
-            str: The predicted response from the conversation model.
+            str: The response from the conversation model.
         """
-        logger.debug("chat {}", prompt)
         self.conversation.add_message("user", prompt)
         logger.debug("conversation {}", self.conversation.get_messages())
         response = await self.provider.create_async(
@@ -95,13 +95,11 @@ async def chat(self, prompt):
         )
         logger.debug("response {}", response)
         self.conversation.add_message("ai", response)
-        logger.debug("conversation {}", self.conversation.get_messages())
         return response
 
     async def clear_chat_history(self):
         """
-        Clears the chat history by setting the `conversation`
-        attribute to an empty string.
+        Clears the chat history.
         """
         self.conversation = Conversation()
 
@@ -112,7 +110,7 @@ async def switch_continous_mode(self):
 
 
 class Conversation:
-    def __init__(self, max_memory=5):
+    def __init__(self, max_memory=settings.max_memory):
         self.messages = []
         self.max_memory = max_memory
 
@@ -122,5 +120,4 @@ def add_message(self, role: str, content: str):
         self.messages.append({"role": role, "content": content})
 
     def get_messages(self):
-        logger.debug("messages {}", self.messages)
         return self.messages
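
Review note: two things in this diff are worth calling out. First, renaming `llm_enabled` to `myllm_enabled` is a breaking change for existing settings files; any deployment overriding the old key must rename it. Second, the hunk at `@@ -122,5 +120,4 @@` ends before showing how `add_message` enforces `max_memory`, so the eviction step in the sketch below is an assumption, not code from this diff. A minimal, self-contained sketch of the intended bounded buffer, where `MAX_MEMORY` is a hypothetical stand-in for `settings.max_memory`:

```python
MAX_MEMORY = 5  # hypothetical stand-in for settings.max_memory


class Conversation:
    """Bounded chat buffer mirroring the class in myllm/main.py."""

    def __init__(self, max_memory=MAX_MEMORY):
        self.messages = []
        self.max_memory = max_memory

    def add_message(self, role: str, content: str):
        # Assumed behavior: evict the oldest message once the limit is hit,
        # so the history sent to the provider never exceeds max_memory.
        if len(self.messages) >= self.max_memory:
            self.messages.pop(0)
        self.messages.append({"role": role, "content": content})

    def get_messages(self):
        return self.messages


# With max_memory = 5, only the five most recent messages survive.
conv = Conversation()
for i in range(8):
    conv.add_message("user", f"message {i}")
assert len(conv.get_messages()) == 5
assert conv.get_messages()[0]["content"] == "message 3"
```

Sourcing the default from `settings.max_memory` instead of the hard-coded `5` means the history window can be tuned per deployment from `default_settings.toml` without touching the code.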