
Commit

Merge pull request #1183 from mraniki/dev
🔥 remove llm export unit test
mraniki authored Jan 26, 2024
2 parents 4f569d4 + a121809 · commit 3682982
Showing 5 changed files with 25 additions and 232 deletions.
4 changes: 2 additions & 2 deletions pyproject.toml
@@ -55,8 +55,8 @@ talkytrend = "1.15.9"
 iamlistening = "4.2.35"
 findmyorder = "1.9.10"
 dxsp = "7.0.7"
-cefi = "3.2.45"
-myllm = "4.0.2"
+cefi = "3.3.0"
+myllm = "4.0.4"
 ib_insync = "0.9.86"
 
 [tool.poetry.group.dev.dependencies]
20 changes: 8 additions & 12 deletions tests/test_llm_plugin.py
@@ -16,17 +16,14 @@ def set_test_settings():
 def test_fixture_plugin():
     return LlmPlugin()
 
 
 @pytest.mark.asyncio
 async def test_plugin(plugin):
     """Test message handling"""
     await plugin.handle_message(f"{settings.bot_prefix}{settings.bot_command_question}")
     assert plugin.should_handle("any message") is True
     assert plugin.llm is not None
     assert plugin.llm.provider is not None
     assert callable(plugin.llm.chat)
-    assert callable(plugin.llm.switch_continous_mode)
-    assert callable(plugin.llm.clear_chat_history)
-    assert callable(plugin.llm.export_chat_history)
 
 
 @pytest.mark.asyncio
@@ -57,29 +54,28 @@ async def test_parsing_llm(plugin):
 @pytest.mark.asyncio
 async def test_parsing_info(plugin):
     """Test info"""
-    plugin.llm.get_myllm_info = AsyncMock()
+    plugin.llm.get_info = AsyncMock()
     await plugin.handle_message(f"{settings.bot_prefix}{settings.bot_command_info}")
-    plugin.llm.get_myllm_info.assert_awaited_once()
+    plugin.llm.get_info.assert_awaited_once()
 
 
 @pytest.mark.asyncio
 async def test_info(plugin):
     """Test info"""
-    result = await plugin.llm.get_myllm_info()
+    result = await plugin.llm.get_info()
     assert result is not None
 
 
 @pytest.mark.asyncio
 async def test_llm_chat(plugin):
     """Test llm"""
     print(plugin.llm.provider)
     result = await plugin.llm.chat("tell me a story")
-    sleep(20)
     print(result)
     assert result is not None
 
 
-@pytest.mark.asyncio
-async def test_clear_chat_history(plugin):
-    result = plugin.llm.export_chat_history()
-    assert result is not None
+# @pytest.mark.asyncio
+# async def test_clear_chat_history(plugin):
+#     result = plugin.llm.export_chat_history()
+#     assert result is not None
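
A note on the mocking pattern in test_parsing_info above: swapping the renamed coroutine for unittest.mock.AsyncMock lets the test verify that handle_message dispatched to get_info without calling a real provider. A minimal, self-contained sketch of the same pattern (the Plugin class below is an illustrative stand-in for LlmPlugin, not the real class):

import asyncio
from unittest.mock import AsyncMock


class Plugin:
    """Illustrative stand-in for LlmPlugin; only the dispatch path is shown."""

    def __init__(self):
        self.llm = AsyncMock()  # stand-in for the MyLLM client

    async def handle_message(self, msg):
        if msg == "/info":
            await self.llm.get_info()


async def main():
    plugin = Plugin()
    plugin.llm.get_info = AsyncMock()  # swap in a fresh mock, as the test does
    await plugin.handle_message("/info")
    plugin.llm.get_info.assert_awaited_once()  # raises if it was never awaited


asyncio.run(main())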
202 changes: 0 additions & 202 deletions tt/plugins/default_plugins/broker_IBKR_plugin.py

This file was deleted.

26 changes: 12 additions & 14 deletions tt/plugins/default_plugins/llm_plugin.py
@@ -3,7 +3,7 @@
"""
import os
import threading

from myllm import MyLLM

from tt.config import settings
@@ -33,27 +33,25 @@ async def send_notification(self, message):
         if self.enabled:
             await send_notification(message)
 
-
     async def handle_message(self, msg):
         """Handles incoming messages"""
         if not self.should_handle(msg):
             return
-        if (
-            self.llm.llm_ai_mode
-            and (settings.bot_ignore not in msg)
-            and (not msg.startswith(settings.bot_prefix))
-        ):
-            threading.Thread(target=self.process_chat, args=(msg,)).start()
-
+        # if (
+        #     self.llm.llm_ai_mode
+        #     and (settings.bot_ignore not in msg)
+        #     and (not msg.startswith(settings.bot_prefix))
+        # ):
+        #     threading.Thread(target=self.process_chat, args=(msg,)).start()
 
         if msg.startswith(settings.bot_prefix):
             command, *args = msg.split(" ")
             command = command[1:]
 
             command_mapping = {
-                settings.bot_command_info: self.llm.get_myllm_info,
-                settings.bot_command_aimode: self.llm.switch_continous_mode,
-                settings.bot_command_aiclear: self.llm.clear_chat_history,
+                settings.bot_command_info: self.llm.get_info,
+                # settings.bot_command_aimode: self.llm.switch_continous_mode,
+                # settings.bot_command_aiclear: self.llm.clear_chat_history,
                 settings.bot_command_aiexport: self.llm.export_chat_history,
                 settings.bot_command_aichat: lambda: self.llm.chat(str(args)),
             }
@@ -62,5 +60,5 @@ async def handle_message(self, msg):
             await self.send_notification(f"{await function()}")
 
     def process_chat(self, msg):
-        chat = self.llm.chat(str(msg))
-        self.send_notification(chat)
+        chat = self.llm.chat(str(msg))
+        self.send_notification(chat)
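
The command_mapping above is a plain dispatch table: handle_message strips the prefix, looks the command up, and awaits whichever coroutine it maps to. A rough standalone sketch of that pattern (the prefix value and stub coroutines are invented for illustration; only the lookup-and-await shape mirrors the plugin):

import asyncio

BOT_PREFIX = "/"  # illustrative; the plugin reads settings.bot_prefix


async def get_info():
    return "llm info"


async def export_chat_history():
    return "history exported"


async def dispatch(msg):
    if not msg.startswith(BOT_PREFIX):
        return None
    command, *args = msg.split(" ")
    command = command[1:]  # drop the prefix character
    command_mapping = {
        "info": get_info,
        "aiexport": export_chat_history,
    }
    function = command_mapping.get(command)
    if function:
        return await function()


print(asyncio.run(dispatch("/info")))  # -> llm info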
5 changes: 3 additions & 2 deletions tt/talky_settings.toml
@@ -336,8 +336,9 @@ token_personal_list = "https://raw.githubusercontent.com/mraniki/tokenlist/main/
###################################
### AI MYLLM SETTINGS ###
###################################

[default.myllm.template]
myllm_enabled = true
llm_ai_mode = false
[default.myllm.g4f]
enabled = true
llm_model= "gpt_4"
llm_provider = "g4f.Provider.Bing" # Refer to https://github.com/xtekky/gpt4free
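
For orientation, tt reads this file through its settings object (llm_plugin.py imports from tt.config). Assuming that object is dynaconf-based with environments enabled, which the [default.…] layout suggests, a nested section such as [default.myllm.g4f] would be reachable as plain attribute access; a hedged sketch:

from dynaconf import Dynaconf

# Assumption: tt's settings are loaded roughly like this; the file path
# and environments flag are inferred from the [default.*] layout above.
settings = Dynaconf(
    settings_files=["tt/talky_settings.toml"],
    environments=True,  # makes [default] the base environment
)

# With environments enabled, the `default.` prefix is dropped on access.
print(settings.myllm.g4f.enabled)    # -> True
print(settings.myllm.g4f.llm_model)  # -> gpt_4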
