Fixed split lines in tutorials, reworked system prompt handling after initialization
NotBioWaste905 committed Dec 4, 2024
1 parent 9b31ac9 commit 1c4aa24
Showing 5 changed files with 6 additions and 9 deletions.
chatsky/llm/llm_api.py: 4 changes (1 addition, 3 deletions)
@@ -8,9 +8,7 @@
 from pydantic import BaseModel
 import logging
 from chatsky.core.message import Message
-from chatsky.core.context import Context
 from chatsky.llm.methods import BaseMethod
-from chatsky.llm.utils import message_to_langchain
 from chatsky.llm._langchain_imports import StrOutputParser, BaseChatModel, BaseMessage, check_langchain_available


@@ -32,7 +30,7 @@ def __init__(
         check_langchain_available()
         self.model: BaseChatModel = model
         self.parser = StrOutputParser()
-        self.system_prompt = system_prompt
+        self.system_prompt = Message(text=system_prompt)

     async def respond(
         self,
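
For reference, a minimal sketch of what the reworked initialization means for callers. The LLM_API class name, the chatsky.llm import path, and the ChatOpenAI backend are assumptions beyond what this diff shows; only the system_prompt behaviour comes from the change above.

from langchain_openai import ChatOpenAI  # assumed backend, not part of this diff
from chatsky.llm import LLM_API          # assumed import path for the wrapper

llm = LLM_API(
    model=ChatOpenAI(model="gpt-4o-mini"),
    system_prompt="You are a helpful assistant.",
)
# After this commit the prompt is stored as a Message rather than a raw string.
assert llm.system_prompt.text == "You are a helpful assistant."
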
chatsky/llm/utils.py: 1 change (1 addition, 0 deletions)
@@ -28,6 +28,7 @@ async def message_to_langchain(
     check_langchain_available()
     if isinstance(message, str):
         message = Message(text=message)
+
     if message.text is None:
         content = []
     elif len(message.text) > max_size:
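
A small usage sketch of message_to_langchain after this change; the ctx argument and source keyword mirror how responses/llm.py calls it below, and the surrounding coroutine is illustrative only.

from chatsky.core.message import Message
from chatsky.llm.utils import message_to_langchain

async def to_langchain_examples(ctx):
    # With the str-to-Message wrapping above, both calls should produce
    # equivalent langchain messages ("human" as a source value is assumed).
    from_str = await message_to_langchain("hello", ctx=ctx, source="human")
    from_msg = await message_to_langchain(Message(text="hello"), ctx=ctx, source="human")
    return from_str, from_msg
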
chatsky/responses/llm.py: 6 changes (3 additions, 3 deletions)
@@ -12,7 +12,7 @@
 from chatsky.core.message import Message
 from chatsky.core.context import Context
 from chatsky.llm.utils import message_to_langchain, context_to_history
-from chatsky.llm._langchain_imports import SystemMessage, check_langchain_available
+from chatsky.llm._langchain_imports import check_langchain_available
 from chatsky.llm.filters import BaseHistoryFilter, DefaultFilter
 from chatsky.core.script_function import BaseResponse, AnyResponse

@@ -51,10 +51,10 @@ class LLMResponse(BaseResponse):
     async def call(self, ctx: Context) -> Message:
         check_langchain_available()
         model = ctx.pipeline.models[self.model_name]
-        if model.system_prompt == "":
+        if model.system_prompt.text == "":
             history_messages = []
         else:
-            history_messages = [SystemMessage(model.system_prompt)]
+            history_messages = [await message_to_langchain(model.system_prompt, ctx=ctx, source="system")]
         current_node = ctx.current_node
         current_misc = current_node.misc
         if current_misc is not None:
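
With the system prompt stored as a Message, the emptiness check now goes through .text and the prompt is converted via message_to_langchain with source="system" instead of a bare SystemMessage. Below is a sketch of how such a response is typically attached to a node; the RESPONSE keyword import and the "my_model" key are assumptions drawn from the tutorials, not from this diff.

from chatsky import RESPONSE  # assumed keyword import
from chatsky.responses.llm import LLMResponse

node = {
    # "my_model" must match the name the model was registered under in
    # ctx.pipeline.models when the pipeline was built.
    RESPONSE: LLMResponse(model_name="my_model"),
}
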
tutorials/llm/1_basics.py: 2 changes (0 additions, 2 deletions)
@@ -39,7 +39,6 @@
 therefore it will store all dialogue history.
 This is not advised if you are short on tokens or
 if you do not need to store all dialogue history.
-Alternatively you can instantiate model object inside
 Also note, that langchain reads environment variables for the models
 automatically and you do not necessarily need to set them explicitly.
@@ -58,7 +57,6 @@
 As you can see in this script, you can pass an additional prompt to the LLM.
 We will cover that thoroughly in the Prompt usage tutorial.
-of RESPONSE field in the nodes you need.
 """

 # %%
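
As a small illustration of the environment-variable note in the excerpt above; OPENAI_API_KEY only applies if an OpenAI-backed langchain model is used, which is an assumption here.

import os
from langchain_openai import ChatOpenAI

# langchain picks the key up from the environment, so no api_key argument is needed.
os.environ.setdefault("OPENAI_API_KEY", "sk-...")
chat_model = ChatOpenAI(model="gpt-4o-mini")
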
tutorials/llm/2_prompt_usage.py: 2 changes (1 addition, 1 deletion)
@@ -77,7 +77,7 @@

 # %% [markdown]
 """
-Chatsky enables you to use more complex prompts then a simple string in need be.
+Chatsky enables you to use more complex prompts then a simple string if need be.
 In this example we create a VacantPlaces class, that can dynamically retrieve
 some external data and put them into the prompt.
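
A rough sketch of the VacantPlaces idea described above: a response-style class whose call method fetches external data and formats it into prompt text. BaseResponse, Context, Message, and the call signature are taken from this commit's responses/llm.py; the data source and the returned values are purely illustrative.

from chatsky.core.context import Context
from chatsky.core.message import Message
from chatsky.core.script_function import BaseResponse


class VacantPlaces(BaseResponse):
    async def call(self, ctx: Context) -> Message:
        # Illustrative stand-in for a real request to an external service.
        places = await self.request_data()
        return Message(text=f"Currently vacant positions: {', '.join(places)}.")

    async def request_data(self) -> list:
        return ["waiter", "barista"]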
