Skip to content

Commit

Permalink
Merge pull request #89 from BESSER-PEARL/dev
Browse files Browse the repository at this point in the history
v1.5.0
  • Loading branch information
mgv99 authored Nov 13, 2024
2 parents cd20663 + ff4b657 commit 5ec4764
Show file tree
Hide file tree
Showing 42 changed files with 1,546 additions and 341 deletions.
1 change: 1 addition & 0 deletions besser/bot/core/entity/entity.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,7 @@ def to_json(self) -> dict:
"""
entity_json = {
'base_entity': self.base_entity,
'description': self.description,
'entries': []
}
if not self.base_entity:
Expand Down
2 changes: 2 additions & 0 deletions besser/bot/core/message.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,8 @@ class MessageType(Enum):
"""Enumeration of the different message types in :class:`Message`."""

STR = 'str'
MARKDOWN = 'markdown'
HTML = 'html'
FILE = 'file'
IMAGE = 'image'
DATAFRAME = 'dataframe'
Expand Down
72 changes: 72 additions & 0 deletions besser/bot/core/processors/user_adaptation_processor.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
from besser.bot.core.processors.processor import Processor
from besser.bot.nlp.llm.llm import LLM
from besser.bot.core.bot import Bot
from besser.bot.core.session import Session
from besser.bot.nlp.nlp_engine import NLPEngine


class UserAdaptationProcessor(Processor):
    """Processor that adapts the bot's responses to the user's profile.

    The UserAdaptationProcessor takes into account the user's profile and adapts
    the bot's responses to fit the profile. The goal is to increase the user
    experience. This processor leverages LLMs to adapt the messages given a user
    profile. For static profiles, an adaptation will be done once. If the
    profile changes, then an adaptation will be triggered again.

    Args:
        bot (Bot): The bot the processor belongs to
        llm_name (str): the name of the LLM to use
        context (str): additional context to improve the adaptation. Should
            include information about the bot itself and the task it should
            accomplish

    Attributes:
        bot (Bot): The bot the processor belongs to
        _llm_name (str): the name of the LLM to use
        _context (str): additional context to improve the adaptation. Should
            include information about the bot itself and the task it should
            accomplish
        _user_model (dict): dictionary containing the user model of each session
    """

    def __init__(self, bot: 'Bot', llm_name: str, context: str = None):
        super().__init__(bot=bot, bot_messages=True)
        self._llm_name: str = llm_name
        self._nlp_engine: 'NLPEngine' = bot.nlp_engine
        self._user_model: dict = {}
        # Fall back to a minimal generic context when none is provided.
        self._context: str = context if context else "You are a chatbot."

    # TODO: add capability to improve/change prompt of context
    def process(self, session: 'Session', message: str) -> str:
        """Process a message and adapt its content based on a given user model.

        The stored user model is fetched and sent as part of the LLM's system
        context. If no user model has been stored for this session yet, the
        message is returned unchanged instead of raising a KeyError.

        Args:
            session (Session): the current session
            message (str): the message to be processed

        Returns:
            str: the processed message
        """
        if session.id not in self._user_model:
            # No profile to adapt to; leave the bot message untouched.
            return message
        llm: LLM = self._nlp_engine._llms[self._llm_name]
        user_context = (
            f"{self._context}\n"
            "You are capable of adapting your predefined answers based on a given user profile. "
            "Your goal is to increase the user experience by adapting the messages based on the "
            "different attributes of the user profile as best as possible and take all the "
            "attributes into account. You are free to adapt the messages in any way you like. "
            "The user should relate more. This is the user's profile\n"
            f"{str(self._user_model[session.id])}"
        )
        prompt = f"You need to adapt this message: {message}\n Only respond with the adapted message!"
        llm_response: str = llm.predict(prompt, session=session, system_message=user_context)
        return llm_response

    def add_user_model(self, session: 'Session', user_model: dict) -> None:
        """Store the user model for a session internally.

        Args:
            session (Session): the current session
            user_model (dict): the user model of a given user
        """
        self._user_model[session.id] = user_model
7 changes: 7 additions & 0 deletions besser/bot/core/state.py
Original file line number Diff line number Diff line change
Expand Up @@ -196,6 +196,8 @@ def go_to(self, dest: 'State') -> None:
Args:
dest (State): the destination state
"""
if dest not in self._bot.states:
raise StateNotFound(self._bot, dest)
if self.transitions:
raise ConflictingAutoTransitionError(self._bot, self)
self.transitions.append(Transition(name=self._t_name(), source=self, dest=dest, event=auto, event_params={}))
Expand Down Expand Up @@ -265,6 +267,11 @@ def when_variable_matches_operation_go_to(
target (Any): the target value to which will be used in the operation with the stored value
dest (State): the destination state
"""
if dest not in self._bot.states:
raise StateNotFound(self._bot, dest)
for transition in self.transitions:
if transition.is_auto():
raise ConflictingAutoTransitionError(self._bot, self)
event_params = {'var_name': var_name, 'operation': operation, 'target': target}

self.transitions.append(Transition(name=self._t_name(), source=self, dest=dest, event=variable_matches_operation,
Expand Down
55 changes: 51 additions & 4 deletions besser/bot/nlp/llm/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,18 +22,26 @@ class LLM(ABC):
nlp_engine (NLPEngine): the NLPEngine that handles the NLP processes of the bot the LLM belongs to
name (str): the LLM name
parameters (dict): the LLM parameters
global_context (str): the global context to be provided to the LLM for each request
Attributes:
_nlp_engine (NLPEngine): the NLPEngine that handles the NLP processes of the bot the LLM belongs to
name (str): the LLM name
parameters (dict): the LLM parameters
_global_context (str): the global context to be provided to the LLM for each request
_user_context (dict): aggregation of user specific contexts to be provided to the LLM for each request
_user_contexts (dict): dictionary containing the different context elements making up the user's context
user specific context to be provided to the LLM for each request
"""

def __init__(self, nlp_engine: 'NLPEngine', name: str, parameters: dict):
def __init__(self, nlp_engine: 'NLPEngine', name: str, parameters: dict, global_context: str = None):
    """Create the LLM and register it in the bot's NLP engine.

    Args:
        nlp_engine (NLPEngine): the NLPEngine that handles the NLP processes of the bot the LLM belongs to
        name (str): the LLM name
        parameters (dict): the LLM parameters
        global_context (str): the global context to be provided to the LLM for each request
    """
    self._nlp_engine: 'NLPEngine' = nlp_engine
    self.name: str = name
    self.parameters: dict = parameters
    # Register this LLM in the engine's registry under its name.
    self._nlp_engine._llms[name] = self
    self._global_context: str = global_context
    # Bug fix: these were bound to the ``dict`` type itself (``= dict``)
    # instead of an empty dict instance, so later membership tests such as
    # ``session.id in self._user_context`` raised TypeError.
    self._user_context: dict = {}
    self._user_contexts: dict = {}

def set_parameters(self, parameters: dict) -> None:
"""Set the LLM parameters.
Expand All @@ -49,20 +57,23 @@ def initialize(self) -> None:
pass

@abstractmethod
def predict(self, message: str, parameters: dict = None, session: 'Session' = None,
            system_message: str = None) -> str:
    """Make a prediction, i.e., generate an output.

    Abstract method: concrete LLM wrappers implement the actual call.

    Args:
        message (str): the LLM input text
        parameters (dict): the LLM parameters to use in the prediction. If none is provided, the default LLM
            parameters will be used
        session (Session): the ongoing session, can be None if no context needs to be applied
        system_message (str): system message to give high priority context to the LLM

    Returns:
        str: the LLM output
    """
    pass

def chat(self, session: 'Session', parameters: dict = None) -> str:
def chat(self, session: 'Session', parameters: dict = None, system_message: str = None) -> str:
"""Make a prediction, i.e., generate an output.
This function can provide the chat history to the LLM for the output generation, simulating a conversation or
Expand All @@ -71,7 +82,8 @@ def chat(self, session: 'Session', parameters: dict = None) -> str:
Args:
session (Session): the user session
parameters (dict): the LLM parameters. If none is provided, the RAG's default value will be used
system_message (str): system message to give high priority context to the LLM
Returns:
str: the LLM output
"""
Expand Down Expand Up @@ -100,3 +112,38 @@ def intent_classification(
"""
logging.warning(f'Intent Classification not implemented in {self.__class__.__name__}')
return []

def add_user_context(self, session: 'Session', context: str, context_name: str) -> None:
    """Add or replace a user-specific context element.

    Stores ``context`` under ``context_name`` for the given session and
    rebuilds the session's aggregated context message (one element per line).

    Args:
        session (Session): the ongoing session
        context (str): the user-specific context
        context_name (str): the key given to the specific user context
    """
    # Bug fix: the presence check used to key on ``_user_context`` while the
    # write targeted ``_user_contexts``; check and write now use the same
    # per-element mapping.
    if session.id not in self._user_contexts:
        self._user_contexts[session.id] = {}
    self._user_contexts[session.id][context_name] = context
    # Aggregate all context elements of this session, one per line.
    self._user_context[session.id] = "".join(
        f"{element}\n" for element in self._user_contexts[session.id].values()
    )

def remove_user_context(self, session: 'Session', context_name: str) -> None:
    """Remove a user-specific context element.

    Deletes the context element stored under ``context_name`` for the given
    session and rebuilds the session's aggregated context message. When the
    last element is removed, the session's entries are dropped from both
    mappings.

    Args:
        session (Session): the ongoing session
        context_name (str): the key given to the specific user context
    """
    # Key the presence check on the per-element mapping (``_user_contexts``)
    # so check and mutation target the same structure.
    contexts = self._user_contexts.get(session.id)
    if not contexts or context_name not in contexts:
        return
    contexts.pop(context_name)
    context_message = "".join(f"{element}\n" for element in contexts.values())
    if context_message:
        self._user_context[session.id] = context_message
    else:
        # No context elements left for this session: clean up both mappings
        # instead of leaving a stale empty dict behind.
        self._user_context.pop(session.id, None)
        self._user_contexts.pop(session.id, None)
33 changes: 27 additions & 6 deletions besser/bot/nlp/llm/llm_huggingface.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,8 @@ class LLMHuggingFace(LLM):
num_previous_messages (int): for the chat functionality, the number of previous messages of the conversation
to add to the prompt context (must be > 0). Necessary a connection to
:class:`~besser.bot.db.monitoring_db.MonitoringDB`.
global_context (str): the global context to be provided to the LLM for each request
Attributes:
_nlp_engine (NLPEngine): the NLPEngine that handles the NLP processes of the bot the LLM belongs to
Expand All @@ -35,10 +37,13 @@ class LLMHuggingFace(LLM):
num_previous_messages (int): for the chat functionality, the number of previous messages of the conversation
to add to the prompt context (must be > 0). Necessary a connection to
:class:`~besser.bot.db.monitoring_db.MonitoringDB`.
_global_context (str): the global context to be provided to the LLM for each request
_user_context (dict): user specific context to be provided to the LLM for each request
"""

def __init__(self, bot: 'Bot', name: str, parameters: dict, num_previous_messages: int = 1,
             global_context: str = None):
    """Create the HuggingFace (local pipeline) LLM wrapper.

    Args:
        bot (Bot): the bot the LLM belongs to
        name (str): the LLM name
        parameters (dict): the LLM parameters
        num_previous_messages (int): for the chat functionality, the number of previous messages of the
            conversation to add to the prompt context (must be > 0)
        global_context (str): the global context to be provided to the LLM for each request
    """
    super().__init__(bot.nlp_engine, name, parameters, global_context)
    # The text-generation pipeline is created lazily in initialize().
    self.pipe = None
    self.num_previous_messages: int = num_previous_messages

Expand All @@ -53,18 +58,34 @@ def set_num_previous_messages(self, num_previous_messages: int) -> None:
def initialize(self) -> None:
    """Create the HuggingFace text-generation pipeline for this model name."""
    self.pipe = pipeline("text-generation", model=self.name)

def predict(self, message: str, parameters: dict = None, session: 'Session' = None,
            system_message: str = None) -> str:
    """Make a prediction, i.e., generate an output.

    The global context, the user-specific context (when a session is given)
    and the optional system message are prepended as system messages before
    the user message, then consecutive same-role messages are merged.

    Args:
        message (str): the LLM input text
        parameters (dict): the LLM parameters to use in the prediction. If none is provided, the default LLM
            parameters will be used
        session (Session): the ongoing session, can be None if no user context needs to be applied
        system_message (str): system message to give high priority context to the LLM

    Returns:
        str: the LLM output
    """
    if not parameters:
        parameters = self.parameters
    context_messages = []
    if self._global_context:
        context_messages.append({'role': 'system', 'content': f"{self._global_context}\n"})
    if session and session.id in self._user_context:
        context_messages.append({'role': 'system', 'content': f"{self._user_context[session.id]}\n"})
    if system_message:
        # Bug fix: this used to concatenate the raw string onto the message
        # list (TypeError at runtime); append a system message dict instead,
        # mirroring chat().
        context_messages.append({'role': 'system', 'content': f"{system_message}\n"})
    messages = merge_llm_consecutive_messages(context_messages + [{'role': 'user', 'content': message}])
    outputs = self.pipe(messages, return_full_text=False, **parameters)
    answer = outputs[0]['generated_text']
    return answer

def chat(self, session: 'Session', parameters: dict = None) -> str:
def chat(self, session: 'Session', parameters: dict = None, system_message: str = None) -> str:
if not parameters:
parameters = self.parameters
if self.num_previous_messages <= 0:
raise ValueError('The number of previous messages to send to the LLM must be > 0')
context_messages = []
if self._global_context:
context_messages.append({'role': 'system', 'content': f"{self._global_context}\n"})
if session and session.id in self._user_context:
context_messages.append({'role': 'system', 'content': f"{self._user_context[session.id]}\n"})
if system_message:
context_messages.append({'role': 'system', 'content': f"{system_message}\n"})
chat_history: list[Message] = session.get_chat_history(n=self.num_previous_messages)
messages = [
{'role': 'user' if message.is_user else 'assistant', 'content': message.content}
Expand All @@ -73,7 +94,7 @@ def chat(self, session: 'Session', parameters: dict = None) -> str:
]
if not messages:
messages.append({'role': 'user', 'content': session.message})
messages = merge_llm_consecutive_messages(messages)
messages = merge_llm_consecutive_messages(context_messages + messages)
outputs = self.pipe(messages, return_full_text=False, **parameters)
answer = outputs[0]['generated_text']
return answer
Expand Down
23 changes: 19 additions & 4 deletions besser/bot/nlp/llm/llm_huggingface_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@

if TYPE_CHECKING:
from besser.bot.core.bot import Bot
from besser.bot.core.session import Session
from besser.bot.nlp.intent_classifier.llm_intent_classifier import LLMIntentClassifier


Expand All @@ -25,17 +26,21 @@ class LLMHuggingFaceAPI(LLM):
parameters (dict): the LLM parameters
num_previous_messages (int): for the chat functionality, the number of previous messages of the conversation
to add to the prompt context (must be > 0)
global_context (str): the global context to be provided to the LLM for each request
Attributes:
_nlp_engine (NLPEngine): the NLPEngine that handles the NLP processes of the bot the LLM belongs to
name (str): the LLM name
parameters (dict): the LLM parameters
num_previous_messages (int): for the chat functionality, the number of previous messages of the conversation
to add to the prompt context (must be > 0)
_global_context (str): the global context to be provided to the LLM for each request
_user_context (dict): user specific context to be provided to the LLM for each request
"""

def __init__(self, bot: 'Bot', name: str, parameters: dict, num_previous_messages: int = 1,
             global_context: str = None):
    """Create the HuggingFace Inference API LLM wrapper.

    Args:
        bot (Bot): the bot the LLM belongs to
        name (str): the LLM name (the HuggingFace model id used to build the API URL)
        parameters (dict): the LLM parameters
        num_previous_messages (int): for the chat functionality, the number of previous messages of the
            conversation to add to the prompt context (must be > 0)
        global_context (str): the global context to be provided to the LLM for each request
    """
    super().__init__(bot.nlp_engine, name, parameters, global_context=global_context)
    self.num_previous_messages: int = num_previous_messages

def set_model(self, name: str) -> None:
Expand All @@ -57,7 +62,7 @@ def set_num_previous_messages(self, num_previous_messages: int) -> None:
def initialize(self) -> None:
    """No initialization required: requests go to the hosted Inference API per call."""
    pass

def predict(self, message: str, parameters: dict = None) -> str:
def predict(self, message: str, parameters: dict = None, session: 'Session' = None, system_message: str = None) -> str:
"""Make a prediction, i.e., generate an output.
Runs the `Text Generation Inference API task
Expand All @@ -67,7 +72,8 @@ def predict(self, message: str, parameters: dict = None) -> str:
message (Any): the LLM input text
parameters (dict): the LLM parameters to use in the prediction. If none is provided, the default LLM
parameters will be used
system_message (str): system message to give high priority context to the LLM
Returns:
str: the LLM output
"""
Expand All @@ -78,6 +84,15 @@ def predict(self, message: str, parameters: dict = None) -> str:
parameters['return_full_text'] = False
headers = {"Authorization": f"Bearer {self._nlp_engine.get_property(nlp.HF_API_KEY)}"}
api_url = F"https://api-inference.huggingface.co/models/{self.name}"
context_messages = ""
if self._global_context:
context_messages = f"{self._global_context}\n"
if session and session.id in self._user_context:
context_messages = context_messages + f"{self._user_context[session.id]}\n"
if system_message:
context_messages = context_messages + f"{system_message}\n"
if context_messages != "":
message = context_messages + message
payload = {"inputs": message, "parameters": parameters}
response = requests.post(api_url, headers=headers, json=payload)
return response.json()[0]['generated_text']
Expand Down
Loading

0 comments on commit 5ec4764

Please sign in to comment.