diff --git a/besser/bot/core/entity/entity.py b/besser/bot/core/entity/entity.py
index 5ab805d..5f06885 100644
--- a/besser/bot/core/entity/entity.py
+++ b/besser/bot/core/entity/entity.py
@@ -74,6 +74,7 @@ def to_json(self) -> dict:
"""
entity_json = {
'base_entity': self.base_entity,
+ 'description': self.description,
'entries': []
}
if not self.base_entity:
diff --git a/besser/bot/core/message.py b/besser/bot/core/message.py
index be4f4c8..803f926 100644
--- a/besser/bot/core/message.py
+++ b/besser/bot/core/message.py
@@ -7,6 +7,8 @@ class MessageType(Enum):
"""Enumeration of the different message types in :class:`Message`."""
STR = 'str'
+ MARKDOWN = 'markdown'
+ HTML = 'html'
FILE = 'file'
IMAGE = 'image'
DATAFRAME = 'dataframe'
diff --git a/besser/bot/core/processors/user_adaptation_processor.py b/besser/bot/core/processors/user_adaptation_processor.py
new file mode 100644
index 0000000..2f41be9
--- /dev/null
+++ b/besser/bot/core/processors/user_adaptation_processor.py
@@ -0,0 +1,72 @@
+from besser.bot.core.processors.processor import Processor
+from besser.bot.nlp.llm.llm import LLM
+from besser.bot.core.bot import Bot
+from besser.bot.core.session import Session
+from besser.bot.nlp.nlp_engine import NLPEngine
+
+
+class UserAdaptationProcessor(Processor):
+    """The UserAdaptationProcessor takes the user's profile into account and adapts the bot's responses to fit that
+    profile. The goal is to improve the user experience.
+
+    This processor leverages LLMs to adapt the messages to a given user profile. For static profiles, the adaptation
+    is done once; if the profile changes, the adaptation is triggered again.
+
+    Args:
+        bot (Bot): the bot the processor belongs to
+        llm_name (str): the name of the LLM to use
+        context (str): additional context to improve the adaptation. Should include information about the bot itself
+            and the task it should accomplish
+
+    Attributes:
+        bot (Bot): the bot the processor belongs to
+        _llm_name (str): the name of the LLM to use
+        _context (str): additional context to improve the adaptation. Should include information about the bot itself
+            and the task it should accomplish
+        _user_model (dict): dictionary containing the user models, indexed by session id
+    """
+ def __init__(self, bot: 'Bot', llm_name: str, context: str = None):
+ super().__init__(bot=bot, bot_messages=True)
+ self._llm_name: str = llm_name
+ self._nlp_engine: 'NLPEngine' = bot.nlp_engine
+ self._user_model: dict = {}
+ if context:
+ self._context = context
+ else:
+ self._context = "You are a chatbot."
+
+    # TODO: add capability to improve/change the context prompt
+ def process(self, session: 'Session', message: str) -> str:
+        """Process a message, adapting its content based on the stored user model.
+
+        The stored user model is fetched and sent to the LLM as part of the context.
+
+ Args:
+ session (Session): the current session
+ message (str): the message to be processed
+
+ Returns:
+ str: the processed message
+ """
+ llm: LLM = self._nlp_engine._llms[self._llm_name]
+        user_context = (
+            f"{self._context}\n"
+            "You are capable of adapting your predefined answers based on a given user profile. "
+            "Your goal is to improve the user experience by adapting the messages to the different attributes of "
+            "the user profile as well as possible, taking all attributes into account, so that the user can relate "
+            "to the messages more. You are free to adapt the messages in any way you like. "
+            "This is the user's profile:\n"
+            f"{self._user_model[session.id]}"
+        )
+        prompt = f"You need to adapt this message: {message}\nOnly respond with the adapted message!"
+ llm_response: str = llm.predict(prompt, session=session, system_message=user_context)
+ return llm_response
+
+ def add_user_model(self, session: 'Session', user_model: dict) -> None:
+        """Store the user model for the given session.
+
+        Args:
+            session (Session): the current session
+            user_model (dict): the user model of the session's user
+        """
+ self._user_model[session.id] = user_model
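+
+# Usage sketch (illustrative only: the model name 'gpt-4o-mini' and the profile
+# fields below are hypothetical, not part of the framework):
+#
+#     bot = Bot('adaptive_bot')
+#     LLMOpenAI(bot=bot, name='gpt-4o-mini', parameters={})
+#     processor = UserAdaptationProcessor(bot=bot, llm_name='gpt-4o-mini',
+#                                         context='You are a travel assistant bot.')
+#     ...
+#     # once the user's profile is known, e.g., gathered in a previous state:
+#     processor.add_user_model(session, {'age': 30, 'expertise': 'beginner'})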
diff --git a/besser/bot/core/state.py b/besser/bot/core/state.py
index 9daf7e2..1982184 100644
--- a/besser/bot/core/state.py
+++ b/besser/bot/core/state.py
@@ -196,6 +196,8 @@ def go_to(self, dest: 'State') -> None:
Args:
dest (State): the destination state
"""
+ if dest not in self._bot.states:
+ raise StateNotFound(self._bot, dest)
if self.transitions:
raise ConflictingAutoTransitionError(self._bot, self)
self.transitions.append(Transition(name=self._t_name(), source=self, dest=dest, event=auto, event_params={}))
@@ -265,6 +267,11 @@ def when_variable_matches_operation_go_to(
target (Any): the target value to which will be used in the operation with the stored value
dest (State): the destination state
"""
+ if dest not in self._bot.states:
+ raise StateNotFound(self._bot, dest)
+ for transition in self.transitions:
+ if transition.is_auto():
+ raise ConflictingAutoTransitionError(self._bot, self)
event_params = {'var_name': var_name, 'operation': operation, 'target': target}
self.transitions.append(Transition(name=self._t_name(), source=self, dest=dest, event=variable_matches_operation,
diff --git a/besser/bot/nlp/llm/llm.py b/besser/bot/nlp/llm/llm.py
index 2b1b88e..6de32b6 100644
--- a/besser/bot/nlp/llm/llm.py
+++ b/besser/bot/nlp/llm/llm.py
@@ -22,18 +22,26 @@ class LLM(ABC):
nlp_engine (NLPEngine): the NLPEngine that handles the NLP processes of the bot the LLM belongs to
name (str): the LLM name
parameters (dict): the LLM parameters
+ global_context (str): the global context to be provided to the LLM for each request
Attributes:
_nlp_engine (NLPEngine): the NLPEngine that handles the NLP processes of the bot the LLM belongs to
name (str): the LLM name
parameters (dict): the LLM parameters
+ _global_context (str): the global context to be provided to the LLM for each request
+        _user_context (dict): per-session aggregation of the user-specific context elements, provided to the LLM
+            with each request
+        _user_contexts (dict): per-session dictionary containing the individual context elements that make up the
+            user's context
"""
- def __init__(self, nlp_engine: 'NLPEngine', name: str, parameters: dict):
+ def __init__(self, nlp_engine: 'NLPEngine', name: str, parameters: dict, global_context: str = None):
self._nlp_engine: 'NLPEngine' = nlp_engine
self.name: str = name
self.parameters: dict = parameters
self._nlp_engine._llms[name] = self
+ self._global_context: str = global_context
+        self._user_context: dict = {}
+        self._user_contexts: dict = {}
def set_parameters(self, parameters: dict) -> None:
"""Set the LLM parameters.
@@ -49,20 +57,23 @@ def initialize(self) -> None:
pass
@abstractmethod
- def predict(self, message: str, parameters: dict = None) -> str:
+ def predict(self, message: str, parameters: dict = None, session: 'Session' = None,
+ system_message: str = None) -> str:
"""Make a prediction, i.e., generate an output.
Args:
message (Any): the LLM input text
            parameters (dict): the LLM parameters to use in the prediction. If none is provided, the default LLM
                parameters will be used
+            session (Session): the ongoing session; can be None if no context needs to be applied
+            system_message (str): system message to give high-priority context to the LLM
Returns:
str: the LLM output
"""
pass
- def chat(self, session: 'Session', parameters: dict = None) -> str:
+ def chat(self, session: 'Session', parameters: dict = None, system_message: str = None) -> str:
"""Make a prediction, i.e., generate an output.
This function can provide the chat history to the LLM for the output generation, simulating a conversation or
@@ -71,7 +82,8 @@ def chat(self, session: 'Session', parameters: dict = None) -> str:
Args:
session (Session): the user session
parameters (dict): the LLM parameters. If none is provided, the RAG's default value will be used
-
+            system_message (str): system message to give high-priority context to the LLM
+
Returns:
str: the LLM output
"""
@@ -100,3 +112,38 @@ def intent_classification(
"""
logging.warning(f'Intent Classification not implemented in {self.__class__.__name__}')
return []
+
+ def add_user_context(self, session: 'Session', context: str, context_name: str) -> None:
+ """Add user-specific context.
+
+ Args:
+ session (Session): the ongoing session
+ context (str): the user-specific context
+ context_name (str): the key given to the specific user context
+ """
+        if session.id not in self._user_contexts:
+ self._user_contexts[session.id] = {}
+ self._user_contexts[session.id][context_name] = context
+ context_message = ""
+ for context_element in self._user_contexts[session.id]:
+ context_message = context_message + self._user_contexts[session.id][context_element] + "\n"
+ self._user_context[session.id] = context_message
+
+ def remove_user_context(self, session: 'Session', context_name: str) -> None:
+ """Remove user-specific context.
+
+ Args:
+ session (Session): the ongoing session
+ context_name (str): the key given to the specific user context
+ """
+        if session.id not in self._user_contexts or context_name not in self._user_contexts[session.id]:
+            return
+        self._user_contexts[session.id].pop(context_name)
+        context_message = ""
+        for context_element in self._user_contexts[session.id]:
+            context_message = context_message + self._user_contexts[session.id][context_element] + "\n"
+        if context_message != "":
+            self._user_context[session.id] = context_message
+        else:
+            self._user_context.pop(session.id)
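+
+# Usage sketch (illustrative key and text; `llm` is any LLM subclass instance):
+# register per-session context that predict() and chat() will prepend as system
+# messages, and drop it when it is no longer relevant:
+#
+#     llm.add_user_context(session, 'The user prefers short answers.', 'style')
+#     ...
+#     llm.remove_user_context(session, 'style')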
diff --git a/besser/bot/nlp/llm/llm_huggingface.py b/besser/bot/nlp/llm/llm_huggingface.py
index c210092..a9d9d06 100644
--- a/besser/bot/nlp/llm/llm_huggingface.py
+++ b/besser/bot/nlp/llm/llm_huggingface.py
@@ -27,6 +27,8 @@ class LLMHuggingFace(LLM):
num_previous_messages (int): for the chat functionality, the number of previous messages of the conversation
to add to the prompt context (must be > 0). Necessary a connection to
:class:`~besser.bot.db.monitoring_db.MonitoringDB`.
+ global_context (str): the global context to be provided to the LLM for each request
+
Attributes:
_nlp_engine (NLPEngine): the NLPEngine that handles the NLP processes of the bot the LLM belongs to
@@ -35,10 +37,13 @@ class LLMHuggingFace(LLM):
num_previous_messages (int): for the chat functionality, the number of previous messages of the conversation
to add to the prompt context (must be > 0). Necessary a connection to
:class:`~besser.bot.db.monitoring_db.MonitoringDB`.
+ _global_context (str): the global context to be provided to the LLM for each request
+        _user_context (dict): user-specific context to be provided to the LLM for each request
"""
- def __init__(self, bot: 'Bot', name: str, parameters: dict, num_previous_messages: int = 1):
- super().__init__(bot.nlp_engine, name, parameters)
+ def __init__(self, bot: 'Bot', name: str, parameters: dict, num_previous_messages: int = 1,
+ global_context: str = None):
+ super().__init__(bot.nlp_engine, name, parameters, global_context)
self.pipe = None
self.num_previous_messages: int = num_previous_messages
@@ -53,18 +58,34 @@ def set_num_previous_messages(self, num_previous_messages: int) -> None:
def initialize(self) -> None:
self.pipe = pipeline("text-generation", model=self.name)
- def predict(self, message: str, parameters: dict = None) -> str:
+ def predict(self, message: str, parameters: dict = None, session: 'Session' = None,
+ system_message: str = None) -> str:
if not parameters:
parameters = self.parameters
- outputs = self.pipe([{'role': 'user', 'content': message}], return_full_text=False, **parameters)
+ context_messages = []
+ if self._global_context:
+ context_messages.append({'role': 'system', 'content': f"{self._global_context}\n"})
+ if session and session.id in self._user_context:
+ context_messages.append({'role': 'system', 'content': f"{self._user_context[session.id]}\n"})
+ if system_message:
+            context_messages.append({'role': 'system', 'content': f"{system_message}\n"})
+ messages = merge_llm_consecutive_messages(context_messages + [{'role': 'user', 'content': message}])
+ outputs = self.pipe(messages, return_full_text=False, **parameters)
answer = outputs[0]['generated_text']
return answer
- def chat(self, session: 'Session', parameters: dict = None) -> str:
+ def chat(self, session: 'Session', parameters: dict = None, system_message: str = None) -> str:
if not parameters:
parameters = self.parameters
if self.num_previous_messages <= 0:
raise ValueError('The number of previous messages to send to the LLM must be > 0')
+ context_messages = []
+ if self._global_context:
+ context_messages.append({'role': 'system', 'content': f"{self._global_context}\n"})
+ if session and session.id in self._user_context:
+ context_messages.append({'role': 'system', 'content': f"{self._user_context[session.id]}\n"})
+ if system_message:
+ context_messages.append({'role': 'system', 'content': f"{system_message}\n"})
chat_history: list[Message] = session.get_chat_history(n=self.num_previous_messages)
messages = [
{'role': 'user' if message.is_user else 'assistant', 'content': message.content}
@@ -73,7 +94,7 @@ def chat(self, session: 'Session', parameters: dict = None) -> str:
]
if not messages:
messages.append({'role': 'user', 'content': session.message})
- messages = merge_llm_consecutive_messages(messages)
+ messages = merge_llm_consecutive_messages(context_messages + messages)
outputs = self.pipe(messages, return_full_text=False, **parameters)
answer = outputs[0]['generated_text']
return answer
diff --git a/besser/bot/nlp/llm/llm_huggingface_api.py b/besser/bot/nlp/llm/llm_huggingface_api.py
index 935d57e..bd57c78 100644
--- a/besser/bot/nlp/llm/llm_huggingface_api.py
+++ b/besser/bot/nlp/llm/llm_huggingface_api.py
@@ -9,6 +9,7 @@
if TYPE_CHECKING:
from besser.bot.core.bot import Bot
+ from besser.bot.core.session import Session
from besser.bot.nlp.intent_classifier.llm_intent_classifier import LLMIntentClassifier
@@ -25,6 +26,7 @@ class LLMHuggingFaceAPI(LLM):
parameters (dict): the LLM parameters
num_previous_messages (int): for the chat functionality, the number of previous messages of the conversation
to add to the prompt context (must be > 0)
+ global_context (str): the global context to be provided to the LLM for each request
Attributes:
_nlp_engine (NLPEngine): the NLPEngine that handles the NLP processes of the bot the LLM belongs to
@@ -32,10 +34,13 @@ class LLMHuggingFaceAPI(LLM):
parameters (dict): the LLM parameters
num_previous_messages (int): for the chat functionality, the number of previous messages of the conversation
to add to the prompt context (must be > 0)
+ _global_context (str): the global context to be provided to the LLM for each request
+        _user_context (dict): user-specific context to be provided to the LLM for each request
"""
- def __init__(self, bot: 'Bot', name: str, parameters: dict, num_previous_messages: int = 1):
- super().__init__(bot.nlp_engine, name, parameters)
+ def __init__(self, bot: 'Bot', name: str, parameters: dict, num_previous_messages: int = 1,
+ global_context: str = None):
+ super().__init__(bot.nlp_engine, name, parameters, global_context=global_context)
self.num_previous_messages: int = num_previous_messages
def set_model(self, name: str) -> None:
@@ -57,7 +62,7 @@ def set_num_previous_messages(self, num_previous_messages: int) -> None:
def initialize(self) -> None:
pass
- def predict(self, message: str, parameters: dict = None) -> str:
+ def predict(self, message: str, parameters: dict = None, session: 'Session' = None, system_message: str = None) -> str:
"""Make a prediction, i.e., generate an output.
Runs the `Text Generation Inference API task
@@ -67,7 +72,8 @@ def predict(self, message: str, parameters: dict = None) -> str:
message (Any): the LLM input text
parameters (dict): the LLM parameters to use in the prediction. If none is provided, the default LLM
parameters will be used
-
+            session (Session): the ongoing session; can be None if no context needs to be applied
+            system_message (str): system message to give high-priority context to the LLM
+
Returns:
str: the LLM output
"""
@@ -78,6 +84,15 @@ def predict(self, message: str, parameters: dict = None) -> str:
parameters['return_full_text'] = False
headers = {"Authorization": f"Bearer {self._nlp_engine.get_property(nlp.HF_API_KEY)}"}
api_url = F"https://api-inference.huggingface.co/models/{self.name}"
+ context_messages = ""
+ if self._global_context:
+ context_messages = f"{self._global_context}\n"
+ if session and session.id in self._user_context:
+ context_messages = context_messages + f"{self._user_context[session.id]}\n"
+ if system_message:
+ context_messages = context_messages + f"{system_message}\n"
+ if context_messages != "":
+ message = context_messages + message
payload = {"inputs": message, "parameters": parameters}
response = requests.post(api_url, headers=headers, json=payload)
return response.json()[0]['generated_text']
diff --git a/besser/bot/nlp/llm/llm_openai_api.py b/besser/bot/nlp/llm/llm_openai_api.py
index f5fbc87..90b5a5e 100644
--- a/besser/bot/nlp/llm/llm_openai_api.py
+++ b/besser/bot/nlp/llm/llm_openai_api.py
@@ -24,6 +24,7 @@ class LLMOpenAI(LLM):
num_previous_messages (int): for the chat functionality, the number of previous messages of the conversation
to add to the prompt context (must be > 0). Necessary a connection to
:class:`~besser.bot.db.monitoring_db.MonitoringDB`.
+ global_context (str): the global context to be provided to the LLM for each request
Attributes:
_nlp_engine (NLPEngine): the NLPEngine that handles the NLP processes of the bot the LLM belongs to
@@ -32,10 +33,13 @@ class LLMOpenAI(LLM):
num_previous_messages (int): for the chat functionality, the number of previous messages of the conversation
to add to the prompt context (must be > 0). Necessary a connection to
:class:`~besser.bot.db.monitoring_db.MonitoringDB`.
+ _global_context (str): the global context to be provided to the LLM for each request
+        _user_context (dict): user-specific context to be provided to the LLM for each request
"""
- def __init__(self, bot: 'Bot', name: str, parameters: dict, num_previous_messages: int = 1):
- super().__init__(bot.nlp_engine, name, parameters)
+ def __init__(self, bot: 'Bot', name: str, parameters: dict, num_previous_messages: int = 1,
+ global_context: str = None):
+ super().__init__(bot.nlp_engine, name, parameters, global_context=global_context)
self.client: OpenAI = None
self.num_previous_messages: int = num_previous_messages
@@ -58,19 +62,25 @@ def set_num_previous_messages(self, num_previous_messages: int) -> None:
def initialize(self) -> None:
self.client = OpenAI(api_key=self._nlp_engine.get_property(nlp.OPENAI_API_KEY))
- def predict(self, message: str, parameters: dict = None) -> str:
+ def predict(self, message: str, parameters: dict = None, session: 'Session' = None, system_message: str = None) -> str:
+ messages = []
+ if self._global_context:
+ messages.append({"role": "system", "content": self._global_context})
+ if session and session.id in self._user_context:
+ messages.append({"role": "system", "content": self._user_context[session.id]})
+ if system_message:
+ messages.append({"role": "system", "content": system_message})
+ messages.append({"role": "user", "content": message})
if not parameters:
parameters = self.parameters
response = self.client.chat.completions.create(
model=self.name,
- messages=[
- {"role": "user", "content": message}
- ],
+ messages=messages,
**parameters,
)
return response.choices[0].message.content
- def chat(self, session: 'Session', parameters: dict = None) -> str:
+ def chat(self, session: 'Session', parameters: dict = None, system_message: str = None) -> str:
if not parameters:
parameters = self.parameters
if self.num_previous_messages <= 0:
@@ -83,9 +93,16 @@ def chat(self, session: 'Session', parameters: dict = None) -> str:
]
if not messages:
messages.append({'role': 'user', 'content': session.message})
+ context_messages = []
+ if self._global_context:
+ context_messages.append({"role": "system", "content": self._global_context})
+ if session and session.id in self._user_context:
+ context_messages.append({"role": "system", "content": self._user_context[session.id]})
+ if system_message:
+ context_messages.append({"role": "system", "content": system_message})
response = self.client.chat.completions.create(
model=self.name,
- messages=messages,
+ messages=context_messages + messages,
**parameters,
)
return response.choices[0].message.content
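+
+# Usage sketch (model name, parameters and prompts are illustrative):
+#
+#     llm = LLMOpenAI(bot=bot, name='gpt-4o-mini', parameters={'temperature': 0.7},
+#                     global_context='Always answer in English.')
+#     answer = llm.predict('What is BESSER?', session=session,
+#                          system_message='Answer in one sentence.')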
diff --git a/besser/bot/nlp/llm/llm_replicate_api.py b/besser/bot/nlp/llm/llm_replicate_api.py
index 0cfbe28..8e8c92d 100644
--- a/besser/bot/nlp/llm/llm_replicate_api.py
+++ b/besser/bot/nlp/llm/llm_replicate_api.py
@@ -10,6 +10,7 @@
if TYPE_CHECKING:
from besser.bot.core.bot import Bot
+ from besser.bot.core.session import Session
from besser.bot.nlp.intent_classifier.llm_intent_classifier import LLMIntentClassifier
@@ -22,6 +23,7 @@ class LLMReplicate(LLM):
parameters (dict): the LLM parameters
num_previous_messages (int): for the chat functionality, the number of previous messages of the conversation
to add to the prompt context (must be > 0)
+ global_context (str): the global context to be provided to the LLM for each request
Attributes:
_nlp_engine (NLPEngine): the NLPEngine that handles the NLP processes of the bot the LLM belongs to
@@ -29,10 +31,13 @@ class LLMReplicate(LLM):
parameters (dict): the LLM parameters
num_previous_messages (int): for the chat functionality, the number of previous messages of the conversation
to add to the prompt context (must be > 0)
+ _global_context (str): the global context to be provided to the LLM for each request
+        _user_context (dict): user-specific context to be provided to the LLM for each request
"""
- def __init__(self, bot: 'Bot', name: str, parameters: dict, num_previous_messages: int = 1):
- super().__init__(bot.nlp_engine, name, parameters)
+ def __init__(self, bot: 'Bot', name: str, parameters: dict, num_previous_messages: int = 1,
+ global_context: str = None):
+ super().__init__(bot.nlp_engine, name, parameters, global_context=global_context)
self.num_previous_messages: int = num_previous_messages
def set_model(self, name: str) -> None:
@@ -55,11 +60,20 @@ def initialize(self) -> None:
if 'REPLICATE_API_TOKEN' not in os.environ:
os.environ['REPLICATE_API_TOKEN'] = self._nlp_engine.get_property(nlp.REPLICATE_API_KEY)
- def predict(self, message: str, parameters: dict = None) -> str:
+ def predict(self, message: str, parameters: dict = None, session: 'Session' = None, system_message: str = None) -> str:
if not parameters:
parameters = self.parameters.copy()
else:
parameters = parameters.copy()
+ context_messages = ""
+ if self._global_context:
+ context_messages = f"{self._global_context}\n"
+ if session and session.id in self._user_context:
+ context_messages = context_messages + f"{self._user_context[session.id]}\n"
+ if system_message:
+ context_messages = context_messages + f"{system_message}\n"
+ if context_messages != "":
+ message = context_messages + message
parameters['prompt'] = message
answer = replicate.run(
self.name,
diff --git a/besser/bot/platforms/payload.py b/besser/bot/platforms/payload.py
index 4afa894..bc858ad 100644
--- a/besser/bot/platforms/payload.py
+++ b/besser/bot/platforms/payload.py
@@ -23,6 +23,14 @@ class PayloadAction(Enum):
BOT_REPLY_STR = 'bot_reply_str'
"""PayloadAction: Indicates that the payload's purpose is to send a bot reply containing a :class:`str` object."""
+ BOT_REPLY_MARKDOWN = 'bot_reply_markdown'
+ """PayloadAction: Indicates that the payload's purpose is to send a bot reply containing a :class:`str` object
+ in Markdown format."""
+
+ BOT_REPLY_HTML = 'bot_reply_html'
+ """PayloadAction: Indicates that the payload's purpose is to send a bot reply containing a :class:`str` object
+ in HTML format."""
+
BOT_REPLY_FILE = 'bot_reply_file'
"""PayloadAction: Indicates that the payload's purpose is to send a bot reply containing a :class:`file.File`
object."""
@@ -52,6 +60,10 @@ class PayloadAction(Enum):
"""
BOT_REPLY_RAG = 'bot_reply_rag'
+ """PayloadAction: Indicates that the payload's purpose is to send a bot reply containing a RAG (Retrieval Augmented
+ Generation) answer, which contains an LLM-generated answer and a set of documents the LLM used as context
+ (see :class:`besser.bot.nlp.rag.rag.RAGMessage`).
+ """
class Payload:
diff --git a/besser/bot/platforms/websocket/chat_widget/css/style.css b/besser/bot/platforms/websocket/chat_widget/css/style.css
new file mode 100644
index 0000000..404127b
--- /dev/null
+++ b/besser/bot/platforms/websocket/chat_widget/css/style.css
@@ -0,0 +1,199 @@
+/* Chat widget styling */
+#chat-window {
+ position: fixed;
+ bottom: 90px;
+ right: 20px;
+ width: 400px;
+ height: 600px;
+ max-height: 90vh;
+ max-width: 90vw;
+ border: 1px solid #ccc;
+ border-radius: 8px;
+ box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
+ overflow: hidden;
+ display: flex;
+ flex-direction: column;
+ font-family: Arial, sans-serif;
+ opacity: 0;
+ transform: translateY(50px);
+ transition: opacity 0.3s ease, transform 0.3s ease;
+ visibility: hidden;
+}
+
+#chat-window.visible {
+ opacity: 1;
+ transform: translateY(0);
+ visibility: visible;
+}
+
+#chat-header {
+ background-color: #0078D7;
+ color: #fff;
+ padding: 10px;
+ text-align: center;
+ cursor: pointer;
+}
+
+#chat-messages {
+ flex: 1;
+ padding: 10px;
+ overflow-y: auto;
+ background-color: #f9f9f9;
+}
+
+.user-message {
+ padding: 8px;
+ margin-top: 8px;
+ border-radius: 4px;
+ width: fit-content;
+ margin-left: auto;
+ background-color: #e1ffc7;
+}
+
+.bot-message {
+ padding: 8px;
+ margin-top: 8px;
+ border-radius: 4px;
+ width: fit-content;
+ margin-right: auto;
+ background-color: #eee;
+}
+
+.markdown-message {
+}
+
+.html-message {
+}
+
+.image-message {
+ max-width: 90%;
+ height: auto;
+ display: block;
+}
+
+.dataframe-message {
+ border-collapse: collapse;
+}
+
+.dataframe-message th, .dataframe-message td {
+ border: 1px solid #c4c4c4;
+ padding: 8px;
+ text-align: left;
+}
+
+.dataframe-message th {
+ background-color: #e1e1e1;
+ font-weight: bold;
+}
+
+.options-message {
+ background: none;
+ width: 90%;
+}
+
+.location-message {
+ width: 95%;
+ height: 300px;
+}
+
+.plotly-message {
+ background: none;
+ width: 95%;
+ height: 300px;
+}
+
+.button {
+ background-color: #0078D7;
+ color: #fff;
+ font-weight: bold;
+ text-decoration: none;
+ padding: 8px;
+ margin-right: 10px;
+ margin-top: 20px;
+ border-radius: 16px;
+ cursor: pointer;
+ overflow-wrap: break-word;
+ line-height: 2.5;
+}
+
+/* Full-screen modal style */
+.plotly-fullscreen-modal {
+ display: none;
+ position: fixed;
+ top: 0;
+ left: 0;
+ width: 100%;
+ height: 100%;
+ background-color: rgba(0, 0, 0, 0.8);
+ z-index: 9999;
+ justify-content: center;
+ align-items: center;
+}
+
+/* Full-screen chart container */
+.plotly-fullscreen-chart {
+ width: 90%;
+ height: 90%;
+ background-color: #fff;
+}
+
+#chat-input {
+ display: flex;
+ border-top: 1px solid #eeeeee;
+}
+
+#chat-input input {
+ flex: 1;
+ padding: 10px;
+ border: none;
+ outline: none;
+}
+
+#chat-input button {
+ background-color: #0078D7;
+ color: #fff;
+ padding: 10px;
+ border: none;
+ cursor: pointer;
+}
+
+#chat-input button:hover {
+ background-color: #005fa3;
+}
+
+/* Circle button styling */
+#circle-button {
+ position: fixed;
+ bottom: 20px;
+ right: 20px;
+ width: 50px;
+ height: 50px;
+ border-radius: 50%;
+ transition: transform 0.5s ease;
+}
+
+#circle-button img {
+    width: 50px; /* Adjust size as needed */
+ height: 50px;
+ top: 20px;
+ left: 20px;
+ box-shadow: 0 2px 5px rgba(0, 0, 0, 0.3);
+ cursor: pointer;
+ border-radius: 50%;
+}
+
+/* Rotated class that triggers the spin */
+.spin {
+ transform: rotate(360deg);
+}
+
+/* Typing indicator styling */
+#typing-indicator {
+ display: none;
+ padding: 8px;
+ width: fit-content;
+}
+
+#typing-indicator img {
+ width: 40px; /* Adjust the size as needed */
+}
\ No newline at end of file
diff --git a/besser/bot/platforms/websocket/chat_widget/data/args.json b/besser/bot/platforms/websocket/chat_widget/data/args.json
new file mode 100644
index 0000000..4147aa2
--- /dev/null
+++ b/besser/bot/platforms/websocket/chat_widget/data/args.json
@@ -0,0 +1,9 @@
+{
+ "userName": "JohnDoe",
+ "chatbotName": "AmazingBot",
+ "themeColor": "#34a4bd",
+ "wsAddress": "ws://localhost:8765",
+ "messageInputPlaceHolder": "Write something...",
+ "icon": "img/bot_logo.jpeg",
+ "typingAnimation": "img/typing_dots.gif"
+}
diff --git a/besser/bot/platforms/websocket/chat_widget/img/bot_logo.jpeg b/besser/bot/platforms/websocket/chat_widget/img/bot_logo.jpeg
new file mode 100644
index 0000000..7caed0d
Binary files /dev/null and b/besser/bot/platforms/websocket/chat_widget/img/bot_logo.jpeg differ
diff --git a/besser/bot/platforms/websocket/chat_widget/img/typing_dots.gif b/besser/bot/platforms/websocket/chat_widget/img/typing_dots.gif
new file mode 100644
index 0000000..c52060c
Binary files /dev/null and b/besser/bot/platforms/websocket/chat_widget/img/typing_dots.gif differ
diff --git a/besser/bot/platforms/websocket/chat_widget/index.html b/besser/bot/platforms/websocket/chat_widget/index.html
new file mode 100644
index 0000000..0e7b444
--- /dev/null
+++ b/besser/bot/platforms/websocket/chat_widget/index.html
@@ -0,0 +1,30 @@
+<!-- index.html (30 lines, chat widget markup): content not recoverable in this extract -->
diff --git a/docs/source/wiki/platforms/websocket_platform/chat_widget.rst b/docs/source/wiki/platforms/websocket_platform/chat_widget.rst
new file mode 100644
index 0000000..3a0d69f
--- /dev/null
+++ b/docs/source/wiki/platforms/websocket_platform/chat_widget.rst
@@ -0,0 +1,38 @@
+Chat widget
+===========
+
+The chat widget UI lets you embed a chatbot in any webpage. It sits in a corner of the window and is expanded or hidden by clicking on an icon.
+
+This is how our chatbot UI looks:
+
+.. figure:: ../../../img/chat_widget_demo.gif
+ :alt: Chat Widget demo
+ :scale: 70%
+
+Parameters
+----------
+
+The file ``data/args.json`` contains parameters you can set to customize the chat widget (websocket address, bot icon, colors, ...).
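+
+For reference, these are the default values:
+
+.. code:: json
+
+    {
+      "userName": "JohnDoe",
+      "chatbotName": "AmazingBot",
+      "themeColor": "#34a4bd",
+      "wsAddress": "ws://localhost:8765",
+      "messageInputPlaceHolder": "Write something...",
+      "icon": "img/bot_logo.jpeg",
+      "typingAnimation": "img/typing_dots.gif"
+    }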
+
+How to use it
+-------------
+
+Just go to the chat_widget directory and open the **index.html** file in a browser.
+
+.. note::
+
+ The parameters can only be read from the JSON file when running the interface from a server, not from the file system.
+
+ You can create a simple server by running the following in the chat widget directory:
+
+ .. code:: bash
+
+ python -m http.server
+
+    This will serve your files at http://localhost:8000.
+
+    If you want to run it from the file system, you will have to hardcode the parameters: instead of loading them
+    from an external file, write your desired values directly in the renderChatWidget function in js/script.js.
+
+To integrate the chat widget into a real webpage, copy the content of index.html into the HTML of your webpage.
+Make sure to also ship the other directories (css, js, img, data) with the webpage, as they contain the chat
+widget's code and assets.
diff --git a/docs/source/wiki/platforms/websocket_platform/streamlit_ui.rst b/docs/source/wiki/platforms/websocket_platform/streamlit_ui.rst
new file mode 100644
index 0000000..381d26d
--- /dev/null
+++ b/docs/source/wiki/platforms/websocket_platform/streamlit_ui.rst
@@ -0,0 +1,26 @@
+Streamlit UI
+============
+
+We provide a Streamlit UI implementing a WebSocket client to communicate with the bot.
+
+This is how our chatbot UI looks:
+
+.. figure:: ../../../img/streamlit_ui_demo.gif
+ :alt: WebSocket UI demo
+
+How to use it
+-------------
+
+You can run it directly from the bot by enabling it in the websocket platform:
+
+.. code:: python
+
+ bot = Bot('example_bot')
+ ...
+ websocket_platform = bot.use_websocket_platform(use_ui=True)
+
+You can also run it separately: open a terminal in the Streamlit UI directory and run:
+
+.. code:: bash
+
+ streamlit run --server.address localhost --server.port 5000 streamlit_ui.py bot_name localhost 8765
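+
+Here, ``bot_name`` is the name of your bot, and ``localhost`` and ``8765`` are the address and port of the bot's
+WebSocket server (the values shown are examples).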
diff --git a/requirements.txt b/requirements.txt
index 5634f61..dd5a04b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,3 @@
-audio-recorder-streamlit==0.0.8
chromadb==0.5.4
dateparser==1.1.8
keras==2.14.0
@@ -22,7 +21,7 @@ spacy==3.7.2
SpeechRecognition==3.10.0
# spellux @ git+https://github.com/Aran30/spellux # Not available in PyPi. Install manually with `pip install git+https://github.com/Aran30/spellux.git`
sqlalchemy==2.0.29
-streamlit==1.27.2
+streamlit==1.40.0
streamlit-antd-components==0.3.2
tensorflow==2.14.0
text2num==2.5.0
diff --git a/setup.cfg b/setup.cfg
index 341a321..9bc3c02 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,6 @@
[metadata]
name = besser-bot-framework
-version = 1.4.0
+version = 1.5.0
author = Luxembourg Institute of Science and Technology
description = BESSER Bot Framework (BBF)
long_description = file: README.md