From 35dcfcdd0b6a23e5a0555d22ba946840298037b4 Mon Sep 17 00:00:00 2001 From: Matt Bertrand Date: Fri, 20 Dec 2024 10:26:28 -0500 Subject: [PATCH] Demo Course Recommendation Chatbot (not production-ready, for RC only) (#1918) --- .devcontainer/devcontainer.json | 13 +- Dockerfile-litellm | 18 + ai_chat/__init__.py | 0 ai_chat/agents.py | 467 ++++++++++++++++ ai_chat/agents_test.py | 259 +++++++++ ai_chat/apps.py | 9 + ai_chat/conftest.py | 10 + ai_chat/constants.py | 15 + ai_chat/factories.py | 18 + ai_chat/migrations/0001_initial.py | 30 ++ ai_chat/migrations/__init__.py | 0 ai_chat/permissions.py | 13 + ai_chat/proxy.py | 111 ++++ ai_chat/serializers.py | 37 ++ ai_chat/urls.py | 14 + ai_chat/utils.py | 11 + ai_chat/views.py | 65 +++ ai_chat/views_test.py | 57 ++ app.json | 60 +++ config/litellm_config.yml | 15 + docker-compose.litellm.yml | 23 + fixtures/common.py | 2 +- frontends/api/src/generated/v0/api.ts | 204 +++++++ frontends/main/package.json | 3 + .../main/public/images/mit_mascot_tim.png | Bin 0 -> 22493 bytes .../main/src/app-pages/ChatPage/ChatPage.tsx | 54 ++ frontends/main/src/app-pages/ChatPage/send.ts | 79 +++ frontends/main/src/app/chat/page.tsx | 16 + frontends/main/src/common/feature_flags.ts | 1 + .../Nlux-AiChat/AiChat.stories.tsx | 52 ++ .../page-components/Nlux-AiChat/AiChat.tsx | 80 +++ .../Nlux-AiChat/mit_mascot_tim.png | Bin 0 -> 22493 bytes .../page-components/Nlux-AiChat/mock-send.ts | 113 ++++ .../Nlux-AiChat/nlux-theme.css | 170 ++++++ .../page-components/Nlux-AiChat/personas.tsx | 14 + frontends/ol-utilities/src/lib/index.ts | 1 + frontends/ol-utilities/src/lib/utils.ts | 10 + generate_env.py | 44 +- main/settings.py | 35 +- main/urls.py | 12 +- openapi/specs/v0.yaml | 50 ++ package.json | 4 + poetry.lock | 503 +++++++++++++++++- pyproject.toml | 3 + uwsgi.ini | 2 + yarn.lock | 31 ++ 46 files changed, 2691 insertions(+), 37 deletions(-) create mode 100644 Dockerfile-litellm create mode 100644 ai_chat/__init__.py create mode 100644 ai_chat/agents.py create mode 100644 ai_chat/agents_test.py create mode 100644 ai_chat/apps.py create mode 100644 ai_chat/conftest.py create mode 100644 ai_chat/constants.py create mode 100644 ai_chat/factories.py create mode 100644 ai_chat/migrations/0001_initial.py create mode 100644 ai_chat/migrations/__init__.py create mode 100644 ai_chat/permissions.py create mode 100644 ai_chat/proxy.py create mode 100644 ai_chat/serializers.py create mode 100644 ai_chat/urls.py create mode 100644 ai_chat/utils.py create mode 100644 ai_chat/views.py create mode 100644 ai_chat/views_test.py create mode 100644 config/litellm_config.yml create mode 100644 docker-compose.litellm.yml create mode 100644 frontends/main/public/images/mit_mascot_tim.png create mode 100644 frontends/main/src/app-pages/ChatPage/ChatPage.tsx create mode 100644 frontends/main/src/app-pages/ChatPage/send.ts create mode 100644 frontends/main/src/app/chat/page.tsx create mode 100644 frontends/main/src/page-components/Nlux-AiChat/AiChat.stories.tsx create mode 100644 frontends/main/src/page-components/Nlux-AiChat/AiChat.tsx create mode 100644 frontends/main/src/page-components/Nlux-AiChat/mit_mascot_tim.png create mode 100644 frontends/main/src/page-components/Nlux-AiChat/mock-send.ts create mode 100644 frontends/main/src/page-components/Nlux-AiChat/nlux-theme.css create mode 100644 frontends/main/src/page-components/Nlux-AiChat/personas.tsx diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index c0223f625f..80b6041cef 100644 --- 
a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,6 +1,13 @@ { "dockerComposeFile": "../docker-compose.codespaces.yml", "service": "web", + "containerEnv": { + "OPENAI_API_KEY": "${localEnv:OPENAI_API_KEY}", + "AI_PROXY_AUTH_TOKEN": "${localEnv:AI_PROXY_AUTH_TOKEN}", + "POSTHOG_PROJECT_ID": "${localEnv:POSTHOG_PROJECT_ID}", + "POSTHOG_PROJECT_API_KEY": "${localEnv:POSTHOG_PROJECT_API_KEY}", + "POSTHOG_PERSONAL_API_KEY": "${localEnv:POSTHOG_PERSONAL_API_KEY}" + }, "runServices": [ "watch", "web", @@ -10,6 +17,7 @@ "celery", "nginx", "redis", + "litellm", "qdrant" ], "hostRequirements": { "cpus": 4, "memory": "8gb" }, @@ -21,9 +29,10 @@ "OLL_API_CLIENT_SECRET": {}, "OLL_API_CLIENT_ID": {}, "SEE_API_CLIENT_ID": {}, - "SEE_API_CLIENT_SECRET": {} + "SEE_API_CLIENT_SECRET": {}, + "OPENAI_API_KEY": {} }, "features": {}, "postStartCommand": "while [ \"$(python manage.py showmigrations | grep \"\\[ \\]\" | wc -l)\" -ne \"0\" ]; do echo \"waiting for migrations\"; sleep 2; done && python manage.py update_offered_by && python manage.py update_platforms && python manage.py update_departments_schools && python manage.py update_course_number_departments && python manage.py backpopulate_mitxonline_data && python manage.py backpopulate_micromasters_data && python manage.py backpopulate_resource_channels --overwrite --all && python manage.py recreate_index --all", - "forwardPorts": [8062, 8063, 6333] + "forwardPorts": [4000, 6333, 8062, 8063] } diff --git a/Dockerfile-litellm b/Dockerfile-litellm new file mode 100644 index 0000000000..6fa3f3ad4e --- /dev/null +++ b/Dockerfile-litellm @@ -0,0 +1,18 @@ +# Use the provided base image +FROM ghcr.io/berriai/litellm:main-latest + +# Set the working directory to /app +WORKDIR /app + + +# Make sure your docker/entrypoint.sh is executable +RUN chmod +x ./docker/entrypoint.sh + +# Expose the necessary port +EXPOSE 4000/tcp + +# Override the CMD instruction with your desired command and arguments +# WARNING: FOR PROD DO NOT USE `--detailed_debug` it slows down response times, instead use the following CMD +# CMD ["--port", "4000", "--config", "config.yaml"] + +CMD ["--port", "4000", "--config", "litellm_config.yml"] diff --git a/ai_chat/__init__.py b/ai_chat/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/ai_chat/agents.py b/ai_chat/agents.py new file mode 100644 index 0000000000..1ac5a7a4b7 --- /dev/null +++ b/ai_chat/agents.py @@ -0,0 +1,467 @@ +"""Agent service classes for the AI chatbots""" + +import json +import logging +from abc import ABC, abstractmethod +from typing import Optional + +import pydantic +import requests +from django.conf import settings +from django.core.cache import caches +from django.utils.module_loading import import_string +from llama_index.agent.openai import OpenAIAgent +from llama_index.core.agent import AgentRunner +from llama_index.core.base.llms.types import ChatMessage +from llama_index.core.constants import DEFAULT_TEMPERATURE +from llama_index.core.tools import FunctionTool, ToolMetadata +from llama_index.llms.openai import OpenAI +from openai import BadRequestError +from pydantic import Field + +from ai_chat.constants import AIModelAPI +from ai_chat.utils import enum_zip +from learning_resources.constants import LearningResourceType, OfferedBy + +log = logging.getLogger(__name__) + + +class BaseChatAgent(ABC): + """ + Base service class for an AI chat agent + + Llamaindex was chosen to implement this because it provides + a far easier framework than native OpenAi or LiteLLM to 
+ handle function calling completions. With LiteLLM/OpenAI, + the first response may or may not be the result of a + function call, so it's necessary to check the response. + If it did call a function, then a second completion is needed + to get the final response with the function call result added + to the chat history. Llamaindex handles this automatically. + + For comparison see: + https://docs.litellm.ai/docs/completion/function_call + """ + + INSTRUCTIONS = "Provide instructions for the AI assistant" + + # For LiteLLM tracking purposes + JOB_ID = "BASECHAT_JOB" + TASK_NAME = "BASECHAT_TASK" + + def __init__( # noqa: PLR0913 + self, + name: str, + *, + model: Optional[str] = None, + temperature: Optional[float] = None, + instructions: Optional[str] = None, + user_id: Optional[str] = None, + save_history: Optional[bool] = False, + cache_key: Optional[str] = None, + cache_timeout: Optional[int] = None, + ): + """Initialize the AI chat agent service""" + self.assistant_name = name + self.ai = settings.AI_MODEL_API + self.model = model or settings.AI_MODEL + self.save_history = save_history + self.temperature = temperature or DEFAULT_TEMPERATURE + self.instructions = instructions or self.INSTRUCTIONS + self.user_id = user_id + if settings.AI_PROXY_CLASS: + self.proxy = import_string(f"ai_chat.proxy.{settings.AI_PROXY_CLASS}")() + else: + self.proxy = None + if save_history: + if not cache_key: + msg = "cache_key must be set to save chat history" + raise ValueError(msg) + self.cache = caches[settings.AI_CACHE] + self.cache_timeout = cache_timeout or settings.AI_CACHE_TIMEOUT + self.cache_key = cache_key + else: + self.cache = None + self.cache_timeout = None + self.cache_key = "" + self.agent = None + + def get_or_create_chat_history_cache(self, agent: AgentRunner) -> None: + """ + Get the user chat history from the cache and load it into the + llamaindex agent's chat history (agent.chat_history). + Create an empty cache key if it doesn't exist. 
+ """ + if self.cache_key in self.cache: + try: + for message in json.loads(self.cache.get(self.cache_key)): + agent.chat_history.append(ChatMessage(**message)) + except json.JSONDecodeError: + self.cache.set(self.cache_key, "[]", timeout=self.cache_timeout) + else: + if self.proxy: + self.proxy.create_proxy_user(self.user_id) + self.cache.set(self.cache_key, "[]", timeout=self.cache_timeout) + + def create_agent(self) -> AgentRunner: + """Create an AgentRunner for the relevant AI source""" + if self.ai == AIModelAPI.openai.value: + return self.create_openai_agent() + else: + error = f"AI source {self.ai} is not supported" + raise NotImplementedError(error) + + def create_tools(self): + """Create any tools required by the agent""" + return [] + + @abstractmethod + def create_openai_agent(self) -> OpenAIAgent: + """Create an OpenAI agent""" + + def save_chat_history(self) -> None: + """Save the agent chat history to the cache""" + chat_history = [ + message.dict() + for message in self.agent.chat_history + if message.role != "tool" and message.content + ] + self.cache.set(self.cache_key, json.dumps(chat_history), timeout=3600) + + def clear_chat_history(self) -> None: + """Clear the chat history from the cache""" + if self.save_history: + self.agent.chat_history.clear() + self.cache.delete(self.cache_key) + self.get_or_create_chat_history_cache(self.agent) + + @abstractmethod + def get_comment_metadata(self): + """Yield markdown comments to send hidden metdata in the response""" + + def get_completion(self, message: str, *, debug: bool = settings.AI_DEBUG) -> str: + """ + Send the user message to the agent and yield the response as + it comes in. + + Append the response with debugging metadata and/or errors. + """ + if not self.agent: + error = "Create agent before running" + raise ValueError(error) + try: + response = self.agent.stream_chat( + message, + ) + response_gen = response.response_gen + yield from response_gen + except BadRequestError as error: + # Format and yield an error message inside a hidden comment + if hasattr(error, "response"): + error = error.response.json() + else: + error = { + "error": {"message": "An error has occurred, please try again"} + } + if ( + error["error"]["message"].startswith("Budget has been exceeded") + and not settings.AI_DEBUG + ): # Friendlier message for end user + error["error"]["message"] = ( + "You have exceeded your AI usage limit. Please try again later." + ) + yield f"".encode() + except Exception: + yield '' + log.exception("Error running AI agent") + if self.save_history: + self.save_chat_history() + if debug: + yield f"\n\n\n\n".encode() + + +class SearchAgent(BaseChatAgent): + """Service class for the AI search function agent""" + + JOB_ID = "SEARCH_JOB" + TASK_NAME = "SEARCH_TASK" + + INSTRUCTIONS = f"""You are an assistant helping users find courses from a catalog +of learning resources. Users can ask about specific topics, levels, or recommendations +based on their interests or goals. + +Your job: +1. Understand the user's intent AND BACKGROUND based on their message. +2. Use the available function to gather information or recommend courses. +3. Provide a clear, user-friendly explanation of your recommendations if search results +are found. + + +Always run the tool to answer questions, and answer only based on the function search +results. VERY IMPORTANT: NEVER USE ANY INFORMATION OUTSIDE OF THE MIT SEARCH RESULTS TO +ANSWER QUESTIONS. If no results are returned, say you could not find any relevant +resources. 
+
+
+class SearchAgent(BaseChatAgent):
+    """Service class for the AI search function agent"""
+
+    JOB_ID = "SEARCH_JOB"
+    TASK_NAME = "SEARCH_TASK"
+
+    INSTRUCTIONS = f"""You are an assistant helping users find courses from a catalog
+of learning resources. Users can ask about specific topics, levels, or recommendations
+based on their interests or goals.
+
+Your job:
+1. Understand the user's intent AND BACKGROUND based on their message.
+2. Use the available function to gather information or recommend courses.
+3. Provide a clear, user-friendly explanation of your recommendations if search results
+are found.
+
+
+Always run the tool to answer questions, and answer only based on the function search
+results. VERY IMPORTANT: NEVER USE ANY INFORMATION OUTSIDE OF THE MIT SEARCH RESULTS TO
+ANSWER QUESTIONS. If no results are returned, say you could not find any relevant
+resources. Don't say you're going to try again. Ask the user if they would like to
+modify their preferences or ask a different question.
+
+Here are some guidelines on when to use the possible filters in the search function:
+
+q: The area of interest requested by the user. NEVER INCLUDE WORDS SUCH AS "advanced"
+or "introductory" IN THIS PARAMETER! If the user asks for introductory, intermediate,
+or advanced courses, do not include that in the search query, but examine the search
+results to determine which most closely match the user's desired education level and/or
+their educational background (if either is provided) and choose those results to return
+to the user. If the user asks what other courses are taught by a particular instructor,
+search the catalog for courses taught by that instructor using the instructor's name
+as the value for this parameter.
+
+offered_by: If a user asks for resources "offered by" or "from" an institution,
+you should include this parameter based on the following
+dictionary: {OfferedBy.as_dict()} DO NOT USE THE offered_by FILTER OTHERWISE.
+
+certificate: true if the user is interested in resources that offer certificates, false
+if the user does not want resources with a certificate offered. Do not use this filter
+if the user does not indicate a preference.
+
+free: true if the user is interested in free resources, false if the user is only
+interested in paid resources. Do not use this filter if the user does not indicate
+a preference.
+
+resource_type: If the user mentions courses, programs, videos, or podcasts in
+particular, filter the search by this parameter. DO NOT USE THE resource_type FILTER
+OTHERWISE. You MUST combine multiple resource types in one request like this:
+"resource_type=course&resource_type=program". Do not attempt more than one query per
+user message. If the user asks for podcasts, filter by both "podcast" and
+"podcast_episode".
+
+Respond in this format:
+- If the user's intent is unclear, ask clarifying questions about the user's
+preferences on price and certificates.
+- Understand user background from the message history, like their level of education.
+- After the function executes, rerank results based on user background and recommend
+1 or 2 courses to the user.
+- Make the title of each resource a clickable link.
+
+VERY IMPORTANT: NEVER USE ANY INFORMATION OUTSIDE OF THE MIT SEARCH RESULTS TO ANSWER
+QUESTIONS.
+
+Here are some sample user prompts, each with a guide on how to respond to them:
+
+Prompt: “I\'d like to learn some advanced mathematics that I may not have had exposure
+to before, as a physics major.”
+Expected Response: Ask some follow-ups about particular interests (e.g., set theory,
+analysis, topology). Maybe ask whether the user is more interested in applied math or
+theory. Then perform the search based on those interests and send the most relevant
+results back based on the user's answers.
+
+Prompt: “As someone with a non-science background, what courses can I take that will
+prepare me for the AI future.”
+Expected Output: Maybe ask whether the user wants to learn how to program, or just use
+AI in their discipline - does this person want to study machine learning? More info
+needed. Then perform a relevant search and send back the best results.
+
+And here are some recommended search parameters to apply for sample user prompts:
+
+User: "I am interested in learning advanced AI techniques"
+Search parameters: {{"q": "AI techniques"}}
+
+User: "I am curious about AI applications for business"
+Search parameters: {{"q": "AI business"}}
+
+User: "I want free basic courses about climate change from OpenCourseware"
+Search parameters: {{"q": "climate change", "free": true, "resource_type": ["course"],
+"offered_by": "ocw"}}
+
+User: "I want to learn some advanced mathematics"
+Search parameters: {{"q": "mathematics"}}
+    """
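To make the prompt's parameter guidance concrete, this is roughly the request the `search_courses` tool defined below ends up issuing for the OpenCourseware sample above. The endpoint URL and limit are assumptions; the real values come from `settings.AI_MIT_SEARCH_URL` and `settings.AI_MIT_SEARCH_LIMIT`:

```python
import requests

# Hypothetical endpoint; the app reads this from settings.AI_MIT_SEARCH_URL.
MIT_SEARCH_URL = "https://api.learn.mit.edu/api/v1/learning_resources_search/"

params = {
    "q": "climate change",
    "free": True,
    "resource_type": ["course"],  # lists encode as repeated resource_type keys
    "offered_by": "ocw",
    "limit": 10,  # settings.AI_MIT_SEARCH_LIMIT
}
response = requests.get(MIT_SEARCH_URL, params=params, timeout=30)
response.raise_for_status()
for result in response.json()["results"]:
    print(result["title"], result["url"])
```

The repeated-key encoding that `requests` applies to the list value is exactly the `resource_type=course&resource_type=program` form the prompt insists on.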
+
+    class SearchToolSchema(pydantic.BaseModel):
+        """Schema for searching MIT learning resources.
+
+        Attributes:
+            q: The search query string
+            resource_type: Filter by type of resource (course, program, etc)
+            free: Filter for free resources only
+            certificate: Filter for resources offering certificates
+            offered_by: Filter by institution offering the resource
+        """
+
+        q: str = Field(
+            description=(
+                "Query to find resources. Never use level terms like 'advanced' here"
+            )
+        )
+        resource_type: Optional[
+            list[enum_zip("resource_type", LearningResourceType)]
+        ] = Field(
+            default=None,
+            description="Type of resource to search for: course, program, video, etc",
+        )
+        free: Optional[bool] = Field(
+            default=None,
+            description="Whether the resource is free to access, true|false",
+        )
+        certificate: Optional[bool] = Field(
+            default=None,
+            description=(
+                "Whether the resource offers a certificate upon completion, true|false"
+            ),
+        )
+        offered_by: Optional[enum_zip("offered_by", OfferedBy)] = Field(
+            default=None,
+            description="Institution that offers the resource: ocw, mitxonline, etc",
+        )
+
+        model_config = {
+            "json_schema_extra": {
+                "examples": [
+                    {
+                        "q": "machine learning",
+                        "resource_type": ["course"],
+                        "free": True,
+                        "certificate": False,
+                        "offered_by": "MIT",
+                    }
+                ]
+            }
+        }
+
+    def __init__(  # noqa: PLR0913
+        self,
+        name: str,
+        *,
+        model: Optional[str] = None,
+        temperature: Optional[float] = None,
+        instructions: Optional[str] = None,
+        user_id: Optional[str] = None,
+        save_history: Optional[bool] = False,
+        cache_key: Optional[str] = None,
+        cache_timeout: Optional[int] = None,
+    ):
+        """Initialize the AI search agent service"""
+        super().__init__(
+            name,
+            model=model or settings.AI_MODEL,
+            temperature=temperature,
+            instructions=instructions,
+            save_history=save_history,
+            user_id=user_id,
+            cache_key=cache_key,
+            cache_timeout=cache_timeout or settings.AI_CACHE_TIMEOUT,
+        )
+        self.search_parameters = []
+        self.search_results = []
+        self.agent = self.create_agent()
+
+    def search_courses(self, q: str, **kwargs) -> str:
+        """
+        Query the MIT API for learning resources, and
+        return simplified results as a JSON string
+        """
+
+        params = {"q": q, "limit": settings.AI_MIT_SEARCH_LIMIT}
+
+        valid_params = {
+            "resource_type": kwargs.get("resource_type"),
+            "free": kwargs.get("free"),
+            "offered_by": kwargs.get("offered_by"),
+            "certificate": kwargs.get("certificate"),
+        }
+        params.update({k: v for k, v in valid_params.items() if v is not None})
+        self.search_parameters.append(params)
+        try:
+            response = requests.get(
+                settings.AI_MIT_SEARCH_URL, params=params, timeout=30
+            )
+            response.raise_for_status()
+            raw_results = response.json().get("results", [])
+            # Simplify the response to only include the main properties
+            main_properties = [
+                "title",
+                "url",
+                "description",
+                "offered_by",
+                "free",
+                "certification",
+                "resource_type",
+            ]
+            simplified_results = []
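+            # Prefer the run that starts on next_start_date; otherwise fall
+            # back to the last run listed, so level/instructors reflect the
+            # most relevant offering.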
+ for result in raw_results: + simplified_result = {k: result.get(k) for k in main_properties} + # Instructors and level will be in the runs data if present + next_date = result.get("next_start_date", None) + raw_runs = result.get("runs", []) + best_run = None + if next_date: + runs = [run for run in raw_runs if run["start_date"] == next_date] + if runs: + best_run = runs[0] + elif raw_runs: + best_run = raw_runs[-1] + if best_run: + for attribute in ("level", "instructors"): + simplified_result[attribute] = best_run.get(attribute, []) + simplified_results.append(simplified_result) + self.search_results.extend(simplified_results) + return json.dumps(simplified_results) + except requests.exceptions.RequestException: + log.exception("Error querying MIT API") + return json.dumps({"error": "An error occurred while searching"}) + + def create_openai_agent(self) -> OpenAIAgent: + """ + Create an OpenAI-specific llamaindex agent for function calling + + Using `OpenAI` instead of a more universal `LiteLLM` because + the `LiteLLM` class as implemented by llamaindex does not + support function calling. ie: + agent = FunctionCallingAgentWorker.from_tools(.... + > AssertionError: llm must be an instance of FunctionCallingLLM + """ + llm = OpenAI( + model=self.model, + **(self.proxy.get_api_kwargs() if self.proxy else {}), + additional_kwargs=( + self.proxy.get_additional_kwargs(self) if self.proxy else {} + ), + ) + if self.temperature: + llm.temperature = self.temperature + agent = OpenAIAgent.from_tools( + tools=self.create_tools(), + llm=llm, + verbose=True, + system_prompt=self.instructions, + ) + if self.save_history: + self.get_or_create_chat_history_cache(agent) + return agent + + def create_tools(self): + """Create tools required by the agent""" + return [self.create_search_tool()] + + def create_search_tool(self) -> FunctionTool: + """Create the search tool for the AI agent""" + metadata = ToolMetadata( + name="search_courses", + description="Search for learning resources in the MIT catalog", + fn_schema=self.SearchToolSchema, + ) + return FunctionTool.from_defaults( + fn=self.search_courses, tool_metadata=metadata + ) + + def get_comment_metadata(self) -> str: + """ + Yield markdown comments to send hidden metadata in the response + """ + metadata = { + "metadata": { + "search_parameters": self.search_parameters, + "search_results": self.search_results, + "system_prompt": self.instructions, + } + } + return json.dumps(metadata) diff --git a/ai_chat/agents_test.py b/ai_chat/agents_test.py new file mode 100644 index 0000000000..2ae8b415f0 --- /dev/null +++ b/ai_chat/agents_test.py @@ -0,0 +1,259 @@ +"""Tests for AI agent services.""" + +import json + +import pytest +from django.conf import settings +from django.core.cache import caches +from llama_index.core.base.llms.types import MessageRole +from llama_index.core.constants import DEFAULT_TEMPERATURE + +from ai_chat.agents import SearchAgent +from ai_chat.factories import ChatMessageFactory +from learning_resources.factories import LearningResourceFactory +from learning_resources.serializers import ( + CourseResourceSerializer, +) +from main.factories import UserFactory +from main.test_utils import assert_json_equal + + +@pytest.fixture(autouse=True) +def ai_settings(settings): + """Set the AI settings for the tests.""" + settings.AI_CACHE = "default" + settings.AI_PROXY_URL = "" + return settings + + +@pytest.fixture +def chat_history(): + """Return one round trip chat history for testing.""" + return [ + 
ChatMessageFactory(role=MessageRole.USER), + ChatMessageFactory(role=MessageRole.ASSISTANT), + ] + + +@pytest.mark.parametrize( + ("model", "temperature", "instructions"), + [ + ("gpt-3.5-turbo", 0.1, "Answer this question as best you can"), + ("gpt-4o", 0.3, None), + ("gpt-4", None, None), + (None, None, None), + ], +) +def test_search_agent_service_initialization_defaults(model, temperature, instructions): + """Test the SearchAgent class instantiation.""" + name = "My search agent" + user_id = "testuser@test.edu" + + search_agent = SearchAgent( + name, + model=model, + temperature=temperature, + instructions=instructions, + user_id=user_id, + ) + assert search_agent.model == (model if model else settings.AI_MODEL) + assert search_agent.temperature == ( + temperature if temperature else DEFAULT_TEMPERATURE + ) + assert search_agent.instructions == ( + instructions if instructions else search_agent.instructions + ) + assert search_agent.agent.__class__.__name__ == "OpenAIAgent" + assert search_agent.agent.agent_worker._llm.model == ( # noqa: SLF001 + model if model else settings.AI_MODEL + ) + + +@pytest.mark.parametrize( + ("cache_key", "cache_timeout", "save_history"), + [ + ("test_cache_key", 60, True), + ("test_cache_key", 60, False), + (None, 60, True), + (None, 60, False), + ("test_cache_key", None, True), + ("test_cache_key", None, False), + ], +) +def test_search_agent_service_chat_history_settings( + cache_key, cache_timeout, save_history +): + """Test that the SearchAgent chat history settings are set correctly.""" + if save_history and not cache_key: + with pytest.raises( + ValueError, match="cache_key must be set to save chat history" + ): + SearchAgent( + "test agent", + cache_key=cache_key, + cache_timeout=cache_timeout, + save_history=save_history, + ) + else: + service = SearchAgent( + "test agent", + cache_key=cache_key, + cache_timeout=cache_timeout, + save_history=save_history, + ) + assert service.cache_key == (cache_key if save_history else "") + assert service.cache_timeout == ( + (cache_timeout if cache_timeout else settings.AI_CACHE_TIMEOUT) + if save_history + else None + ) + + +def test_get_or_create_chat_history_cache(settings, user, chat_history): + """Test that the SearchAgent get_or_create_chat_history_cache method works.""" + + caches[settings.AI_CACHE].set( + f"{user.email}_test_cache_key", + json.dumps([message.dict() for message in chat_history]), + ) + user_service = SearchAgent( + "test agent", + user_id=user.email, + cache_key=f"{user.email}_test_cache_key", + save_history=True, + ) + assert [ + (message.role, message.content) for message in user_service.agent.chat_history + ] == [(message.role, message.content) for message in chat_history] + + # Different user should have different chat history + user2 = UserFactory.create() + user2_service = SearchAgent( + "test agent", + user_id=user2.email, + cache_key=f"{user2.email}_test_cache_key", + save_history=True, + ) + assert user2_service.agent.chat_history == [] + + # Same user different cache should have different chat history + user_service2 = SearchAgent( + "test agent", + user_id=user.email, + cache_key=f"{user.email}_other_cache_key", + save_history=True, + ) + assert user_service2.agent.chat_history == [] + + # Chat history should be cleared out if requested + assert len(user_service.agent.chat_history) == 2 + user_service.clear_chat_history() + assert user_service.agent.chat_history == [] + assert caches[settings.AI_CACHE].get(f"{user.email}_test_cache_key") == "[]" + + +def 
test_clear_chat_history(client, user, chat_history): + """Test that the SearchAgent clears chat_history.""" + + caches[settings.AI_CACHE].set( + f"{user.email}_test_cache_key", + json.dumps([message.dict() for message in chat_history]), + ) + search_agent = SearchAgent( + "test agent", + user_id=user.email, + cache_key=f"{user.email}_test_cache_key", + save_history=True, + ) + assert len(search_agent.agent.chat_history) == 2 + assert ( + len(json.loads(caches[settings.AI_CACHE].get(f"{user.email}_test_cache_key"))) + == 2 + ) + + search_agent.clear_chat_history() + assert search_agent.agent.chat_history == [] + assert caches[settings.AI_CACHE].get(f"{user.email}_test_cache_key") == "[]" + + +@pytest.mark.django_db +def test_search_agent_tool(settings, mocker): + """The search agent tool should be created and function correctly.""" + settings.AI_MIT_SEARCH_LIMIT = 5 + retained_attributes = [ + "title", + "url", + "description", + "offered_by", + "free", + "certification", + "resource_type", + ] + raw_results = [ + CourseResourceSerializer(resource).data + for resource in LearningResourceFactory.create_batch(5) + ] + expected_results = [ + {key: resource.get("key") for key in retained_attributes} + for resource in raw_results + ] + mock_post = mocker.patch( + "ai_chat.agents.requests.get", + return_value=mocker.Mock( + json=mocker.Mock(return_value={"results": expected_results}) + ), + ) + search_agent = SearchAgent("test agent") + search_parameters = { + "q": "physics", + "resource_type": ["course", "program"], + "free": False, + "certificate": True, + "offered_by": "xpro", + "limit": 5, + } + tool = search_agent.create_tools()[0] + results = tool._fn(**search_parameters) # noqa: SLF001 + mock_post.assert_called_once_with( + settings.AI_MIT_SEARCH_URL, params=search_parameters, timeout=30 + ) + assert_json_equal(json.loads(results), expected_results) + + +@pytest.mark.django_db +def test_get_completion(mocker): + """Test that the SearchAgent get_completion method returns expected values.""" + metadata = { + "metadata": { + "search_parameters": {"q": "physics"}, + "search_results": [ + CourseResourceSerializer(resource).data + for resource in LearningResourceFactory.create_batch(5) + ], + "system_prompt": SearchAgent.INSTRUCTIONS, + } + } + expected_return_value = [b"Here ", b"are ", b"some ", b"results"] + mocker.patch( + "ai_chat.agents.OpenAIAgent.stream_chat", + return_value=mocker.Mock(response_gen=iter(expected_return_value)), + ) + search_agent = SearchAgent("test agent") + search_agent.search_parameters = metadata["metadata"]["search_parameters"] + search_agent.search_results = metadata["metadata"]["search_results"] + search_agent.instructions = metadata["metadata"]["system_prompt"] + search_agent.search_parameters = {"q": "physics"} + search_agent.search_results = [ + CourseResourceSerializer(resource).data + for resource in LearningResourceFactory.create_batch(5) + ] + results = "".join( + [ + str(chunk) + for chunk in search_agent.get_completion( + "I want to learn physics", + ) + ] + ) + search_agent.agent.stream_chat.assert_called_once_with("I want to learn physics") + assert "".join([str(value) for value in expected_return_value]) in results diff --git a/ai_chat/apps.py b/ai_chat/apps.py new file mode 100644 index 0000000000..46f8a6f39e --- /dev/null +++ b/ai_chat/apps.py @@ -0,0 +1,9 @@ +"""learning_resources app config""" + +from django.apps import AppConfig + + +class AiChatConfig(AppConfig): + """AI Chat Appconfig""" + + name = "ai_chat" diff --git a/ai_chat/conftest.py 
b/ai_chat/conftest.py new file mode 100644 index 0000000000..8f4bf20cda --- /dev/null +++ b/ai_chat/conftest.py @@ -0,0 +1,10 @@ +import pytest + + +@pytest.fixture +def mock_search_agent_service(mocker): + """Mock the SearchAgentService class.""" + return mocker.patch( + "ai_chat.views.SearchAgentService", + autospec=True, + ) diff --git a/ai_chat/constants.py b/ai_chat/constants.py new file mode 100644 index 0000000000..311ae33a77 --- /dev/null +++ b/ai_chat/constants.py @@ -0,0 +1,15 @@ +"""Constants for the AI Chat application.""" + +from named_enum import ExtendedEnum + + +class AIModelAPI(ExtendedEnum): + """ + Enum for AI model APIs. Add new AI APIs here. + """ + + openai = "openai" + + +GROUP_STAFF_AI_SYTEM_PROMPT_EDITORS = "ai_system_prompt_editors" +AI_ANONYMOUS_USER = "anonymous" diff --git a/ai_chat/factories.py b/ai_chat/factories.py new file mode 100644 index 0000000000..af9509947a --- /dev/null +++ b/ai_chat/factories.py @@ -0,0 +1,18 @@ +"""Test factory classes for ai_chat tests""" + +import factory +from factory.fuzzy import FuzzyChoice +from llama_cloud import ChatMessage +from llama_index.core.base.llms.types import MessageRole + + +class ChatMessageFactory(factory.Factory): + """Factory for generating llamaindex ChatMessage instances.""" + + role = FuzzyChoice(MessageRole.USER, MessageRole.ASSISTANT) + content = factory.Faker("sentence") + id = name = factory.Sequence(lambda n: "%d" % n) + index = factory.Sequence(lambda n: "%d" % n) + + class Meta: + model = ChatMessage diff --git a/ai_chat/migrations/0001_initial.py b/ai_chat/migrations/0001_initial.py new file mode 100644 index 0000000000..5d1ff2678d --- /dev/null +++ b/ai_chat/migrations/0001_initial.py @@ -0,0 +1,30 @@ +# Generated by Django 3.1 on 2021-01-28 16:27 +from django.conf import settings +from django.contrib.auth.models import Group +from django.db import migrations + +from ai_chat import constants + + +def add_ai_prompt_editor_group(apps, schema_editor): + """ + Create the staff list editors group + """ + Group.objects.get_or_create(name=constants.GROUP_STAFF_AI_SYTEM_PROMPT_EDITORS) + + +def remove_ai_prompt_editor_group(apps, schema_editor): + """ + Delete the staff list editors group + """ + Group.objects.filter(name=constants.GROUP_STAFF_AI_SYTEM_PROMPT_EDITORS).delete() + + +class Migration(migrations.Migration): + dependencies = [ + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ] + + operations = [ + migrations.RunPython(add_ai_prompt_editor_group, remove_ai_prompt_editor_group), + ] diff --git a/ai_chat/migrations/__init__.py b/ai_chat/migrations/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/ai_chat/permissions.py b/ai_chat/permissions.py new file mode 100644 index 0000000000..bcf37c213b --- /dev/null +++ b/ai_chat/permissions.py @@ -0,0 +1,13 @@ +"""Permissions for ai chat agents""" + +from rest_framework import permissions + +from main import features + + +class SearchAgentPermissions(permissions.BasePermission): + """Permissions for ai search service""" + + def has_permission(self, request, view): # noqa: ARG002 + """Check if the user has permission""" + return features.is_enabled("recommendation-bot") diff --git a/ai_chat/proxy.py b/ai_chat/proxy.py new file mode 100644 index 0000000000..67b921e5d7 --- /dev/null +++ b/ai_chat/proxy.py @@ -0,0 +1,111 @@ +"""AI proxy helper classes""" + +import logging +from abc import ABC, abstractmethod +from urllib.parse import urljoin + +import requests +from django.conf import settings + +from ai_chat.agents 
import BaseChatAgent +from ai_chat.constants import AI_ANONYMOUS_USER + +log = logging.getLogger(__name__) + + +class AIProxy(ABC): + """Abstract base helper class for an AI proxy/gateway.""" + + REQUIRED_SETTINGS = [] + + def __init__(self): + """Raise an error if required settings are missing.""" + missing_settings = [ + setting + for setting in self.REQUIRED_SETTINGS + if not getattr(settings, setting, None) + ] + if missing_settings: + message = ",".join(missing_settings) + raise ValueError(message) + + @abstractmethod + def get_api_kwargs(self) -> dict: + """Get the api kwargs required to connect to the proxy.""" + + @abstractmethod + def get_additional_kwargs(self, service: BaseChatAgent) -> dict: + """Get any additional kwargs that should be sent to the proxy""" + + @abstractmethod + def create_proxy_user(self, endpoint: str) -> None: + """Create a proxy user.""" + + +class LiteLLMProxy(AIProxy): + """Helper class for the Lite LLM proxy.""" + + REQUIRED_SETTINGS = ("AI_PROXY_URL", "AI_PROXY_AUTH_TOKEN") + + def get_api_kwargs(self) -> dict: + return { + "api_base": settings.AI_PROXY_URL, + "api_key": settings.AI_PROXY_AUTH_TOKEN, + } + + def get_additional_kwargs(self, service: BaseChatAgent) -> dict: + return { + "user": service.user_id, + "store": True, + "extra_body": { + "metadata": { + "tags": [ + f"jobID:{service.JOB_ID}", + f"taskName:{service.TASK_NAME}", + ] + } + }, + } + + def create_proxy_user(self, user_id, endpoint="new") -> None: + """ + Set the rate limit for the user by creating a LiteLLM customer account. + Anonymous users will share the same rate limit. + """ + if settings.AI_PROXY_URL and settings.AI_PROXY_AUTH_TOKEN: + # Unauthenticated users will share a common budget/rate limit, + # so multiply for some extra capacity + multiplier = ( + settings.AI_ANON_LIMIT_MULTIPLIER if user_id == AI_ANONYMOUS_USER else 1 + ) + request_json = { + "user_id": user_id, + "max_parallel_requests": settings.AI_MAX_PARALLEL_REQUESTS * multiplier, + "tpm_limit": settings.AI_TPM_LIMIT * multiplier, + "rpm_limit": settings.AI_RPM_LIMIT * multiplier, + "max_budget": settings.AI_MAX_BUDGET * multiplier, + "budget_duration": settings.AI_BUDGET_DURATION, + } + headers = {"Authorization": f"Bearer {settings.AI_PROXY_AUTH_TOKEN}"} + try: + response = requests.post( + urljoin(settings.AI_PROXY_URL, f"/customer/{endpoint}"), + json=request_json, + timeout=settings.REQUESTS_TIMEOUT, + headers=headers, + ) + response.raise_for_status() + except Exception as error: + if "duplicate key value violates unique constraint" in str(error): + """ + Try updating the LiteLLM customer account if it already exists. + Unfortunately, LiteLLM seems to have a bug that prevents + updates to the customer's max_budget: + https://github.com/BerriAI/litellm/issues/6920 + + We could create LiteLLM internal user accounts instead, but that + would require saving and using the LiteLLM keys generated per user. 
+ """ + self.create_proxy_user(user_id=user_id, endpoint="update") + else: + log.exception("Error creating/updating proxy customer account") diff --git a/ai_chat/serializers.py b/ai_chat/serializers.py new file mode 100644 index 0000000000..4ae9d73927 --- /dev/null +++ b/ai_chat/serializers.py @@ -0,0 +1,37 @@ +"""Serializers for the ai_chat app""" + +from django.conf import settings +from rest_framework import serializers + +from ai_chat.constants import GROUP_STAFF_AI_SYTEM_PROMPT_EDITORS + + +class ChatRequestSerializer(serializers.Serializer): + """DRF serializer for chatbot requests""" + + message = serializers.CharField(allow_blank=False) + model = serializers.CharField(default=settings.AI_MODEL, required=False) + temperature = serializers.FloatField(min_value=0.0, max_value=1.0, required=False) + instructions = serializers.CharField(required=False) + clear_history = serializers.BooleanField(default=False) + + def validate_instructions(self, value): + """Check if the user is allowed to modify the AI system prompt""" + if value: + request = self.context.get("request") + user = request.user + if settings.ENVIRONMENT == "dev" or ( + user + and user.is_authenticated + and ( + user.is_superuser + or user.groups.filter( + name=GROUP_STAFF_AI_SYTEM_PROMPT_EDITORS + ).first() + is not None + ) + ): + return value + msg = "You are not allowed to modify the AI system prompt." + raise serializers.ValidationError(msg) + return value diff --git a/ai_chat/urls.py b/ai_chat/urls.py new file mode 100644 index 0000000000..1b61ccb096 --- /dev/null +++ b/ai_chat/urls.py @@ -0,0 +1,14 @@ +"""URLs for the ai_chat app.""" + +from django.urls import include, re_path + +from ai_chat import views + +app_name = "ai_chat" + +v0_urls = [ + re_path(r"^chat_agent/", views.SearchAgentView.as_view(), name="chatbot_agent_api"), +] +urlpatterns = [ + re_path(r"^api/v0/", include((v0_urls, "v0"))), +] diff --git a/ai_chat/utils.py b/ai_chat/utils.py new file mode 100644 index 0000000000..4a8001041e --- /dev/null +++ b/ai_chat/utils.py @@ -0,0 +1,11 @@ +"""Utility functions for ai chat agents""" + +import logging +from enum import Enum + +log = logging.getLogger(__name__) + + +def enum_zip(label: str, enum: Enum) -> type[Enum]: + """Create a new Enum from a tuple of Enum names""" + return Enum(label, dict(zip(enum.names(), enum.names()))) diff --git a/ai_chat/views.py b/ai_chat/views.py new file mode 100644 index 0000000000..3b39e7bea2 --- /dev/null +++ b/ai_chat/views.py @@ -0,0 +1,65 @@ +"""DRF views for the ai_chat app.""" + +import logging + +from django.http import StreamingHttpResponse +from drf_spectacular.utils import extend_schema +from rest_framework import views +from rest_framework.request import Request + +from ai_chat import serializers +from ai_chat.agents import SearchAgent +from ai_chat.permissions import SearchAgentPermissions + +log = logging.getLogger(__name__) + + +class SearchAgentView(views.APIView): + """ + DRF view for an AI agent that answers user queries + by performing a relevant learning resources search. 
+ """ + + http_method_names = ["post"] + serializer_class = serializers.ChatRequestSerializer + permission_classes = (SearchAgentPermissions,) # Add IsAuthenticated + + @extend_schema( + responses={ + (200, "text/event-stream"): { + "description": "Chatbot response", + "type": "string", + } + } + ) + def post(self, request: Request) -> StreamingHttpResponse: + """Handle a POST request to the chatbot agent.""" + serializer = serializers.ChatRequestSerializer( + data=request.data, context={"request": request} + ) + serializer.is_valid(raise_exception=True) + if not request.session.session_key: + request.session.save() + cache_id = ( + request.user.email + if request.user.is_authenticated + else request.session.session_key + ) + # Make anonymous users share a common LiteLLM budget/rate limit. + user_id = request.user.email if request.user.is_authenticated else "anonymous" + message = serializer.validated_data.pop("message", "") + clear_history = serializer.validated_data.pop("clear_history", False) + agent = SearchAgent( + "Learning Resource Search AI Assistant", + user_id=user_id, + cache_key=f"{cache_id}_search_chat_history", + save_history=True, + **serializer.validated_data, + ) + if clear_history: + agent.clear_chat_history() + return StreamingHttpResponse( + agent.get_completion(message), + content_type="text/event-stream", + headers={"X-Accel-Buffering": "no"}, + ) diff --git a/ai_chat/views_test.py b/ai_chat/views_test.py new file mode 100644 index 0000000000..77f9c89f5d --- /dev/null +++ b/ai_chat/views_test.py @@ -0,0 +1,57 @@ +"""Unit tests for the views module.""" + +import pytest +from rest_framework.reverse import reverse + + +@pytest.mark.parametrize("clear_history", [True, False]) +@pytest.mark.parametrize("is_authenticated", [True, False]) +def test_post_search_agent_endpoint( + mocker, client, user, clear_history, is_authenticated +): + """Test SearchAgentView post endpoint""" + mocker.patch( + "ai_chat.permissions.SearchAgentPermissions.has_permission", return_value=True + ) + expected_answer = [b"Here ", b"are ", b"some ", b"results"] + expected_user_id = user.email if is_authenticated else "anonymous" + user_message = "Do you have any good physics courses?" 
+ temperature = 0.1 + system_prompt = "Answer this question as best you can" + mock_agent = mocker.patch("ai_chat.views.SearchAgent", autospec=True) + mock_agent.return_value.get_completion = mocker.Mock( + return_value=iter(expected_answer) + ) + model = "gpt-3.5-turbo" + if is_authenticated: + client.force_login(user) + resp = client.post( + f"{reverse('ai_chat:v0:chatbot_agent_api')}", + session=client.session, + data={ + "message": user_message, + "clear_history": clear_history, + "model": model, + "temperature": 0.1, + "instructions": system_prompt, + }, + ) + expected_cache_prefix = ( + user.email if is_authenticated else resp.request["session"].session_key + ) + mock_agent.assert_called_once_with( + "Learning Resource Search AI Assistant", + user_id=expected_user_id, + cache_key=f"{expected_cache_prefix}_search_chat_history", + save_history=True, + model=model, + instructions=system_prompt, + temperature=temperature, + ) + instantiated_agent = mock_agent.return_value + instantiated_agent.get_completion.assert_called_once_with(user_message) + assert instantiated_agent.clear_chat_history.call_count == ( + 1 if clear_history else 0 + ) + assert resp.status_code == 200 + assert list(resp.streaming_content) == expected_answer diff --git a/app.json b/app.json index 964e757b3a..cbb8264bdb 100644 --- a/app.json +++ b/app.json @@ -19,6 +19,66 @@ ], "description": "mit-learn", "env": { + "AI_CACHE_TIMEOUT": { + "description": "Timeout for AI cache", + "required": false + }, + "AI_DEBUG": { + "description": "Include debug information in AI responses if True", + "required": false + }, + "AI_MIT_SEARCH_URL": { + "description": "URL for AI search agent", + "required": false + }, + "AI_MODEL": { + "description": "Model to use for AI functionality", + "required": false + }, + "AI_MODEL_API": { + "description": "The API used for the AI model", + "required": false + }, + "AI_PROXY_CLASS": { + "description": "Proxy class for AI functionality", + "required": false + }, + "AI_PROXY_URL": { + "description": "URL for AI proxy", + "required": false + }, + "AI_PROXY_AUTH_TOKEN": { + "description": "Auth token for AI proxy", + "required": false + }, + "AI_MAX_PARALLEL_REQUESTS": { + "description": "Max parallel requests/user for AI functionality", + "required": false + }, + "AI_TPM_LIMIT": { + "description": "Tokens/minute limit per user for AI functionality", + "required": false + }, + "AI_RPM_LIMIT": { + "description": "Requests/minute limit per user for AI functionality", + "required": false + }, + "AI_BUDGET_DURATION": { + "description": "Length of time before a user's budget usage resets", + "required": false + }, + "AI_MAX_BUDGET": { + "description": "Max budget per user for AI functionality", + "required": false + }, + "AI_ANON_LIMIT_MULTIPLIER": { + "description": "Multiplier for per-user limit/budget shared by anonymous users", + "required": false + }, + "AI_MIT_SEARCH_LIMIT": { + "description": "Limit parameter value for AI search agent", + "required": false + }, "ALLOWED_HOSTS": { "description": "", "required": false diff --git a/config/litellm_config.yml b/config/litellm_config.yml new file mode 100644 index 0000000000..ca6dc1db3e --- /dev/null +++ b/config/litellm_config.yml @@ -0,0 +1,15 @@ +model_list: + - model_name: "*" + litellm_params: + model: openai/* + api_key: os.environ/OPENAI_API_KEY + +general_settings: + master_key: os.environ/LITELLM_MASTER_KEY + +litellm_settings: + # The following should set default customer budgets, but they are + # being ignored or not created (not sure 
which). + # https://docs.litellm.ai/docs/proxy/users + max_end_user_budget: os.environ/OPENAI_AI_MAX_BUDGET + max_end_user_budget_duration: os.environ/OPENAI_AI_MAX_BUDGET_DURATION diff --git a/docker-compose.litellm.yml b/docker-compose.litellm.yml new file mode 100644 index 0000000000..ee472ad56d --- /dev/null +++ b/docker-compose.litellm.yml @@ -0,0 +1,23 @@ +include: + - docker-compose.services.yml + +services: + litellm: + profiles: + - backend + build: + dockerfile: Dockerfile-litellm + ports: + - "4000:4000" + environment: + - DATABASE_URL=postgres://postgres:postgres@db:5432/litellm + - OPENAI_API_KEY=${OPENAI_API_KEY} + - LITELLM_MASTER_KEY=${AI_PROXY_AUTH_TOKEN} + - LITELLM_SALT_KEY=${AI_PROXY_AUTH_TOKEN} + depends_on: + db: + condition: service_healthy + redis: + condition: service_healthy + volumes: + - ./config:/app diff --git a/fixtures/common.py b/fixtures/common.py index b99af80de1..5da3065c65 100644 --- a/fixtures/common.py +++ b/fixtures/common.py @@ -44,7 +44,7 @@ def warnings_as_errors(): # noqa: PT004 # Ignore deprecation warnings in third party libraries warnings.filterwarnings( "ignore", - module=".*(api_jwt|api_jws|rest_framework_jwt|astroid|bs4|celery|factory|botocore|posthog).*", + module=".*(api_jwt|api_jws|rest_framework_jwt|astroid|bs4|celery|factory|botocore|posthog|pydantic).*", category=DeprecationWarning, ) yield diff --git a/frontends/api/src/generated/v0/api.ts b/frontends/api/src/generated/v0/api.ts index 11743b483d..1dbc246f32 100644 --- a/frontends/api/src/generated/v0/api.ts +++ b/frontends/api/src/generated/v0/api.ts @@ -516,6 +516,43 @@ export interface ChannelUnitDetail { */ unit: LearningResourceOfferorDetail } +/** + * DRF serializer for chatbot requests + * @export + * @interface ChatRequestRequest + */ +export interface ChatRequestRequest { + /** + * + * @type {string} + * @memberof ChatRequestRequest + */ + message: string + /** + * + * @type {string} + * @memberof ChatRequestRequest + */ + model?: string + /** + * + * @type {number} + * @memberof ChatRequestRequest + */ + temperature?: number + /** + * + * @type {string} + * @memberof ChatRequestRequest + */ + instructions?: string + /** + * + * @type {boolean} + * @memberof ChatRequestRequest + */ + clear_history?: boolean +} /** * * @export @@ -7135,6 +7172,173 @@ export const ChannelsListChannelTypeEnum = { export type ChannelsListChannelTypeEnum = (typeof ChannelsListChannelTypeEnum)[keyof typeof ChannelsListChannelTypeEnum] +/** + * ChatAgentApi - axios parameter creator + * @export + */ +export const ChatAgentApiAxiosParamCreator = function ( + configuration?: Configuration, +) { + return { + /** + * Handle a POST request to the chatbot agent. + * @param {ChatRequestRequest} ChatRequestRequest + * @param {*} [options] Override http request option. + * @throws {RequiredError} + */ + chatAgentCreate: async ( + ChatRequestRequest: ChatRequestRequest, + options: RawAxiosRequestConfig = {}, + ): Promise => { + // verify required parameter 'ChatRequestRequest' is not null or undefined + assertParamExists( + "chatAgentCreate", + "ChatRequestRequest", + ChatRequestRequest, + ) + const localVarPath = `/api/v0/chat_agent/` + // use dummy base URL string because the URL constructor only accepts absolute URLs. 
+ const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL) + let baseOptions + if (configuration) { + baseOptions = configuration.baseOptions + } + + const localVarRequestOptions = { + method: "POST", + ...baseOptions, + ...options, + } + const localVarHeaderParameter = {} as any + const localVarQueryParameter = {} as any + + localVarHeaderParameter["Content-Type"] = "application/json" + + setSearchParams(localVarUrlObj, localVarQueryParameter) + let headersFromBaseOptions = + baseOptions && baseOptions.headers ? baseOptions.headers : {} + localVarRequestOptions.headers = { + ...localVarHeaderParameter, + ...headersFromBaseOptions, + ...options.headers, + } + localVarRequestOptions.data = serializeDataIfNeeded( + ChatRequestRequest, + localVarRequestOptions, + configuration, + ) + + return { + url: toPathString(localVarUrlObj), + options: localVarRequestOptions, + } + }, + } +} + +/** + * ChatAgentApi - functional programming interface + * @export + */ +export const ChatAgentApiFp = function (configuration?: Configuration) { + const localVarAxiosParamCreator = ChatAgentApiAxiosParamCreator(configuration) + return { + /** + * Handle a POST request to the chatbot agent. + * @param {ChatRequestRequest} ChatRequestRequest + * @param {*} [options] Override http request option. + * @throws {RequiredError} + */ + async chatAgentCreate( + ChatRequestRequest: ChatRequestRequest, + options?: RawAxiosRequestConfig, + ): Promise< + (axios?: AxiosInstance, basePath?: string) => AxiosPromise + > { + const localVarAxiosArgs = await localVarAxiosParamCreator.chatAgentCreate( + ChatRequestRequest, + options, + ) + const index = configuration?.serverIndex ?? 0 + const operationBasePath = + operationServerMap["ChatAgentApi.chatAgentCreate"]?.[index]?.url + return (axios, basePath) => + createRequestFunction( + localVarAxiosArgs, + globalAxios, + BASE_PATH, + configuration, + )(axios, operationBasePath || basePath) + }, + } +} + +/** + * ChatAgentApi - factory interface + * @export + */ +export const ChatAgentApiFactory = function ( + configuration?: Configuration, + basePath?: string, + axios?: AxiosInstance, +) { + const localVarFp = ChatAgentApiFp(configuration) + return { + /** + * Handle a POST request to the chatbot agent. + * @param {ChatAgentApiChatAgentCreateRequest} requestParameters Request parameters. + * @param {*} [options] Override http request option. + * @throws {RequiredError} + */ + chatAgentCreate( + requestParameters: ChatAgentApiChatAgentCreateRequest, + options?: RawAxiosRequestConfig, + ): AxiosPromise { + return localVarFp + .chatAgentCreate(requestParameters.ChatRequestRequest, options) + .then((request) => request(axios, basePath)) + }, + } +} + +/** + * Request parameters for chatAgentCreate operation in ChatAgentApi. + * @export + * @interface ChatAgentApiChatAgentCreateRequest + */ +export interface ChatAgentApiChatAgentCreateRequest { + /** + * + * @type {ChatRequestRequest} + * @memberof ChatAgentApiChatAgentCreate + */ + readonly ChatRequestRequest: ChatRequestRequest +} + +/** + * ChatAgentApi - object-oriented interface + * @export + * @class ChatAgentApi + * @extends {BaseAPI} + */ +export class ChatAgentApi extends BaseAPI { + /** + * Handle a POST request to the chatbot agent. + * @param {ChatAgentApiChatAgentCreateRequest} requestParameters Request parameters. + * @param {*} [options] Override http request option. 
+ * @throws {RequiredError} + * @memberof ChatAgentApi + */ + public chatAgentCreate( + requestParameters: ChatAgentApiChatAgentCreateRequest, + options?: RawAxiosRequestConfig, + ) { + return ChatAgentApiFp(this.configuration) + .chatAgentCreate(requestParameters.ChatRequestRequest, options) + .then((request) => request(this.axios, this.basePath)) + } +} + /** * CkeditorApi - axios parameter creator * @export diff --git a/frontends/main/package.json b/frontends/main/package.json index 484671c5e6..3d532a7b79 100644 --- a/frontends/main/package.json +++ b/frontends/main/package.json @@ -12,8 +12,11 @@ "dependencies": { "@ebay/nice-modal-react": "^1.2.13", "@emotion/cache": "^11.13.1", + "@emotion/styled": "^11.11.0", "@mitodl/course-search-utils": "3.3.2", "@next/bundle-analyzer": "^14.2.15", + "@nlux/react": "^2.17.1", + "@nlux/themes": "^2.17.1", "@remixicon/react": "^4.2.0", "@sentry/nextjs": "^8.36.0", "@tanstack/react-query": "^4.36.1", diff --git a/frontends/main/public/images/mit_mascot_tim.png b/frontends/main/public/images/mit_mascot_tim.png new file mode 100644 index 0000000000000000000000000000000000000000..030bf82939f2a1c82b27f6c4e5ad65d8a093a540 GIT binary patch literal 22493 zcmaHSWl)^mvM%oK5M*$N!6iWO;10pvoxwwJ2r|e3!QI{6U4sS*5S+n+6I{aO+vn_a z?vK0gR87tMu4-GWyJ~em-4msz@(~k_3=IYb22);6S_1|KmiXjT;6=apxm$qjTs`buoB{txG&6Vc^bnxl)rwQY%H72iWbx)ZYubNQxe7~xtjs)I zKw2&?j{iPLHCq=C7k671R{%F32NxHB>64warHi*a^S?Nhm4)S<-95~lEv)3FMQPq- zaM;;d3QO{G3-Is=3GfN=^Kx;?$O;Nb2?+53xh41nfzsSUeE;T^cCqkuvU2wLH?QUY z@(TV(-hbl3$@NXMv=zwC%gRy~y4Hva*V zico~u9}nQC!>sY)6$Fdo$PDMK8^cd$rEl;D<)nKeh?={!QpJdlEyMr5j`{59D#8O6 zKXkM13ub6f}m1eXMN_bROGmaq98>nv7R7~GtlX9;tT z`}x)yFLw7nJq2!v9n9pmK3*SLkXqv7SB@knMaaYcEq3~u1ZfOve^5^|Ja6YVwr_Oc#z>t_ zME9lnkM6bwM~mPn(ZLt~cRzTbc&++}e1Kgq zL*d-cF=`?!2Gyg!0G-z!|Ej+)kGfXykDjmHLx0|19F1?@+;_iR>m5(7{Fyv32V@+C zu#gesR`L?&*DNLd(X}h-rcPzT?^x2?E|ISSRV~&G>z(S;WJ$$Ju_S0sZ2k3jqJcFt z6l@zJlMWc&)Y@&65(H%O984RWmQH)UD&qpnKV6QkNij8ZWx5Hb9>ln>LKJZ%(+hn} zSLyJFa&eNi#OVf6qYMwp)fnw|py}@Xg%$U>FEc_T@%NkH?~TSfjbPs=x+FzO>ZX%5@oz;31v*96R}=~$<$5cC6@3~(@Yf0 z?~K7_uz})*=>p{#jYI^w%P%7LVMt>DWWD`@dkxg}SyD@|^X`K^x0G}ka7ehNh|seW z!7m!tmqAH)viLG+SnM_-yA}yFL`=zo$#9Vl12Ul}GeH)3tduCD;*`$XAy0_Si7giI zu*}(z)U?D9Lb)Ojp?v!))`snZ>eM*j=|12WeZEF4XF@C^AsQDKCH;W@uFW_V;()(c zDygi46T$>lDpb1YjG|Fi(|#Wnk`QT2`l^UEwnzG%aV-V~CzAfu5SMd&sJbAU5O;8* zr$-PcCM*PYBuyhYCJ~$jxGSNm7oU!{N&p%KL60_x4d;^0TaoRUWeztAjRswD>7 zb3qG;0uo99Z6RqEKsR3t>-oRC0RepYIn%)Q6P$FZfT0lBkYE!2_XOwhgjk+p62UVg zHv4>k_}Tj|JNCpQ8tw*ZH2lXAC%J~e_M|s>SgVr=l5FlWLz4b%BId!L`zC1d7q_u2 zX~2U-JKJ3t^<~J<^t*%=5^E;+x;{hjp1YyQlST=lDsu%g1&lIcL>NR61+7_}@?hgW zQ8z{bVU>LNeh;<|V_O`W2Zt_Vjbit8i>tTO91gonvS&|oWmLz;yDnnr z1VsJFQ%2*FdeCcgJ>n6ajTiaSo8uW3Rw3+N$l@;%$@Y<$&0NQqex+9qOhCq-16ft; zMnL>ihMfi4q+FtO`+$-;8DCmwA|?p79{3SsVM2==MRy zw`dWAqW5v+Pm`a$X96m>H%%G?KI&8)Stw3Lj===D=*N1#EZAw(8g-h1o4a~~9gzAX z4#LCSe+qgvni93Iz5~&7fpu*I;IILS5oZrx*phA*L!Tn__QLy@&R&--l`EOj5+Kyi z_TQ~5)pV@=U+iCnm-Jed8{?k$-7_t9{}PjL;P8#S%X);<>>OD`3_RQlhT@@Xp>r}T zYrW&P_-%PSOuxB{hklV+_^~QACvZA|cZ|~b>F3ST)A@uIbf&U$FAg!R!?&ajhERp5 zJ$N5Fu-IxLC_qvXeqa;ac>W|^9|eW&+3b7q?7U$1|OnQ!bqrj7N(alZ7GHVr|*)6kuJ z0~Q|vop194O|`v3)$iHH;ZTNg6nldFP(u>hOLTC3BfMIO5iWmZTX{oSW10k=dTEU#PLfjf>p4|uKM zT_-30{xvSYxC>8_Ui8|D^^fh}-aXD{8ny{UdRzEbVn|NYEH?Nb6Io3g;E*q9LL`N~ z7ugx!DfJX(($T-Hf4F(Z0!buu_ju3dv;_p6TOD)q>ub7X=f@agfGP}uuzKR@qQ6Qg zS;>7+Oe$asv?77hM~?CG?N3^zFUGcMMN)zIq#8mOD@Ssh6iFm;KI9c0NFDVSPqkHZ zsV2^`6eij4)aE4BSyrCaeb0by|aR2y9jfX7ZuRYY>i=; z(MA|QpUPr_sRI=#9f)O;(ft^^YUJ4z+tn_9LvISpkAjL>-<589HoGdtA3f5^57X; 
diff --git a/frontends/main/src/app-pages/ChatPage/ChatPage.tsx b/frontends/main/src/app-pages/ChatPage/ChatPage.tsx
new file mode 100644
--- /dev/null
+++ b/frontends/main/src/app-pages/ChatPage/ChatPage.tsx
@@ -0,0 +1,54 @@
[the opening lines of ChatPage.tsx (the "use client" directive and imports) were lost along with the preceding binary data; the stripped JSX tags below are reconstructed from context, and the wrapper element's name is a guess]
+const ChatPage = () => {
+  const recommendationBotEnabled = useFeatureFlagEnabled(
+    FeatureFlags.RecommendationBot,
+  )
+  return (
+    <ChatPageContainer>
+      {
+        // eslint-disable-next-line no-constant-condition
+        recommendationBotEnabled ? (
+          <NluxAiChat send={sends.agent} />
+        ) : (
+          <></>
+        )
+      }
+    </ChatPageContainer>
+  )
+}
+
+export default ChatPage
diff --git a/frontends/main/src/app-pages/ChatPage/send.ts b/frontends/main/src/app-pages/ChatPage/send.ts
new file mode 100644
index 0000000000..368ac8bd99
--- /dev/null
+++ b/frontends/main/src/app-pages/ChatPage/send.ts
@@ -0,0 +1,79 @@
+import { NluxAiChatProps } from "@/page-components/Nlux-AiChat/AiChat"
+
+type ChatEndpoint = "agent"
+
+function getCookie(name: string) {
+  const value = `; ${document.cookie}`
+  const parts = value.split(`; ${name}=`)
+  if (parts.length === 2) {
+    return parts.pop()?.split(";").shift()
+  }
+}
+
+const makeRequest = async (endpoint: ChatEndpoint, message: string) =>
+  fetch(`${process.env.NEXT_PUBLIC_MITOL_API_BASE_URL}/api/v0/chat_agent/`, {
+    method: "POST",
+    headers: {
+      "Content-Type": "application/json",
+      "X-CSRFToken":
+        getCookie(process.env.NEXT_PUBLIC_CSRF_COOKIE_NAME || "csrftoken") ??
+        "",
+    },
+    credentials: "include", // TODO Remove this, should be handled by same-origin
+    body: JSON.stringify({ message }),
+  })
+
+const RESPONSE_DELAY = 500
+
+// Sends a query to the server and feeds the streamed response chunks to the
+// observer as they arrive
+const makeSend =
+  (
+    endpoint: ChatEndpoint,
+    processContent: (content: string) => string = (content) => content,
+  ): NluxAiChatProps["send"] =>
+  async (message, observer) => {
+    const response = await makeRequest(endpoint, message)
+
+    if (response.status !== 200) {
+      observer.error(new Error("Failed to connect to the server"))
+      return
+    }
+
+    if (!response.body) {
+      return
+    }
+
+    // Read a stream of server-sent events
+    // and feed them to the observer as they are being generated
+    const reader = response.body.getReader()
+    const textDecoder = new TextDecoder()
+
+    // eslint-disable-next-line no-constant-condition
+    while (true) {
+      const { value, done } = await reader.read()
+      if (done) {
+        /**
+         * Without the pause here, some messages were getting displayed completely
+         * empty. Unsure why.
+         *
+         * Maybe related to stream having only a single chunk?
+         */
+        await new Promise((res) => setTimeout(res, RESPONSE_DELAY))
+        break
+      }
+
+      const content = textDecoder.decode(value)
+      if (content) {
+        observer.next(processContent(content))
+      }
+    }
+
+    observer.complete()
+  }
+
+const sends: Record<ChatEndpoint, NluxAiChatProps["send"]> = {
+  agent: makeSend("agent"),
+}
+
+export { sends }
+export type { ChatEndpoint }
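A note on the diffs that follow: JSX tags and generic type arguments that the extraction stripped (for example <ChatPage /> in page.tsx and Meta<typeof NluxAiChat> in the stories file) are reconstructed from the surrounding imports and are best guesses, not verbatim source. Separately, the observer contract that makeSend satisfies can be exercised without mounting the chat UI — a sketch (not part of the patch), where the prompt string is arbitrary and the observer shape mirrors @nlux/react's streaming adapter:

// Sketch: drive sends.agent by hand and log chunks as they stream in.
import { sends } from "./send"

const observer = {
  // one call per decoded read() from the response body
  next: (chunk: string) => console.log("chunk:", chunk),
  error: (err: Error) => console.error("stream failed:", err),
  complete: () => console.log("[stream complete]"),
}

sends.agent("Recommend a course about machine learning", observer)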
diff --git a/frontends/main/src/app/chat/page.tsx b/frontends/main/src/app/chat/page.tsx
new file mode 100644
index 0000000000..b9c2ce4846
--- /dev/null
+++ b/frontends/main/src/app/chat/page.tsx
@@ -0,0 +1,16 @@
+import React from "react"
+import { Metadata } from "next"
+
+import ChatPage from "@/app-pages/ChatPage/ChatPage"
+import { standardizeMetadata } from "@/common/metadata"
+
+export const metadata: Metadata = standardizeMetadata({
+  title: "Chat Demo",
+  robots: "noindex",
+})
+
+const Page: React.FC = () => {
+  return <ChatPage />
+}
+
+export default Page
diff --git a/frontends/main/src/common/feature_flags.ts b/frontends/main/src/common/feature_flags.ts
index 0a21e9c50c..a0f9c47f28 100644
--- a/frontends/main/src/common/feature_flags.ts
+++ b/frontends/main/src/common/feature_flags.ts
@@ -4,4 +4,5 @@ export enum FeatureFlags {
   EnableEcommerce = "enable-ecommerce",
   DrawerV2Enabled = "lr_drawer_v2",
+  RecommendationBot = "recommendation-bot",
 }
diff --git a/frontends/main/src/page-components/Nlux-AiChat/AiChat.stories.tsx b/frontends/main/src/page-components/Nlux-AiChat/AiChat.stories.tsx
new file mode 100644
index 0000000000..d87eadf387
--- /dev/null
+++ b/frontends/main/src/page-components/Nlux-AiChat/AiChat.stories.tsx
@@ -0,0 +1,52 @@
+import * as React from "react"
+import type { Meta, StoryObj } from "@storybook/react"
+import styled from "@emotion/styled"
+import { send } from "./mock-send"
+
+import { NluxAiChat } from "./AiChat"
+
+const Container = styled.div({
+  width: "388px",
+  height: "600px",
+})
+
+const meta: Meta<typeof NluxAiChat> = {
+  title: "smoot-design/AiChat (Nlux)",
+  render: (args) => {
+    return (
+      <Container>
+        <NluxAiChat {...args} send={send} />
+      </Container>
+    )
+  },
+}
+
+export default meta
+
+type Story = StoryObj<typeof NluxAiChat>
+
+const InitialConversation: Story = {
+  storyName: "AiChat",
+  args: {
+    initialConversation: [
+      {
+        role: "assistant",
+        message: "Hello! What are you interested in learning about today?",
+      },
+    ],
+  },
+}
+const ConversationStarters: Story = {
+  storyName: "AiChat",
+  args: {
+    conversationOptions: {
+      conversationStarters: [
+        { prompt: "I'm interested in quantum computing." },
+        { prompt: "I want to learn about global warming." },
+        { prompt: "I'm curious about AI applications for business." },
+      ],
+    },
+  },
+}
+
+export { InitialConversation, ConversationStarters }
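In AiChat.tsx below, the extraction also ate the <AiChat> element and the error <Alert> wrapper; both are reconstructed from the hooks and imports around them, and the exact prop list on <AiChat> is a best guess. The error branch relies on extractJSONFromComment from ol-utilities, whose diff falls outside this excerpt — a plausible sketch of its behavior, assuming the backend embeds the error JSON in an HTML comment (the regex and fallback here are illustrative, not the shipped implementation):

// Sketch of extractJSONFromComment (the real helper lives in
// frontends/ol-utilities/src/lib/utils.ts and may differ in detail).
// Pulls a JSON payload out of an HTML comment such as
//   <!-- {"error": {"message": "Rate limit exceeded"}} -->
// and returns undefined when nothing parseable is found.
const extractJSONFromComment = (comment: string) => {
  const match = comment.match(/<!--\s*(\{[\s\S]*\})\s*-->/)
  if (!match) return undefined
  try {
    return JSON.parse(match[1])
  } catch {
    return undefined
  }
}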
diff --git a/frontends/main/src/page-components/Nlux-AiChat/AiChat.tsx b/frontends/main/src/page-components/Nlux-AiChat/AiChat.tsx
new file mode 100644
index 0000000000..73554ed945
--- /dev/null
+++ b/frontends/main/src/page-components/Nlux-AiChat/AiChat.tsx
@@ -0,0 +1,80 @@
+import * as React from "react"
+import { useCallback, useState } from "react"
+import {
+  AiChat,
+  AiChatProps,
+  useAsStreamAdapter,
+  useAiChatApi,
+} from "@nlux/react"
+import type { StreamSend } from "@nlux/react"
+import { personas } from "./personas"
+
+import "@nlux/themes/unstyled.css"
+import "./nlux-theme.css"
+import { Alert, styled } from "ol-components"
+import { extractJSONFromComment } from "ol-utilities"
+
+type NluxAiChatProps = Pick<
+  AiChatProps,
+  "initialConversation" | "conversationOptions"
+> & {
+  send: StreamSend
+}
+
+const StyledDebugPre = styled.pre({
+  width: "80%",
+  whiteSpace: "pre-wrap",
+})
+
+const NluxAiChat: React.FC<NluxAiChatProps> = (props) => {
+  const adapter = useAsStreamAdapter(props.send, [])
+  const [lastMessageReceived, setLastMessageReceived] = useState("")
+  const onMessageReceived = useCallback(
+    (payload: { message: React.SetStateAction<string> }) =>
+      setLastMessageReceived(payload.message),
+    [setLastMessageReceived],
+  )
+  const api = useAiChatApi()
+  return (
+    <>
+      <AiChat
+        api={api}
+        adapter={adapter}
+        personaOptions={personas}
+        events={{ messageReceived: onMessageReceived }}
+        initialConversation={props.initialConversation}
+        conversationOptions={props.conversationOptions}
+      />
+      {lastMessageReceived &&
+        (lastMessageReceived.toString().includes('{"error":') ? (
+          <Alert severity="error">
+            {extractJSONFromComment(lastMessageReceived)?.error?.message ||
+              "Sorry, an unexpected error occurred."}
+          </Alert>
+        ) : lastMessageReceived.toString().includes("