diff --git a/python/community/llama-bot/README.md b/python/community/llama-bot/README.md new file mode 100644 index 00000000..1f29a89e --- /dev/null +++ b/python/community/llama-bot/README.md @@ -0,0 +1,43 @@ +# Llama 3.1 Chatbot with Flet +A simple chatbot application using the Llama 3.1 model from Groq, built with the Flet framework. This app provides a user interface for sending messages to the chatbot and receiving responses. +## Installation + +1. Clone the repository: + + ```bash + git clone https://github.com/yourusername/llama-chatbot.git + cd llama-chatbot + ``` + +2. Install the required Python packages: + + ```bash + pip install flet groq + ``` + +3. Set up your Groq API key. Follow the instructions below to obtain your API key. + +## Getting the API Key from Groq + +1. Visit the [Groq website](https://groq.com) and sign up for an account. +2. Navigate to the API section of your account settings. +3. Generate a new API key and copy it. + +## Usage + +1. Open the `app.py` file in a text editor. +2. Replace the placeholder API key with your Groq API key: + + ```python + client = Groq( + api_key='your_groq_api_key_here', + ) + ``` + +3. Save the file and run the application: + + ```bash + python app.py + ``` + +4. The application will open a window with the chat interface. Type your message and press "Send" to interact with the chatbot.
"""Llama 3.1 chatbot UI built with Flet, backed by the Groq chat API."""

import os

from groq import Groq
import flet as ft

# Prefer the GROQ_API_KEY environment variable; the literal placeholder is
# kept as a fallback so the README's "paste your key here" flow still works.
client = Groq(
    api_key=os.environ.get("GROQ_API_KEY", "your_groq_api_key_here"),
)


class Message:
    """A single chat exchange: who sent it, what they said, and the reply."""

    def __init__(self, user: str, text: str, response_text: str):
        self.user = user  # session id of the sending page
        self.text = text  # the user's prompt
        self.response_text = response_text  # the model's reply


def main(page: ft.Page):
    """Build the chat window and wire the send button to the Groq API.

    Messages are broadcast over ``page.pubsub`` so every connected session
    renders both sides of each exchange.
    """
    chat = ft.ListView(expand=True, spacing=10, padding=10, auto_scroll=True)
    new_message = ft.TextField(expand=True, hint_text="Type your message here...")

    def on_message(message: Message):
        # Render the user prompt and the bot reply for every subscriber.
        chat.controls.append(ft.Text(f"User: {message.text}"))
        chat.controls.append(ft.Text(f"Bot: {message.response_text}"))
        page.update()

    page.pubsub.subscribe(on_message)

    def send_click(e):
        user_message = new_message.value
        if not user_message:
            # Nothing typed — ignore the click (same as the original guard).
            return
        new_message.value = ""
        # Transient status line shown while the blocking API call runs.
        processing_text = ft.Text("Processing answer...", color="blue")
        chat.controls.append(processing_text)
        page.update()
        try:
            chat_completion = client.chat.completions.create(
                messages=[
                    {
                        "role": "user",
                        "content": user_message,
                    }
                ],
                model="llama-3.1-70b-versatile",
            )
            response_text = chat_completion.choices[0].message.content
        except Exception as err:
            # Surface API failures in the chat instead of leaving the UI
            # stuck on the status line with an unhandled exception.
            response_text = f"Error: {err}"
        finally:
            # BUG FIX: the original never removed the status line, so a stale
            # "Processing answer..." entry piled up after every message.
            chat.controls.remove(processing_text)
        message = Message(
            user=page.session_id,
            text=user_message,
            response_text=response_text,
        )
        page.pubsub.send_all(message)
        page.update()

    page.add(
        ft.Container(
            content=ft.Column([
                chat,
                ft.Row([new_message, ft.ElevatedButton("Send", on_click=send_click)]),
            ]),
            expand=True,
            padding=10,
        )
    )


ft.app(target=main)