rag_chatbot.py
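"""Streamlit RAG chatbot: a ReAct agent over BM25/MMR retrieval tools and DuckDuckGo search.

Builds a LangChain ReAct agent with windowed conversation memory and exposes
handle_user_input() for a Streamlit front end.
"""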
import os

from dotenv import load_dotenv
import streamlit as st
from langchain.agents import AgentExecutor, create_react_agent
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain_community.callbacks.streamlit import StreamlitCallbackHandler
from langchain_community.chat_models import ChatOpenAI
from chatbot_tools.prompt import get_react_template
from chatbot_tools.retriever import bm25_retrieval, ddg_retrieval, mmr_retrieval

load_dotenv()
# Expose the OpenAI key from Streamlit secrets to the OpenAI client via the environment.
os.environ["OPENAI_API_KEY"] = st.secrets["OPENAI_API_KEY"]
# gpt-4o-mini with streaming enabled so tokens can be rendered live in Streamlit.
llm = ChatOpenAI(
    model_name="gpt-4o-mini",
    streaming=True,
)
# Retrieval tools the agent can choose between: BM25 retrieval, DuckDuckGo search,
# and MMR retrieval.
tools = [
    bm25_retrieval,
    ddg_retrieval,
    mmr_retrieval,
]

# ReAct-style agent assembled from the custom prompt template.
prompt_template = get_react_template()
react_agent = create_react_agent(llm, tools, prompt_template)

# Keep the last 6 conversational turns as context.
memory = ConversationBufferWindowMemory(k=6, return_messages=True)
# The executor wires the agent, tools, and windowed memory together;
# handle_parsing_errors lets it recover from malformed LLM output.
agent_executor = AgentExecutor(
agent=react_agent,
tools=tools,
verbose=True,
handle_parsing_errors=True,
memory=memory
)
def handle_user_input(user_input: str) -> dict:
    """Run the agent on a single user query, streaming intermediate steps to Streamlit."""
    st_callback = StreamlitCallbackHandler(st.container())
    result = agent_executor.invoke(
        {"input": user_input},
        config={"callbacks": [st_callback]},
    )
    return result
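
# A minimal usage sketch (not part of the original file) of how a Streamlit chat page
# might call handle_user_input. st.chat_input / st.chat_message / st.session_state are
# standard Streamlit APIs; the exact wiring below is an illustrative assumption.
# if "messages" not in st.session_state:
#     st.session_state.messages = []
# for msg in st.session_state.messages:
#     with st.chat_message(msg["role"]):
#         st.write(msg["content"])
# if user_query := st.chat_input("Ask a question"):
#     with st.chat_message("user"):
#         st.write(user_query)
#     with st.chat_message("assistant"):
#         response = handle_user_input(user_query)
#         st.write(response["output"])
#     st.session_state.messages.append({"role": "user", "content": user_query})
#     st.session_state.messages.append({"role": "assistant", "content": response["output"]})

# --- Earlier standalone draft (no Streamlit UI), kept commented out below for reference ---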
# import os
# from dotenv import load_dotenv
# from langchain import hub
# from langchain.agents import AgentExecutor, create_react_agent
# from langchain_community.chat_models import ChatOpenAI as OpenAI
# from langchain_community.tools.tavily_search import TavilySearchResults
# from chatbot_tools.retriever import bm25_retrieval, mmr_retrieval, ddg_retrieval
# from chatbot_tools.prompt import get_react_template
# import streamlit as st
# load_dotenv()
# llm = OpenAI(
# model_name="gpt-4o-mini"
# # temperature=0.7,
# # api_key=os.getenv("OPENAI_API_KEY")
# )
# tools = [
# bm25_retrieval,
# ddg_retrieval,
# mmr_retrieval
# ]
# prompt = get_react_template()
# react_agent = create_react_agent(llm, tools, prompt)
# agent_executor = AgentExecutor(agent=react_agent,
# tools=tools,
# verbose=True,
# handle_parsing_errors=True)
# user_input = "can I drink cola after wisdom teeth removal?"
# result = agent_executor.invoke({"input": user_input})
# print(result)