qa_faq.py
import openai
import streamlit as st
from elasticsearch import Elasticsearch
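
# Pipeline overview: search() retrieves matching FAQ entries from a local
# Elasticsearch index, build_prompt() folds them into a prompt, chat() sends
# the prompt to a local Ollama model, and a small Streamlit UI ties it together.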

def search(query, coursefilter="data-engineering-zoomcamp", index_name="faq_search"):
    """Retrieve the top matching FAQ documents from Elasticsearch."""
    es_client = Elasticsearch("http://localhost:9200")
    search_settings = {
        "size": 5,
        "query": {
            "bool": {
                # Full-text match across the question, answer text and section
                "must": {
                    "multi_match": {
                        "query": query,
                        "fields": ["question", "text", "section"],
                        "type": "best_fields"
                    }
                },
                # Restrict results to the selected course
                "filter": {
                    "term": {
                        "course": coursefilter
                    }
                }
            }
        }
    }
    response = es_client.search(index=index_name, body=search_settings)
    result_docs = []
    # print(response)
    for hit in response['hits']['hits']:
        result_docs.append(hit['_source'])
    return result_docs
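
# Example usage (a sketch, assuming Elasticsearch is reachable on localhost:9200
# and the "faq_search" index has already been populated; the question below is
# purely illustrative):
#   docs = search("How do I set up the course environment?")
#   # -> list of up to 5 hits, each with fields such as "question", "text", "section"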

def build_prompt(query, related_docs):
    """Assemble the LLM prompt from the user question and the retrieved documents."""
    prompt = """
You are an assistant for teaching online courses. Answer the question based on the context provided.
Use only the information available in the provided context and strictly adhere to it.

question: {query}
context: {context}
""".strip()
    context = ""
    for doc in related_docs:
        context += f"section: {doc['section']}\nquestion: {doc['question']}\nanswer: {doc['text']} \n\n"
    print("context:", context)  # debug output
    prompt = prompt.format(query=query, context=context).strip()
    return prompt
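
# Illustrative shape of the assembled prompt (values invented for the example):
#   question: Can I still join the course?
#   context: section: General
#            question: Can I still join?
#            answer: Yes, you can register at any time ...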

def chat(prompt, model="phi3"):
    """Send the prompt to a local Ollama server via its OpenAI-compatible API."""
    client = openai.OpenAI(
        base_url="http://localhost:11434/v1/",
        api_key="ollama"
    )
    response = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": "you are a faq assistant"},
            {"role": "user", "content": prompt}
        ]
    )
    print(response)  # debug output
    return response.choices[0].message.content
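
# Note: chat() assumes a local Ollama server exposing its OpenAI-compatible
# endpoint at http://localhost:11434/v1/ with the "phi3" model already pulled
# (e.g. via `ollama pull phi3`); the api_key is a placeholder that Ollama ignores.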

def rag(query, model="phi3"):
    """Full RAG pipeline: retrieve documents, build the prompt, generate an answer."""
    results = search(query=query)
    prompt = build_prompt(query=query, related_docs=results)
    answer = chat(prompt, model=model)
    return answer

def main():
    st.title("RAG Function Invocation")
    user_input = st.text_input("Enter your input:")
    if st.button("Ask"):
        with st.spinner('Processing...'):
            output = rag(user_input)
        st.success("Completed!")
        st.write(output)


if __name__ == "__main__":
    main()
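
# To launch the app (with Elasticsearch and Ollama running as assumed above):
#   streamlit run qa_faq.py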