import os
import logging

import click
import torch
import utils
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.llms import HuggingFacePipeline
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler  # for streaming response
from langchain.callbacks.manager import CallbackManager

callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])

from prompt_template_utils import get_prompt_template
from utils import get_embeddings

# from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores import Chroma
from transformers import (
    GenerationConfig,
    pipeline,
)

from load_models import (
    load_quantized_model_awq,
    load_quantized_model_gguf_ggml,
    load_quantized_model_qptq,
    load_full_model,
)

from constants import (
    EMBEDDING_MODEL_NAME,
    PERSIST_DIRECTORY,
    MODEL_ID,
    MODEL_BASENAME,
    MAX_NEW_TOKENS,
    MODELS_PATH,
    CHROMA_SETTINGS,
)


def load_model(device_type, model_id, model_basename=None, LOGGING=logging):
    """
    Select a model for text generation using the HuggingFace library.
    If you are running this for the first time, it will download a model for you.
    Subsequent runs will use the model from disk.

    Args:
        device_type (str): Type of device to use, e.g., "cuda" for GPU or "cpu" for CPU.
        model_id (str): Identifier of the model to load from HuggingFace's model hub.
        model_basename (str, optional): Basename of the model if using quantized models.
            Defaults to None.

    Returns:
        HuggingFacePipeline: A pipeline object for text generation using the loaded model.

    Raises:
        ValueError: If an unsupported model or device type is provided.
    """
    logging.info(f"Loading Model: {model_id}, on: {device_type}")
    logging.info("This action can take a few minutes!")

    if model_basename is not None:
        if ".gguf" in model_basename.lower():
            llm = load_quantized_model_gguf_ggml(model_id, model_basename, device_type, LOGGING)
            return llm
        elif ".ggml" in model_basename.lower():
            model, tokenizer = load_quantized_model_gguf_ggml(model_id, model_basename, device_type, LOGGING)
        elif ".awq" in model_basename.lower():
            model, tokenizer = load_quantized_model_awq(model_id, LOGGING)
        else:
            model, tokenizer = load_quantized_model_qptq(model_id, model_basename, device_type, LOGGING)
    else:
        model, tokenizer = load_full_model(model_id, model_basename, device_type, LOGGING)

    # Load the generation configuration from the model to avoid warnings
    generation_config = GenerationConfig.from_pretrained(model_id)
    # see here for details:
    # https://huggingface.co/docs/transformers/
    # main_classes/text_generation#transformers.GenerationConfig.from_pretrained.returns

    # Create a pipeline for text generation
    if device_type == "hpu":
        from gaudi_utils.pipeline import GaudiTextGenerationPipeline

        pipe = GaudiTextGenerationPipeline(
            model_name_or_path=model_id,
            max_new_tokens=1000,
            temperature=0.2,
            top_p=0.95,
            repetition_penalty=1.15,
            do_sample=True,
            max_padding_length=5000,
        )
        pipe.compile_graph()
    else:
        pipe = pipeline(
            "text-generation",
            model=model,
            tokenizer=tokenizer,
            max_length=MAX_NEW_TOKENS,
            temperature=0.2,
            # top_p=0.95,
            repetition_penalty=1.15,
            generation_config=generation_config,
        )

    local_llm = HuggingFacePipeline(pipeline=pipe)
    logging.info("Local LLM Loaded")

    return local_llm
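
# Usage sketch (comments only, not executed): load_model returns a LangChain LLM object
# (a HuggingFacePipeline, or a llama.cpp-backed LLM for .gguf models) that can be queried
# directly or handed to a chain. MODEL_ID / MODEL_BASENAME come from constants.py; adjust
# the device string to whatever your hardware supports.
#
#   llm = load_model("cuda" if torch.cuda.is_available() else "cpu",
#                    model_id=MODEL_ID, model_basename=MODEL_BASENAME)
#   print(llm("Q: What does this script do?\nA:"))  # or llm.invoke(...) on newer LangChain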


def retrieval_qa_pipline(device_type, use_history, promptTemplate_type="llama"):
    """
    Initializes and returns a retrieval-based Question Answering (QA) pipeline.

    This function sets up a QA system that retrieves relevant information using embeddings
    from the HuggingFace library. It then answers questions based on the retrieved information.

    Parameters:
    - device_type (str): Specifies the type of device where the model will run, e.g., 'cpu', 'cuda', etc.
    - use_history (bool): Flag to determine whether to use chat history or not.

    Returns:
    - RetrievalQA: An initialized retrieval-based QA system.

    Notes:
    - The function uses embeddings from the HuggingFace library, either instruction-based or regular.
    - `get_embeddings` chooses the appropriate langchain embeddings class based on the embedding model
      name (the matching code also lives in ingest.py) and passes additional arguments for Instructor and
      BGE models to improve results, following the instructions in their respective HuggingFace
      repositories, project pages, or GitHub repositories.
    - The Chroma class is used to load a vector store containing pre-computed embeddings.
    - The retriever fetches relevant documents or data based on a query.
    - The prompt and memory, obtained from the `get_prompt_template` function, might be used in the QA system.
    - The model is loaded onto the specified device using its ID and basename.
    - The QA system retrieves relevant documents using the retriever and then answers questions based on those documents.
    """
    if device_type == "hpu":
        from gaudi_utils.embeddings import load_embeddings

        embeddings = load_embeddings()
    else:
        embeddings = get_embeddings(device_type)

    logging.info(f"Loaded embeddings from {EMBEDDING_MODEL_NAME}")

    # load the vectorstore
    db = Chroma(persist_directory=PERSIST_DIRECTORY, embedding_function=embeddings, client_settings=CHROMA_SETTINGS)
    retriever = db.as_retriever()

    # get the prompt template and memory if set by the user.
    prompt, memory = get_prompt_template(promptTemplate_type=promptTemplate_type, history=use_history)

    # load the llm pipeline
    llm = load_model(device_type, model_id=MODEL_ID, model_basename=MODEL_BASENAME, LOGGING=logging)

    if use_history:
        qa = RetrievalQA.from_chain_type(
            llm=llm,
            chain_type="stuff",  # try other chain types as well: refine, map_reduce, map_rerank
            retriever=retriever,
            return_source_documents=True,  # verbose=True,
            callbacks=callback_manager,
            chain_type_kwargs={"prompt": prompt, "memory": memory},
        )
    else:
        qa = RetrievalQA.from_chain_type(
            llm=llm,
            chain_type="stuff",  # try other chain types as well: refine, map_reduce, map_rerank
            retriever=retriever,
            return_source_documents=True,  # verbose=True,
            callbacks=callback_manager,
            chain_type_kwargs={
                "prompt": prompt,
            },
        )

    return qa
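
# Usage sketch (mirrors what main() does below): build the chain once, then query it repeatedly.
#
#   qa = retrieval_qa_pipline("cpu", use_history=False, promptTemplate_type="llama")
#   res = qa("What is this document about?")
#   print(res["result"])             # the generated answer
#   print(res["source_documents"])   # the retrieved chunks the answer was grounded on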


# choose the device type to run on, as well as whether to show source documents.
@click.command()
@click.option(
    "--device_type",
    default="cuda" if torch.cuda.is_available() else "cpu",
    type=click.Choice(
        [
            "cpu",
            "cuda",
            "ipu",
            "xpu",
            "mkldnn",
            "opengl",
            "opencl",
            "ideep",
            "hip",
            "ve",
            "fpga",
            "ort",
            "xla",
            "lazy",
            "vulkan",
            "mps",
            "meta",
            "hpu",
            "mtia",
        ],
    ),
    help="Device to run on. (Default is cuda if available, otherwise cpu)",
)
@click.option(
    "--show_sources",
    "-s",
    is_flag=True,
    help="Show sources along with answers (Default is False)",
)
@click.option(
    "--use_history",
    "-h",
    is_flag=True,
    help="Use history (Default is False)",
)
@click.option(
    "--model_type",
    default="llama3",
    type=click.Choice(
        ["llama3", "llama", "mistral", "non_llama"],
    ),
    help="Model type: llama3, llama, mistral or non_llama",
)
@click.option(
    "--save_qa",
    is_flag=True,
    help="Whether to save Q&A pairs to a CSV file (Default is False)",
)
def main(device_type, show_sources, use_history, model_type, save_qa):
    """
    Implements the main information retrieval task for localGPT.

    This function sets up the QA system by loading the necessary embeddings, vectorstore, and LLM model.
    It then enters an interactive loop where the user can input queries and receive answers. Optionally,
    the source documents used to derive the answers can also be displayed.

    Parameters:
    - device_type (str): Specifies the type of device where the model will run, e.g., 'cpu', 'mps', 'cuda', etc.
    - show_sources (bool): Flag to determine whether to display the source documents used for answering.
    - use_history (bool): Flag to determine whether to use chat history or not.

    Notes:
    - Logging information includes the device type, whether source documents are displayed, and the use of history.
    - If the models directory does not exist, it creates a new one to store models.
    - The user can exit the interactive loop by entering "exit".
    - The source documents are displayed if the show_sources flag is set to True.
    """
    logging.info(f"Running on: {device_type}")
    logging.info(f"Display Source Documents set to: {show_sources}")
    logging.info(f"Use history set to: {use_history}")

    # if the models directory does not exist, create it so downloaded models can be stored there.
    if not os.path.exists(MODELS_PATH):
        os.mkdir(MODELS_PATH)

    qa = retrieval_qa_pipline(device_type, use_history, promptTemplate_type=model_type)

    # Interactive questions and answers
    while True:
        query = input("\nEnter a query: ")
        if query == "exit":
            break
        # Get the answer from the chain
        res = qa(query)
        answer, docs = res["result"], res["source_documents"]

        # Print the result
        print("\n\n> Question:")
        print(query)
        print("\n> Answer:")
        print(answer)

        if show_sources:  # print the source documents used for the answer
            print("----------------------------------SOURCE DOCUMENTS---------------------------")
            for document in docs:
                print("\n> " + document.metadata["source"] + ":")
                print(document.page_content)
            print("----------------------------------SOURCE DOCUMENTS---------------------------")

        # Log the Q&A to CSV only if save_qa is True
        if save_qa:
            utils.log_to_csv(query, answer)


if __name__ == "__main__":
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(message)s", level=logging.INFO
    )
    main()
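
# Example invocations (sketch; flags correspond to the click options defined above):
#   python run_localGPT.py                                    # cuda if available, otherwise cpu
#   python run_localGPT.py --device_type cpu --show_sources
#   python run_localGPT.py --use_history --model_type mistral --save_qa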