# rm.py (forked from stanford-oval/storm)
import logging
import os
from typing import Callable, Union, List
import dspy
import pandas as pd
import requests
from langchain_core.documents import Document
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_qdrant import Qdrant
from qdrant_client import QdrantClient, models
from tqdm import tqdm
from .utils import WebPageHelper
class YouRM(dspy.Retrieve):
def __init__(self, ydc_api_key=None, k=3, is_valid_source: Callable = None):
super().__init__(k=k)
if not ydc_api_key and not os.environ.get("YDC_API_KEY"):
raise RuntimeError("You must supply ydc_api_key or set environment variable YDC_API_KEY")
elif ydc_api_key:
self.ydc_api_key = ydc_api_key
else:
self.ydc_api_key = os.environ["YDC_API_KEY"]
self.usage = 0
# If not None, is_valid_source shall be a function that takes a URL and returns a boolean.
if is_valid_source:
self.is_valid_source = is_valid_source
else:
self.is_valid_source = lambda x: True
def get_usage_and_reset(self):
usage = self.usage
self.usage = 0
return {'YouRM': usage}
def forward(self, query_or_queries: Union[str, List[str]], exclude_urls: List[str] = []):
"""Search with You.com for self.k top passages for query or queries
Args:
query_or_queries (Union[str, List[str]]): The query or queries to search for.
exclude_urls (List[str]): A list of urls to exclude from the search results.
Returns:
a list of Dicts, each dict has keys of 'description', 'snippets' (list of strings), 'title', 'url'
"""
queries = (
[query_or_queries]
if isinstance(query_or_queries, str)
else query_or_queries
)
self.usage += len(queries)
collected_results = []
for query in queries:
try:
headers = {"X-API-Key": self.ydc_api_key}
results = requests.get(
f"https://api.ydc-index.io/search?query={query}",
headers=headers,
).json()
                authoritative_results = []
                # Guard against responses that lack a 'hits' field before indexing into it.
                for r in results.get('hits', []):
                    if self.is_valid_source(r['url']) and r['url'] not in exclude_urls:
                        authoritative_results.append(r)
                collected_results.extend(authoritative_results[:self.k])
            except Exception as e:
                logging.error(f'Error occurred while searching query {query}: {e}')
return collected_results
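

# Illustrative usage sketch (not part of the original module): how YouRM might be exercised on
# its own, assuming a valid YDC_API_KEY is set in the environment. The query string and the
# _example_* function name are hypothetical.
def _example_you_rm_usage():
    """Illustrative only: retrieve top passages from You.com for a single query."""
    rm = YouRM(k=5)
    results = rm.forward("retrieval-augmented generation", exclude_urls=[])
    for r in results:
        # Each result dict is documented to carry 'title', 'url', 'description', and 'snippets'.
        print(r['title'], r['url'])
    print(rm.get_usage_and_reset())  # e.g. {'YouRM': 1}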
class BingSearch(dspy.Retrieve):
def __init__(self, bing_search_api_key=None, k=3, is_valid_source: Callable = None,
min_char_count: int = 150, snippet_chunk_size: int = 1000, webpage_helper_max_threads=10,
mkt='en-US', language='en', **kwargs):
"""
Params:
min_char_count: Minimum character count for the article to be considered valid.
snippet_chunk_size: Maximum character count for each snippet.
webpage_helper_max_threads: Maximum number of threads to use for webpage helper.
mkt, language, **kwargs: Bing search API parameters.
- Reference: https://learn.microsoft.com/en-us/bing/search-apis/bing-web-search/reference/query-parameters
"""
super().__init__(k=k)
        if not bing_search_api_key and not os.environ.get("BING_SEARCH_API_KEY"):
            raise RuntimeError(
                "You must supply bing_search_api_key or set environment variable BING_SEARCH_API_KEY")
elif bing_search_api_key:
self.bing_api_key = bing_search_api_key
else:
self.bing_api_key = os.environ["BING_SEARCH_API_KEY"]
self.endpoint = "https://api.bing.microsoft.com/v7.0/search"
self.params = {
'mkt': mkt,
"setLang": language,
"count": k,
**kwargs
}
self.webpage_helper = WebPageHelper(
min_char_count=min_char_count,
snippet_chunk_size=snippet_chunk_size,
max_thread_num=webpage_helper_max_threads
)
self.usage = 0
# If not None, is_valid_source shall be a function that takes a URL and returns a boolean.
if is_valid_source:
self.is_valid_source = is_valid_source
else:
self.is_valid_source = lambda x: True
def get_usage_and_reset(self):
usage = self.usage
self.usage = 0
return {'BingSearch': usage}
def forward(self, query_or_queries: Union[str, List[str]], exclude_urls: List[str] = []):
"""Search with Bing for self.k top passages for query or queries
Args:
query_or_queries (Union[str, List[str]]): The query or queries to search for.
exclude_urls (List[str]): A list of urls to exclude from the search results.
Returns:
a list of Dicts, each dict has keys of 'description', 'snippets' (list of strings), 'title', 'url'
"""
queries = (
[query_or_queries]
if isinstance(query_or_queries, str)
else query_or_queries
)
self.usage += len(queries)
url_to_results = {}
headers = {"Ocp-Apim-Subscription-Key": self.bing_api_key}
for query in queries:
try:
results = requests.get(
self.endpoint,
headers=headers,
params={**self.params, 'q': query}
).json()
for d in results['webPages']['value']:
if self.is_valid_source(d['url']) and d['url'] not in exclude_urls:
url_to_results[d['url']] = {'url': d['url'], 'title': d['name'], 'description': d['snippet']}
except Exception as e:
                logging.error(f'Error occurred while searching query {query}: {e}')
valid_url_to_snippets = self.webpage_helper.urls_to_snippets(list(url_to_results.keys()))
collected_results = []
for url in valid_url_to_snippets:
r = url_to_results[url]
r['snippets'] = valid_url_to_snippets[url]['snippets']
collected_results.append(r)
return collected_results
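

# Illustrative usage sketch (not part of the original module): how BingSearch might be configured
# with a domain filter via is_valid_source, assuming BING_SEARCH_API_KEY is set in the environment.
# The filter, queries, and _example_* function name are hypothetical.
def _example_bing_search_usage():
    """Illustrative only: search Bing while restricting sources to a single domain."""
    rm = BingSearch(
        k=3,
        is_valid_source=lambda url: 'wikipedia.org' in url,  # hypothetical domain filter
        mkt='en-US',
        language='en',
    )
    results = rm.forward(['quantum error correction', 'surface code'], exclude_urls=[])
    for r in results:
        print(r['url'], len(r['snippets']))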
class VectorRM(dspy.Retrieve):
    """Retrieve information from custom documents using Qdrant.

    To be compatible with STORM, the custom documents should have the following fields:
        - content: The main text content of the document.
        - title: The title of the document.
        - url: The URL of the document. STORM uses the URL as the unique identifier of the document,
          so ensure different documents have different URLs.
        - description (optional): The description of the document.
    The documents should be stored in a CSV file.
    """
def __init__(self,
collection_name: str = "my_documents",
embedding_model: str = 'BAAI/bge-m3',
device: str = "mps",
k: int = 3,
chunk_size: int = 500,
chunk_overlap: int = 100):
"""
Params:
collection_name: Name of the Qdrant collection.
embedding_model: Name of the Hugging Face embedding model.
device: Device to run the embeddings model on, can be "mps", "cuda", "cpu".
k: Number of top chunks to retrieve.
chunk_size: Size of each chunk if you need to build the vector store from documents.
chunk_overlap: Overlap between chunks if you need to build the vector store from documents.
"""
super().__init__(k=k)
self.usage = 0
model_kwargs = {"device": device}
encode_kwargs = {"normalize_embeddings": True}
self.model = HuggingFaceEmbeddings(
model_name=embedding_model, model_kwargs=model_kwargs, encode_kwargs=encode_kwargs
)
self.chunk_size = chunk_size
self.chunk_overlap = chunk_overlap
self.collection_name = collection_name
self.client = None
self.qdrant = None
def _check_create_collection(self):
"""
Check if the Qdrant collection exists and create it if it does not.
"""
if self.client is None:
raise ValueError("Qdrant client is not initialized.")
if self.client.collection_exists(collection_name=f"{self.collection_name}"):
print(f"Collection {self.collection_name} exists. Loading the collection...")
self.qdrant = Qdrant(
client=self.client,
collection_name=self.collection_name,
embeddings=self.model,
)
else:
print(f"Collection {self.collection_name} does not exist. Creating the collection...")
# create the collection
self.client.create_collection(
collection_name=f"{self.collection_name}",
vectors_config=models.VectorParams(size=1024, distance=models.Distance.COSINE),
)
self.qdrant = Qdrant(
client=self.client,
collection_name=self.collection_name,
embeddings=self.model,
)
def init_online_vector_db(self, url: str, api_key: str):
"""
Initialize the Qdrant client that is connected to an online vector store with the given URL and API key.
Args:
url (str): URL of the Qdrant server.
api_key (str): API key for the Qdrant server.
"""
if api_key is None:
if not os.getenv("QDRANT_API_KEY"):
raise ValueError("Please provide an api key.")
api_key = os.getenv("QDRANT_API_KEY")
if url is None:
raise ValueError("Please provide a url for the Qdrant server.")
try:
self.client = QdrantClient(url=url, api_key=api_key)
self._check_create_collection()
except Exception as e:
raise ValueError(f"Error occurs when connecting to the server: {e}")
def init_offline_vector_db(self, vector_store_path: str):
"""
Initialize the Qdrant client that is connected to an offline vector store with the given vector store folder path.
Args:
vector_store_path (str): Path to the vector store.
"""
if vector_store_path is None:
raise ValueError("Please provide a folder path.")
try:
self.client = QdrantClient(path=vector_store_path)
self._check_create_collection()
except Exception as e:
raise ValueError(f"Error occurs when loading the vector store: {e}")
def update_vector_store(
self,
file_path: str,
content_column: str,
title_column: str = "title",
url_column: str = "url",
desc_column: str = "description",
batch_size: int = 64
):
"""
Takes a CSV file where each row is a document and has columns for content, title, url, and description.
Then it converts all these documents in the content column to vectors and add them the Qdrant collection.
Args:
file_path (str): Path to the CSV file.
content_column (str): Name of the column containing the content.
title_column (str): Name of the column containing the title. Default is "title".
url_column (str): Name of the column containing the URL. Default is "url".
desc_column (str): Name of the column containing the description. Default is "description".
batch_size (int): Batch size for adding documents to the collection.
"""
if file_path is None:
raise ValueError("Please provide a file path.")
# check if the file is a csv file
        if not file_path.endswith('.csv'):
            raise ValueError("Invalid file format. Please provide a CSV file.")
if content_column is None:
raise ValueError("Please provide the name of the content column.")
if url_column is None:
raise ValueError("Please provide the name of the url column.")
if self.qdrant is None:
raise ValueError("Qdrant client is not initialized.")
# read the csv file
df = pd.read_csv(file_path)
# check that content column exists and url column exists
if content_column not in df.columns:
raise ValueError(f"Content column {content_column} not found in the csv file.")
if url_column not in df.columns:
raise ValueError(f"URL column {url_column} not found in the csv file.")
documents = [
Document(
page_content=row[content_column],
metadata={
"title": row.get(title_column, ''),
"url": row[url_column],
"description": row.get(desc_column, ''),
}
)
for row in df.to_dict(orient='records')
]
# split the documents
from langchain_text_splitters import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=self.chunk_size,
chunk_overlap=self.chunk_overlap,
length_function=len,
add_start_index=True,
separators=[
"\n\n",
"\n",
".",
"\uff0e", # Fullwidth full stop
"\u3002", # Ideographic full stop
",",
"\uff0c", # Fullwidth comma
"\u3001", # Ideographic comma
" ",
"\u200B", # Zero-width space
"",
]
)
split_documents = text_splitter.split_documents(documents)
# update and save the vector store
num_batches = (len(split_documents) + batch_size - 1) // batch_size
for i in tqdm(range(num_batches)):
start_idx = i * batch_size
end_idx = min((i + 1) * batch_size, len(split_documents))
self.qdrant.add_documents(
documents=split_documents[start_idx:end_idx],
batch_size=batch_size,
)
def get_usage_and_reset(self):
usage = self.usage
self.usage = 0
return {'VectorRM': usage}
    def get_vector_count(self):
        """
        Get the count of vectors in the collection.

        Returns:
            int: Number of vectors in the collection.
        """
        # QdrantClient.count returns a CountResult object; return its integer count so the
        # return value matches the documented int.
        return self.qdrant.client.count(collection_name=self.collection_name).count
def forward(self, query_or_queries: Union[str, List[str]], exclude_urls: List[str]):
"""
Search in your data for self.k top passages for query or queries.
Args:
query_or_queries (Union[str, List[str]]): The query or queries to search for.
exclude_urls (List[str]): Dummy parameter to match the interface. Does not have any effect.
Returns:
a list of Dicts, each dict has keys of 'description', 'snippets' (list of strings), 'title', 'url'
"""
queries = (
[query_or_queries]
if isinstance(query_or_queries, str)
else query_or_queries
)
self.usage += len(queries)
collected_results = []
        for query in queries:
            related_docs = self.qdrant.similarity_search_with_score(query, k=self.k)
            for doc, _score in related_docs:
                collected_results.append({
                    'description': doc.metadata['description'],
                    'snippets': [doc.page_content],
                    'title': doc.metadata['title'],
                    'url': doc.metadata['url'],
                })
return collected_results
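

# Illustrative end-to-end sketch (not part of the original module): build a local Qdrant store
# from a CSV and query it with VectorRM. The folder path, CSV path, and column name below are
# hypothetical; the CSV is assumed to follow the schema described in the VectorRM class docstring
# (content, title, url, description).
def _example_vector_rm_usage():
    """Illustrative only: ingest a CSV into an offline vector store and retrieve from it."""
    rm = VectorRM(collection_name='my_documents', device='cpu', k=3)
    rm.init_offline_vector_db(vector_store_path='./vector_store')  # hypothetical path
    rm.update_vector_store(file_path='./documents.csv', content_column='content')
    print(rm.get_vector_count())
    results = rm.forward('how does the storm retrieval module work', exclude_urls=[])
    for r in results:
        print(r['title'], r['url'])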