url_parsing.py
import pandas as pd
import numpy as np
from collections import namedtuple
from hashlib import md5
import re

from postgres_ops import get_pg_engine, bulk_upload_to_pg, truncate_or_drop_tables, camel_to_snake, create_tables
from utils import timed, mem_and_info

# Record for a resolved URL; all five fields default to None.
urlRecord = namedtuple('urlRecord', ['url', 'documentType', 'title', 'documentId', 'author'])
urlRecord.__new__.__defaults__ = (None,) * 5
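# Thanks to the defaults, partial records are easy to build, e.g.:
#   urlRecord(url='/', documentType='frontpage')
#   urlRecord() == urlRecord(url=None, documentType=None, title=None, documentId=None, author=None)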
def get_urls(start_date='2020-01-01'):
    # Fetch all distinct URLs seen in the traffic tables since start_date.
    start_date = start_date or '2020-01-01'  # guard against an explicit None being passed through
    query = """SELECT DISTINCT url FROM
        (SELECT DISTINCT path AS url FROM lessraw_small WHERE timestamp >= '{0}'
        UNION
        SELECT DISTINCT ga_page_path AS url FROM ga_pages WHERE date >= '{0}'
        ) sub
        WHERE url IS NOT NULL""".format(start_date)
    engine = get_pg_engine()
    with engine.begin() as conn:
        urls = pd.read_sql(query, conn)
    engine.dispose()
    return urls
homepage_pattern = re.compile(r'(^/$)|(^/\?)', re.IGNORECASE)
post_patterns = re.compile(r'(?<=/s/\w{17}/p/)(\w{17})|(?<=/posts/)\w{17}', re.IGNORECASE)
post_custom_pattern = re.compile(r'((?<=/rationality/)|(?<=/codex/))([\w-]+)|((?<=/lw/\w{2}/)|(?<=/lw/\w{3}/))\w+', re.IGNORECASE)
sequence_patterns = re.compile(r'(?<=/s/)\w{17}$', re.IGNORECASE)
old_wiki_patterns = re.compile(r'wiki\.lesswrong', re.IGNORECASE)
user_pattern = re.compile(r'((?<=\/user/)|(?<=\/users/))(\w+)', re.IGNORECASE)
tags_pattern = re.compile(r'(?<=\/tag\/)([\w-]+)', re.IGNORECASE)
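# Quick illustration of what the patterns capture (the 17-character IDs below are made up):
#   post_patterns.search('/posts/abcdefghijklmnopq').group(0)   -> 'abcdefghijklmnopq'
#   post_custom_pattern.search('/rationality/preface').group(0) -> 'preface'
#   sequence_patterns.search('/s/abcdefghijklmnopq').group(0)   -> 'abcdefghijklmnopq'
#   user_pattern.search('/users/janedoe').group(0)              -> 'janedoe'
#   tags_pattern.search('/tag/bayes-theorem').group(0)          -> 'bayes-theorem'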
def resolve_url_uncurried(url, dfs):  # TODO: parse /users/ patterns; TODO: parse external urls
    def pattern_search(pattern):
        return re.search(pattern, url)  # patterns are precompiled with re.IGNORECASE

    def simple_record_pattern(pattern, document_type=None):
        if not document_type:
            document_type = pattern
        if pattern_search(pattern):
            return urlRecord(url=url, documentType=document_type)
        else:
            return False

    posts = dfs['posts']
    sequences = dfs['sequences']
    users = dfs['users']
    tags = dfs['tags']

    ## Frontpage: "/" or "/?<query>"
    if pattern_search(homepage_pattern):
        return urlRecord(
            url=url,
            documentType='frontpage'
        )
    ## Posts
    # standard post url: "/posts/<id>" or "/s/<sequence_id>/p/<post_id>"
    matches = pattern_search(post_patterns)
    if matches:
        postId = matches.group(0)
        matching_posts = posts[posts['_id'] == postId]
        if not matching_posts.empty:
            post = matching_posts.iloc[0]
            return urlRecord(
                url=url,
                documentType='post',
                title=post['title'],
                documentId=postId,
                author=post['displayName']
            )
    # "/rationality/<slug>", "/codex/<slug>", or "/lw/<short_id>/<slug>" (resolve on slug)
    matches = pattern_search(post_custom_pattern)
    if matches:
        post_slug = matches.group(0).replace('_', '-')
        post = posts[posts['slug'] == post_slug]
        if not post.empty:
            post = post.iloc[0]
            return urlRecord(
                url=url,
                documentType='post',
                title=post['title'],
                documentId=post['_id'],
                author=post['displayName']
            )
        else:
            return urlRecord(
                url=url,
                documentType='post',
            )
    ## Sequences
    # "/s/<sequenceId>"
    matches = pattern_search(sequence_patterns)
    if matches:
        sequenceId = matches.group(0)
        sequence = sequences[sequences['_id'] == sequenceId]
        if not sequence.empty:
            sequence = sequence.iloc[0]
            return urlRecord(
                url=url,
                documentType='sequence',
                title=sequence['title'],
                documentId=sequenceId
            )
        else:
            return urlRecord(
                url=url,
                documentType='sequence',
                documentId=sequenceId
            )
    ## Old Wiki
    # "wiki.lesswrong.com/..."
    matches = pattern_search(old_wiki_patterns)
    if matches:
        return urlRecord(
            url=url,
            documentType='old_lw_wiki',
        )
    ## Users
    def sluggify(name):
        # Approximate the user slug from the username (measured ~99.2% accurate);
        # ideally the slug would be pulled from the user object instead.
        if isinstance(name, str):
            slug = name.lower()
            for char in [' ', '.']:
                slug = slug.replace(char, '-')
            for bad_char in [',', '(', ')']:
                slug = slug.replace(bad_char, '')
            return slug[:60]
        else:
            return ''
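    # For reference, on made-up names: sluggify('Jane Doe') -> 'jane-doe',
    # sluggify('J. Doe (test)') -> 'j--doe-test', sluggify(np.nan) -> ''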
    # "/user/<slug>" or "/users/<slug>"
    matches = pattern_search(user_pattern)
    if matches:
        userSlug = matches.group(0)
        # lowercase the captured slug to match sluggify's all-lowercase output
        matching_users = users[users['username'].apply(sluggify) == userSlug.lower()]
        if not matching_users.empty:
            user = matching_users.iloc[0]
            return urlRecord(
                url=url,
                title=user['displayName'],
                documentId=user['_id'],
                documentType='/user'
            )
        else:
            return urlRecord(
                url=url,
                documentType='/user'
            )
    ## Tags
    # "/tag/<slug>"
    matches = pattern_search(tags_pattern)
    if matches:
        tagSlug = matches.group(0)
        tag = tags[tags['slug'] == tagSlug.lower()]
        if not tag.empty:
            tag = tag.iloc[0]
            return urlRecord(
                url=url,
                title=tag['name'],
                documentId=tag['_id'],
                documentType='/tag/'
            )
        else:
            return urlRecord(
                url=url,
                documentType='/tag/'
            )
    ## Simple static pages: the matched pattern doubles as the documentType
    for pattern in [r'/allPosts', r'/about', r'/events', r'/inbox', r'/search', r'/verify-email', r'/editPost',
                    r'/community', r'/groups', r'/coronavirus-link-database', r'/shortform', r'/tags',
                    r'/codex', r'/rationality'
                    ]:
        record = simple_record_pattern(pattern)
        if record:
            return record
    ## Doesn't match any known pattern
    return urlRecord(url)
def resolve_urls(df, dfs, url_col='url'):
    unique_urls = df.dropna().drop_duplicates(subset=url_col)
    urls_resolved = unique_urls[url_col].astype(str).apply(lambda x: pd.Series(data=resolve_url_uncurried(x, dfs)))
    urls_resolved.columns = ['url', 'type', 'title', 'documentId', 'author']
    urls_resolved = urls_resolved.fillna(np.nan)
    # onsite = relative urls starting with "/"
    urls_resolved['onsite'] = urls_resolved['url'].str.match(r'(^\/)') & ~urls_resolved['url'].str.match('http')
    return urls_resolved
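# e.g. resolve_urls(pd.DataFrame({'url': ['/', '/tag/bayes-theorem']}), dfs) returns one row
# per unique url with columns ['url', 'type', 'title', 'documentId', 'author', 'onsite'].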
@timed
def get_resolved_urls(dfs, sample=None, start_date=None, url_col='url'):
    urls = get_urls(start_date)
    if sample:
        urls = urls.sample(sample)
    urls_resolved = resolve_urls(urls, dfs)
    urls_resolved['url_hash'] = urls_resolved[url_col].apply(lambda x: md5(x.encode()).hexdigest())
    urls_resolved.columns = urls_resolved.columns.to_series().apply(camel_to_snake)
    cols = ['url', 'type', 'title', 'author', 'document_id', 'url_hash']
    return urls_resolved[cols]
@timed
def run_url_table_update(dfs, override_start=None):
    engine = get_pg_engine()
    with engine.begin() as conn:
        # Download the current PG url table
        query = """SELECT * FROM urls"""
        urls_existing = pd.read_sql(query, conn)
        # Determine the start date: explicit override, else the newest birth in the table
        if override_start:
            start_date = override_start
        else:
            start_date = urls_existing['birth'].max()
        # Download & resolve new URLs since ~birth (minus one day of overlap for safety)
        urls_resolved_new = get_resolved_urls(
            dfs, start_date=(pd.to_datetime(start_date) - pd.Timedelta('1 days')).strftime('%Y-%m-%d'))
        urls_resolved_new['birth'] = pd.Timestamp.now()  # pd.datetime.now() was removed from pandas
        # Append new urls to the existing table, dropping duplicates
        urls_updated = pd.concat([urls_existing, urls_resolved_new]).drop_duplicates(subset=['url_hash'])
        # Replace the existing PG table
        print(mem_and_info(urls_updated))
        truncate_or_drop_tables('urls', conn, drop=True)
        create_tables('urls', conn)
        bulk_upload_to_pg(urls_updated, table_name='urls', conn=conn)
    engine.dispose()
## tests (these assume a curried helper, e.g. resolve_url = lambda u: resolve_url_uncurried(u, dfs))
# resolve_url('http://wiki.lesswrong.com/wiki/Mysterious_Answers_to_Mysterious_Questions')
# resolve_url('/lw/y8/interlude_with_the_confessor_48/')
# resolve_url('/s/5g5TkQTe9rmPS5vvM/p/CPm5LTwHrvBJCa9h5#cite.0.Buehler.2002')
# resolve_url('/lw/3w3/how_to_beat_procrastination/')
# resolve_url('/rationality/preface')
# resolve_url('/codex/eight-short-studies-on-excuses')
# resolve_url('/')
# resolve_url('https://www.lesswrong.com/s/fqh9TLuoquxpducDb/p/Masoq4NdmmGSiq2xw')
# resolve_url('https://www.lesswrong.com/posts/895quRDaK6gR2rM82/diseased-thinking-dissolving-questions-about-d...')
# resolve_url('https://www.lesswrong.com/s/9bvAELWc8y2gYjRav')
# resolve_url('/s/5g5TkQTe9rmPS5vvM')
# resolve_url('www.leesdsafasdfdsaf')
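
# Minimal offline smoke test: the tiny DataFrames below are made-up stand-ins for the real
# posts/sequences/users/tags tables, just to exercise resolve_url_uncurried without Postgres.
if __name__ == '__main__':
    _dfs = {
        'posts': pd.DataFrame([{'_id': 'abcdefghijklmnopq', 'slug': 'preface',
                                'title': 'Preface', 'displayName': 'Example Author'}]),
        'sequences': pd.DataFrame([{'_id': 'qponmlkjihgfedcba', 'title': 'Example Sequence'}]),
        'users': pd.DataFrame([{'_id': 'u1', 'username': 'janedoe', 'displayName': 'Jane Doe'}]),
        'tags': pd.DataFrame([{'_id': 't1', 'slug': 'bayes-theorem', 'name': 'Bayes theorem'}]),
    }
    for test_url in ['/', '/posts/abcdefghijklmnopq', '/rationality/preface',
                     '/s/qponmlkjihgfedcba', '/tag/bayes-theorem', '/users/janedoe',
                     'http://wiki.lesswrong.com/wiki/Something']:
        print(test_url, '->', resolve_url_uncurried(test_url, _dfs))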