scrape.py
import argparse
import itertools
import json
import os
from os.path import join
import pathlib
import subprocess
import tempfile
from urllib.parse import urlparse, parse_qs, urlencode
import xml.etree.ElementTree as ET
import zipfile
import dateutil.parser
import requests
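
# OAI-PMH harvester: walks a ListRecords result set page by page (following
# resumptionTokens), and appends each record's XML -- and optionally its PDFs,
# combined into one file with pdftk -- to a zip archive given on the command line.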
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('start')
    parser.add_argument('output')
    parser.add_argument('--max-iter')      # accepted but not used below
    parser.add_argument('--verbose', '-v') # accepted but not used below
    parser.add_argument('--pdf', action='store_true')
    parser.add_argument('--meta', action='store_true')  # accepted but not used below
    args = parser.parse_args()

    # XML namespaces used in the OAI-PMH responses
    ns = {
        "oai": "http://www.openarchives.org/OAI/2.0/",
        "dcterms": "http://purl.org/dc/terms/",
        "kk": "http://www.kansalliskirjasto.fi/oai",
    }
    N_papers = 0
    url = args.start

    # PDF temporary directory
    pdftmpdir = tempfile.TemporaryDirectory(dir=os.environ['XDG_RUNTIME_DIR'], prefix='scrape')

    with zipfile.ZipFile(args.output, 'a') as data, \
         open(args.output+'.meta', 'a') as metadata:
        namelist = set(data.namelist())
        #print(namelist)

        for i in itertools.count():
            print()
            print(f'{i:-5} {url}')

            # Get the page
            r = requests.get(url)
            page = ET.fromstring(r.text)
            page_data = {
                'i': i,
                "url": url,
                "page": r.text,
            }
            #data.open(f'listing/{i:06d}.json', 'w').write(json.dumps(page_data).encode())
            # Get all records
            records = page.findall(".//oai:record", ns)

            # Parse all records
            for record in records:
                identifier = record.find('.//oai:identifier', ns).text.replace('/', '%2F')

                # Has this been deleted?  (Elements are falsy even when found,
                # so compare against None explicitly.)
                if record.find(".//oai:header[@status='deleted']", ns) is not None:
                    print(" INFO: record deleted")
                    continue

                # Get basic info
                date = dateutil.parser.parse(record.find('.//dcterms:issued', ns).text)
                year = date.year
                print(f' {N_papers:-6} {year} {identifier}')
                N_papers += 1
                #abstract = record.find('.//dcterms:abstract', ns).text
                record_str = ET.tostring(record)
                #import IPython ; IPython.embed() ; exit()   # debugging leftover; would abort after the first record
                ##
                ## PDFs
                ##
                # Already in archive?
                pdf_combined_name = f'pdf-combined/{year}/{identifier}.pdf'
                if pdf_combined_name in namelist:
                    print(' already present')
                    continue

                if not args.pdf:
                    continue
                else:
                    files = record.findall('.//kk:file', ns)
                    #for file in files:
                    #    fname = file.attrib['href']
                    # Map each file URL to a numbered temporary path for download
                    fnames = { f.attrib['href']: os.path.join(pdftmpdir.name, f'{i:04}.pdf')
                               for (i, f) in enumerate(files) }
                    if len(fnames) == 1:
                        # Single file: download it directly (use a separate name so
                        # the listing-page `url` is not clobbered)
                        pdf_url = next(iter(fnames))
                        try:
                            full_pdf = requests.get(pdf_url).content
                        except UnicodeDecodeError:
                            print(" ERROR: UnicodeError when downloading")
                            continue
                    else:
                        # Download and save
                        for pdf_url, fname in fnames.items():
                            try:
                                content = requests.get(pdf_url).content
                            except UnicodeDecodeError:
                                print(" ERROR: UnicodeError when downloading")
                                continue
                            open(fname, 'wb').write(content)
                        # Combine all PDFs
                        pdf_combined = join(pdftmpdir.name, 'combined.pdf')
                        cmd = ['pdftk', ] + list(fnames.values()) + ['cat', 'output', pdf_combined]
                        #print(cmd)
                        #import IPython ; IPython.embed()
                        subprocess.call(cmd)
                        # combining may not succeed.  In which case, ignore.
                        if not os.access(pdf_combined, os.F_OK):
                            print(' ERROR: PDF not combined')
                            continue
                        full_pdf = open(pdf_combined, 'rb').read()
                        os.unlink(pdf_combined)

                    # Save to zipfile
                    data.open(pdf_combined_name, 'w').write(full_pdf)
                    data.open(f'record/{year}/{identifier}.xml', 'w').write(record_str)
                    del full_pdf
            # Find resumption token; if there is none, this was the last page
            rt_element = page.find('.//oai:resumptionToken', ns)
            #print(rt_element)
            if rt_element is None:
                #import IPython ; IPython.embed()   # debugging hook; would block a non-interactive run
                break
            rt = rt_element.text

            # Construct new URL: the resumptionToken replaces the set/metadataPrefix arguments
            u = urlparse(args.start)
            query = parse_qs(u.query)
            del query['set']
            del query['metadataPrefix']
            query['resumptionToken'] = [rt]
            url = u._replace(query=urlencode(query, doseq=True, safe='/')).geturl()
            #exit(1)

if __name__ == "__main__":
    main()
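
# Example invocation (a sketch -- the OAI-PMH endpoint below is illustrative,
# not a URL taken from this repository; any ListRecords URL that carries
# set= and metadataPrefix= query arguments should work the same way):
#
#   python scrape.py \
#       'https://example.org/oai/request?verb=ListRecords&metadataPrefix=kk&set=some_set' \
#       papers.zip --pdf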