AnitamaSpider.py
51 lines (41 loc) · 1.39 KB
# -*- coding: utf-8 -*-
import os
import re

import requests
from bs4 import BeautifulSoup

path = "Where_you_want_to_put_articles_in" + "/"  # file path: replace with your target directory
main = 'http://www.anitama.cn/'

def getHtmlText(url):
    # Fetch a page and return its decoded text; return a sentinel string on failure.
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        return "Something Wrong!"

def find_article_url(page):
    # Collect the article URLs listed on one page of the "all channels" index.
    url = main + 'channel/all/' + str(page)
    r = getHtmlText(url)
    print('page:' + str(page))
    soup = BeautifulSoup(r, 'lxml')
    article_url = re.findall(
        r'article/[0-9a-f]*', str(soup.find_all(id="area-article-channel")))
    for i in range(len(article_url)):
        article_url[i] = main + article_url[i]
    return article_url

def download(article_url):
    # Save the title, body and copyright blocks of each article as an HTML file.
    os.makedirs(path + "Article", exist_ok=True)
    flag = 1
    for aurl in article_url:
        r = getHtmlText(aurl)
        soup = BeautifulSoup(r, 'lxml')
        filename = path + "Article/" + soup.title.get_text().replace("/", "") + ".html"
        with open(filename, "w", encoding="utf-8") as f:
            f.write(soup.find(id="area-title-article").prettify())
            f.write(soup.find(id="area-content-article").prettify())
            f.write(soup.find(id="area-copyright-article").prettify())
        print("Success *" + str(flag))
        flag = flag + 1

if __name__ == '__main__':
    article_url = []
    for page in range(1, 301):
        article_url.extend(find_article_url(page))
    download(article_url)
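
A possible hardening sketch, assuming the same getHtmlText, path and main defined above: it skips articles whose fetch failed (getHtmlText signals failure with the "Something Wrong!" string) and pauses briefly between requests; the delay value is an assumed default, not something the original script specifies.

import time


def download_safely(article_url, delay=1.0):
    # Like download(), but tolerant of failed fetches and gentler on the server.
    os.makedirs(path + "Article", exist_ok=True)
    flag = 1
    for aurl in article_url:
        r = getHtmlText(aurl)
        if r == "Something Wrong!":
            # The fetch failed; skip this article instead of crashing in BeautifulSoup.
            print("Skipped " + aurl)
            continue
        soup = BeautifulSoup(r, 'lxml')
        filename = path + "Article/" + soup.title.get_text().replace("/", "") + ".html"
        with open(filename, "w", encoding="utf-8") as f:
            f.write(soup.find(id="area-title-article").prettify())
            f.write(soup.find(id="area-content-article").prettify())
            f.write(soup.find(id="area-copyright-article").prettify())
        print("Success *" + str(flag))
        flag = flag + 1
        time.sleep(delay)  # brief pause between requests (assumed politeness delay)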