douyin.py
# -*- coding:utf-8 -*-
# @Time : 2022/2/7 17:34
# @Author: 应无所住,何生其心
# @File : douyin.py
# @Software : PyCharm
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
import re
from lxml import etree


class Douyu():
    def __init__(self):
        # Start URL: the "all rooms" directory page
        self.start_url = "https://www.douyu.com/directory/all"
        # Create a Chrome driver instance (chromedriver path is local to this machine)
        self.driver = webdriver.Chrome(
            service=Service(r'C:\Program Files\Google\Chrome\Application\chromedriver.exe'))
        # Flag used to write the CSV header row only once
        self.start_csv = True

    # def __del__(self):
    #     self.driver.quit()

    def get_content(self):
        # Sleep for two seconds so the page has time to load all of its content
        time.sleep(2)
        # Locate the element that navigates to the next page ('下一页' is the "Next Page" button text)
        next_page = self.driver.find_element(By.XPATH, "//span[text()='下一页']/..")
        # Read the attribute used to decide whether this is the last page
        is_next_url = next_page.get_attribute("aria-disabled")
        # Collect all <li> elements that hold the room information
        li_list = self.driver.find_elements(By.XPATH, "//ul[@class='layout-Cover-list']//li")
        # Extract the fields we need from each room card
        for li in li_list:
            item = {}
            item["user-id"] = li.find_element(By.XPATH, ".//div[@class='DyListCover-userName']").text
            item["img"] = li.find_element(By.XPATH, ".//div[@class='DyListCover-imgWrap']//img").get_attribute("src")
            item['class-name'] = li.find_element(By.XPATH, ".//span[@class='DyListCover-zone']").text
            item["click-hot"] = li.find_element(By.XPATH, ".//span[@class='DyListCover-hot']").text
            item["click-hot"] = re.sub(r'\n', '', item['click-hot'])
            # Save the extracted record
            self.save_csv(item)
        # Return the next-page element and the flag that says whether another page exists
        return next_page, is_next_url

    def save_csv(self, item):
        # Join the extracted values into one comma-separated line
        row = ','.join(item.values())
        with open('./douyu.csv', 'a', encoding='utf-8') as f:
            if self.start_csv:
                f.write("用户id,image,所属类,点击热度\n")
                self.start_csv = False
            # Append the record to the CSV file
            f.write(row)
            f.write('\n')
        print("save success")

    def run(self):
        # Start Chrome and open the directory page
        self.driver.get(self.start_url)
        while True:
            # Scrape the current page and get the next-page element
            next_page, is_next = self.get_content()
            # aria-disabled stays "false" while a next page exists; anything else means we are done
            if is_next != 'false':
                break
            # Click through to the next page
            next_page.click()
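
# Optional sketch, not called by the spider above: the WebDriverWait /
# expected_conditions imports at the top could replace the fixed time.sleep(2)
# with an explicit wait. The helper name and the 10-second timeout are
# illustrative assumptions, not part of the original code.
def wait_for_room_list(driver, timeout=10):
    # Block until the <ul> holding the room cards is present in the DOM,
    # raising TimeoutException if it does not appear within `timeout` seconds.
    return WebDriverWait(driver, timeout).until(
        EC.presence_of_element_located((By.XPATH, "//ul[@class='layout-Cover-list']"))
    )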

if __name__ == '__main__':
    douyu_spider = Douyu()
    douyu_spider.run()