-
Notifications
You must be signed in to change notification settings - Fork 166
/
scraper_nahdiFirebaseIntegration.py
45 lines (45 loc) · 1.77 KB
/
scraper_nahdiFirebaseIntegration.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
import requests
import time
from bs4 import BeautifulSoup
import json
import csv

# Scrape paginated hair-styling product listings from nahdionline.com and
# record each product in three places: a CSV file, a JSON array file, and a
# Firebase REST endpoint (one POST per product).
BASE_URL = 'https://www.nahdionline.com/ar/daily-essentials/hair-products/hair-styling?p='
API_ENDPOINT = "https://f.firebaseio.com/products.json"
CSV_COLUMNS = ['name', 'category', 'price', 'img']
# Category name is the last URL path segment with the '?p=' query stub removed
# (the original wrote the unstripped segment into the CSV — inconsistent with JSON).
CATEGORY = BASE_URL.rsplit('/', 1)[1].replace('?p=', '')

products = []
for page in range(20):
    print('---', page, '---')
    page_url = BASE_URL + str(page)
    print(page_url)
    response = requests.get(page_url)
    soup = BeautifulSoup(response.content, "html.parser")
    listings = soup.find_all('li', {'class': 'item product product-item'})
    for listing in listings:
        name_el = listing.find('strong', {'class': 'product name product-item-name'})
        price_el = listing.find('span', {'class': 'price'})
        img_wrap = listing.find('span', {'class': 'product-image-wrapper'})
        # Skip partially-rendered listings instead of crashing on a None lookup.
        if name_el is None or price_el is None or img_wrap is None:
            continue
        img_tag = img_wrap.find('img')
        product = {
            'name': name_el.text.replace(' \n', '').replace('\n\n ', ''),
            'category': CATEGORY,
            'price': price_el.text,
            'img': img_tag['src'] if img_tag is not None else '',
        }
        products.append(product)
        # Push the product to Firebase as soon as it is scraped.
        payload = json.dumps(product, ensure_ascii=False)
        requests.post(url=API_ENDPOINT, data=payload.encode('utf-8'))

# One header and one row per product. (The original re-wrote the header every
# page and emitted each field as its own CSV row, producing an unusable file.)
with open('nahdionline.csv', 'w', encoding='utf8', newline='') as filecsv:
    writer = csv.DictWriter(filecsv, fieldnames=CSV_COLUMNS)
    writer.writeheader()
    writer.writerows(products)

# Serialize the whole list at once so the output is valid JSON. (The original
# hand-wrote '[', ',\n' separators and ']' and left a trailing comma, which is
# not parseable by json.loads.)
with open('nahdionline.json', 'w', encoding='utf8') as file:
    json.dump(products, file, ensure_ascii=False, indent=2)