Skip to content

Commit

Permalink
cleanup
Browse files Browse the repository at this point in the history
  • Loading branch information
gdmoney committed Dec 17, 2021
1 parent 389d2de commit 285ee18
Show file tree
Hide file tree
Showing 3 changed files with 8 additions and 6 deletions.
5 changes: 3 additions & 2 deletions AWS/lambda_function.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,9 +14,10 @@
from urls import URL_LIST

# loop through the URLs
for page in URL_LIST:
for each_url in URL_LIST:
# query each website to retrieve its HTML, parse the HTML with Beautiful Soup, and store the result in the variable 'soup'
soup = BeautifulSoup(requests.get(page).content, 'html.parser')
page = requests.get(each_url)
soup = BeautifulSoup(page.content, 'html.parser')

# take out the <h1> holding the product name and get its value
product_name_box = soup.find('h1', attrs={'class': 'h1 product-name text-uppercase d-none d-sm-block large-devices'})
Expand Down
5 changes: 3 additions & 2 deletions GCP/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,9 +10,10 @@
from urls import URL_LIST

# loop through the URLs
for page in URL_LIST:
for each_url in URL_LIST:
# query each website to retrieve its HTML, parse the HTML with Beautiful Soup, and store the result in the variable 'soup'
soup = BeautifulSoup(requests.get(page).content, 'html.parser')
page = requests.get(each_url)
soup = BeautifulSoup(page.content, 'html.parser')

# take out the <h1> holding the product name and get its value
product_name_box = soup.find('h1', attrs={'class': 'h1 product-name text-uppercase d-none d-sm-block large-devices'})
Expand Down
4 changes: 2 additions & 2 deletions __main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,9 +18,9 @@
from urls import URL_LIST

# loop through the URLs
for website in URL_LIST:
for each_url in URL_LIST:
# query each website to retrieve its HTML, parse the HTML with Beautiful Soup, and store the result in the variable 'soup'
page = requests.get(website)
page = requests.get(each_url)
soup = BeautifulSoup(page.content, 'html.parser')

# take out the <div> of name and get its value
Expand Down

0 comments on commit 285ee18

Please sign in to comment.