-
Notifications
You must be signed in to change notification settings - Fork 0
/
practice_beautifulsoap.py
69 lines (50 loc) · 2.33 KB
/
practice_beautifulsoap.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
from bs4 import BeautifulSoup
import requests
# Module-level cache mapping language display name -> Wikipedia language code
# (e.g. "English" -> "en"); populated by lang() and read by data().
language_symbols = {}
def lang():
    """Fetch the language list from www.wikipedia.org's <option> elements.

    Populates the module-level ``language_symbols`` cache
    (display name -> lang code) as a side effect.

    Returns:
        list[str]: language display names, or an empty list if the
        request fails.
    """
    try:
        # timeout prevents hanging forever on an unresponsive server
        response = requests.get("https://www.wikipedia.org/", timeout=10)
        response.raise_for_status()  # Raise an exception if there's an error with the request
    except requests.exceptions.RequestException as e:
        print("Error fetching language data:", e)
        return []
    soup = BeautifulSoup(response.content, 'html.parser')
    for option in soup.find_all('option'):
        # Some <option> tags may lack a lang attribute; the original
        # option['lang'] indexing raised KeyError on those — skip instead.
        symbol = option.get('lang')
        if symbol:
            language_symbols[option.text] = symbol
    return list(language_symbols.keys())
def data(selected_topic, selected_language):
    """Fetch a Wikipedia article and return its filtered text content.

    Headings (h1-h6) are uppercased on their own lines; paragraph text
    follows. Requires ``language_symbols`` to have been populated by lang().

    Args:
        selected_topic: article title to fetch.
        selected_language: language display name (key of language_symbols).

    Returns:
        str: the filtered article text, or "Error fetching data." on failure.
    """
    symbol = language_symbols.get(selected_language)
    if symbol is None:
        # Without this guard an unknown language produced the bogus URL
        # "https://None.wikipedia.org/..." and failed only at DNS time.
        print("Unknown language:", selected_language)
        return "Error fetching data."
    try:
        url = f"https://{symbol}.wikipedia.org/wiki/{selected_topic}"
        data_response = requests.get(url, timeout=10)
        data_response.raise_for_status()  # Raise an exception if there's an error with the request
    except requests.exceptions.RequestException as e:
        print("Error fetching Wikipedia content:", e)
        return "Error fetching data."
    data_soup = BeautifulSoup(data_response.content, 'html.parser')
    main_content = data_soup.find('div', {'id': 'mw-content-text'})
    # Collect pieces and join once — avoids quadratic string concatenation.
    parts = []
    if main_content:
        for element in main_content.descendants:
            if element.name in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6'):
                parts.append("\n" + element.get_text(strip=True).upper() + "\n")
            elif element.name == 'p':
                parts.append(element.get_text(strip=True) + "\n")
    return "".join(parts)
def get_image_urls(query):
    """Return the first http(s) image URL from a Google Images search.

    Args:
        query: search terms (NOTE(review): interpolated into the URL
        unencoded — consider urllib.parse.quote_plus for special chars).

    Returns:
        str | None: the first matching image URL, or None if the request
        fails or no image is found.
    """
    try:
        search_url = f"https://www.google.com/search?q={query}&tbm=isch"
        # timeout prevents hanging forever on an unresponsive server
        image_response = requests.get(search_url, timeout=10)
        image_response.raise_for_status()  # Raise an exception if there's an error with the request
    except requests.exceptions.RequestException as e:
        print("Error fetching image URLs:", e)
        return None
    image_soup = BeautifulSoup(image_response.content, 'html.parser')
    for img in image_soup.find_all('img'):
        image_url = img.get('src')
        if image_url and image_url.startswith("http"):
            # Return the first hit directly — no need to build the full list.
            return image_url
    # Original did image_urls[0], raising IndexError when nothing matched;
    # return None to match the function's documented failure value.
    return None