Merge pull request #273 from flairNLP/remove_warn
GH257: Replaces logging.warn with logging.warning
MaxDall authored Jul 8, 2023
2 parents f39d393 + eac7f0a commit e7e2c7d
Showing 4 changed files with 8 additions and 7 deletions.
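For context: CPython's logging module keeps warn() only as an undocumented, deprecated alias of warning(), both at module level and on the Logger class, and calling it triggers a DeprecationWarning. A minimal sketch of the rename this commit applies (the logger name is a hypothetical stand-in for fundus' basic_logger):

import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("basic_logger")

logger.warn("old spelling")     # deprecated alias; CPython emits a DeprecationWarning
logger.warning("new spelling")  # the documented API, used throughout this commit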
2 changes: 1 addition & 1 deletion scripts/generate_parser_test_files.py

@@ -73,7 +73,7 @@ def get_test_article(enum: PublisherEnum) -> Optional[Article]:
 
     if args.overwrite or not html_mapping.get(publisher.parser.latest_version):
         if not (article := get_test_article(publisher)):
-            basic_logger.warn(f"Couldn't get article for {publisher.name}. Skipping")
+            basic_logger.warning(f"Couldn't get article for {publisher.name}. Skipping")
             continue
         html = HTMLTestFile(
             url=article.html.responded_url,
2 changes: 1 addition & 1 deletion src/fundus/parser/data.py

@@ -76,7 +76,7 @@ def get(self, ld_type: str, default: Any = None) -> Optional[LDMappingValue]:
 
         if not _displayed_deprecation_info:
             _displayed_deprecation_info = True
-            basic_logger.warn(
+            basic_logger.warning(
                 "LinkedDate.get() will be deprecated in the future. Use .get_value_by_key_path() "
                 "or .bf_search() instead"
             )
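The hunk above relies on a one-shot module flag so that repeated LinkedDate.get() calls log the deprecation notice only once. A minimal sketch of that pattern, with the surrounding fundus code assumed:

import logging

basic_logger = logging.getLogger(__name__)
_displayed_deprecation_info = False  # module-level guard, as in the diff

def get(ld_type, default=None):  # simplified stand-in for LinkedDate.get()
    global _displayed_deprecation_info
    if not _displayed_deprecation_info:
        _displayed_deprecation_info = True  # flip first so the notice fires exactly once
        basic_logger.warning(
            "LinkedDate.get() will be deprecated in the future. Use .get_value_by_key_path() "
            "or .bf_search() instead"
        )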
7 changes: 4 additions & 3 deletions src/fundus/scraping/html.py

@@ -1,5 +1,4 @@
 import gzip
-import re
 from abc import ABC, abstractmethod
 from dataclasses import dataclass, field
 from datetime import datetime
@@ -107,7 +106,7 @@ async def _get_pre_filtered_urls(self) -> AsyncIterator[str]:
             html = await response.text()
             rss_feed = feedparser.parse(html)
             if exception := rss_feed.get("bozo_exception"):
-                basic_logger.warn(f"Warning! Couldn't parse rss feed '{self.url}' because of {exception}")
+                basic_logger.warning(f"Warning! Couldn't parse rss feed '{self.url}' because of {exception}")
                 return
             else:
                 for url in (entry["link"] for entry in rss_feed["entries"]):
@@ -221,7 +220,9 @@ async def fetch(self) -> AsyncIterator[HTML]:
                 continue
 
             except Exception as error:
-                basic_logger.warn(f"Warning! Skipped requested URL '{url}' because of an unexpected error {error}")
+                basic_logger.warning(
+                    f"Warning! Skipped requested URL '{url}' because of an unexpected error {error}"
+                )
                 continue
 
             if response.history:
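For reference, feedparser never raises on malformed input: it sets a bozo flag and stores the underlying error under bozo_exception, which is exactly what the check in the hunk above reads. A self-contained sketch with invented feed content:

import feedparser

# feedparser swallows parse errors; the underlying exception is kept
# in the result dict instead of being raised.
rss_feed = feedparser.parse("<rss><channel><title>broken")  # unclosed tags
if exception := rss_feed.get("bozo_exception"):
    print(f"Couldn't parse rss feed because of {exception}")
else:
    print([entry["link"] for entry in rss_feed["entries"]])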
4 changes: 2 additions & 2 deletions src/fundus/scraping/scraper.py

@@ -30,12 +30,12 @@ async def scrape(
         )
         if missing_attributes := extraction_filter.required_attributes - supported_attributes:
             if len(missing_attributes) == 1:
-                basic_logger.warn(
+                basic_logger.warning(
                     f"The required attribute `{missing_attributes}` "
                     f"is not supported by {type(self.parser).__name__}. Skipping Scraper"
                 )
             else:
-                basic_logger.warn(
+                basic_logger.warning(
                     f"The required attributes `{', '.join(missing_attributes)}` "
                     f"are not supported by {type(self.parser).__name__}. Skipping Scraper"
                 )
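The check above works by set difference: subtracting the parser's supported attributes from the required ones leaves exactly the missing ones, and a non-empty set is truthy under the walrus assignment. A small illustration with invented attribute names:

# Set difference yields the attributes the parser cannot provide.
required_attributes = {"title", "body", "topics"}
supported_attributes = {"title", "body"}

if missing_attributes := required_attributes - supported_attributes:
    print(f"missing: {', '.join(missing_attributes)}")  # missing: topics

Note that the singular branch in the hunk interpolates the whole set into the message, so it renders with set braces (e.g. `{'topics'}`) rather than the bare attribute name.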
