Commit 08b1809
refactor: handle 404 exception with decorator and logically (#55)
Co-authored-by: tokitou-san <[email protected]>
svlobao and moonlitgrace authored Sep 23, 2023
1 parent cccbcb6 commit 08b1809
Showing 3 changed files with 61 additions and 21 deletions.
20 changes: 19 additions & 1 deletion app/api/decorators/return_decorator.py
@@ -1,3 +1,4 @@
+from fastapi import HTTPException
 import functools
 from collections.abc import Callable
 from typing import Any, TypeVar
@@ -13,10 +14,27 @@ def decorator(func: Callable[..., Any]) -> Callable[..., T]:
         def wrapper(*args: Any, **kwargs: Any) -> T:
             try:
                 return func(*args, **kwargs)

             except AttributeError:
                 return return_type

         return wrapper

     return decorator

+
+def return_on_404() -> Callable[..., Callable[..., Any]]:
+    def decorator(func: Callable[..., Any]) -> Callable[..., Any]:
+        @functools.wraps(func)
+        async def wrapper(*args: Any, **kwargs: Any) -> Any:
+            try:
+                return await func(*args, **kwargs)
+            # propagates HTTPException from function
+            except HTTPException as http_exception:
+                raise http_exception
+            # catches all other exceptions
+            except Exception:
+                raise HTTPException(status_code=404, detail="Page not found")
+
+        return wrapper
+
+    return decorator
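
The new decorator goes under the route decorator, so FastAPI registers the wrapped endpoint and the wrapper sees every exception the handler raises. A minimal usage sketch (the app and the /demo route here are hypothetical, not part of this commit):

    from fastapi import FastAPI, HTTPException

    from app.api.decorators.return_decorator import return_on_404

    app = FastAPI()


    @app.get("/demo/{slug}")
    @return_on_404()
    async def demo(slug: str):
        if slug == "bad":
            # an explicit HTTPException is re-raised untouched, so this 400 survives
            raise HTTPException(status_code=400, detail="Bad slug")
        # any other failure (KeyError, AttributeError, a scraper error, ...)
        # is converted into a generic 404 "Page not found"
        return {"slug": slug}

Because HTTPException is re-raised before the catch-all, the specific 404 messages added in endpoints.py below reach the client intact instead of being rewritten to "Page not found".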
60 changes: 41 additions & 19 deletions app/api/endpoints.py
@@ -3,6 +3,9 @@
 # helpers
 from .helpers.string import StringHelper

+# decorators
+from .decorators.return_decorator import return_on_404
+
 # scrapers
 from .scrapers.popular import PopularScraper
 from .scrapers.topten import TopTenScraper
@@ -24,91 +27,100 @@

 # router endpoints
 @router.get(
-    "/popular",
+    path="/popular",
     response_model=list[PopularMangaModel],
     summary="Popular Mangas",
     description="Get a list of Mangas which is popular/trending this season. Returns basic details of mangas, use its `slug` to get more details of Manga.",
 )
+@return_on_404()
 async def get_popular(offset: int = 0, limit: int = Query(10, le=10)):
     response = PopularScraper().scrape()
     return response[offset : offset + limit]


 @router.get(
-    "/top-10",
+    path="/top-10",
     response_model=list[TopTenMangaModel],
     summary="Top 10 Mangas",
     description="Get a list of Mangas which is top 10 this season. Returns basic details of mangas, use its `slug` to get more details of Manga.",
 )
+@return_on_404()
 async def get_top_ten(offset: int = 0, limit: int = Query(10, le=10)):
     response = TopTenScraper().scrape()
     return response[offset : offset + limit]


 @router.get(
-    "/most-viewed/{chart}",
+    path="/most-viewed/{chart}",
     response_model=list[MostViewedMangaModel],
     summary="Most Viewed Mangas",
     description="Get a list of Mangas which is most viewed by chart - `today` `week` `month`. Returns basic details of mangas, use its `slug` to get more details of Manga.",
 )
+@return_on_404()
 async def get_most_viewed(chart: str, offset: int = 0, limit: int = Query(10, le=10)):
     most_viewed_scraper = MostViewedScraper()

     if chart in most_viewed_scraper.CHARTS:
         response = most_viewed_scraper.scrape(chart)
         return response[offset : offset + limit]
-    else:
-        message = f"Passed query ({chart}) is invalid. Valid queries {' | '.join(most_viewed_scraper.CHARTS)}"
-        status_code = 400
-
-        raise HTTPException(
-            detail={"message": message, "status_code": status_code},
-            status_code=status_code,
-        )
+    raise HTTPException(status_code=404, detail=f"Invalid chart {chart}")


 @router.get(
-    "/manga/{slug}",
+    path="/manga/{slug}",
     response_model=MangaModel,
     summary="Manga",
     description="Get more details about a specific Manga by `slug`, eg: `/manga/one-piece-3/` - returns the full details of that specific Manga.",
 )
+@return_on_404()
 async def get_manga(slug: str):
-    response = BaseMangaScraper(url=f"https://mangareader.to/{slug}").build_dict()
+    response = BaseMangaScraper(url=f"https://mangareader.to/{slug}").scrape()
+
+    if not response["title"]:
+        raise HTTPException(status_code=404, detail=f"Manga with slug {slug} was not found")
     return response


 @router.get(
-    "/search",
+    path="/search",
     response_model=list[BaseSearchModel],
     summary="Search Mangas",
     description="Search Mangas with a `keyword` as query. eg: `/search/?keyword=one piece/` - returns a list of Mangas according to this keyword.",
 )
+@return_on_404()
 async def search(
     keyword: str, page: int = 1, offset: int = 0, limit: int = Query(10, le=18)
 ):
     url = f"https://mangareader.to/search?keyword={keyword}&page={page}"
     response = BaseSearchScraper(url).scrape()
+
+    if not response:
+        raise HTTPException(
+            status_code=404, detail=f"Manga with keyword {keyword} was not found"
+        )
     return response[offset : offset + limit]


 @router.get(
-    "/random",
+    path="/random",
     response_model=MangaModel,
     summary="Random",
     description="Get details about random Manga. Returns a `dict` of randomly picked Manga. Note: some fields might be `null` because all animes are not registered properly in database.",
 )
+@return_on_404()
 async def random():
-    response = BaseMangaScraper(url="https://mangareader.to/random/").build_dict()
+    response = BaseMangaScraper(url="https://mangareader.to/random/").scrape()
     return response


 @router.get(
-    "/completed",
+    path="/completed",
     response_model=list[BaseSearchModel],
     summary="Completed Mangas",
     description="Get list of completed airing Mangas. eg: `/completed/` - returns a list of Mangas which is completed airing lately. Also has `sort` query which get each pages of Mangas ( 1 page contains 18 Mangas ): valid `sort` queries - `default` `last-updated` `score` `name-az` `release-date` `most-viewed`.",
 )
+@return_on_404()
 async def completed(
     page: int = 1, sort: str = "default", offset: int = 0, limit: int = Query(10, le=18)
 ):
@@ -119,11 +131,12 @@ async def completed(


 @router.get(
-    "/genre/{genre}",
+    path="/genre/{genre}",
     response_model=list[BaseSearchModel],
     summary="Genre",
     description="Search Mangas with genres. eg: `/genre/action/` - returns a list of Mangas with genre `action`. Also has `sort` query which get each pages of Mangas ( 1 page contains 18 Mangas ): valid `sort` queries - `default` `last-updated` `score` `name-az` `release-date` `most-viewed`.",
 )
+@return_on_404()
 async def genre(
     genre: str,
     page: int = 1,
@@ -134,16 +147,22 @@ async def genre(
     slugified_sort = string_helper.slugify(sort, "-")
     url = f"https://mangareader.to/genre/{genre}/?sort={slugified_sort}&page={page}"
     response = BaseSearchScraper(url).scrape()
+
+    if not response:
+        raise HTTPException(
+            status_code=404, detail=f"Manga with genre {genre} was not found"
+        )
     return response[offset : offset + limit]


 @router.get(
-    "/type/{type}",
+    path="/type/{type}",
     response_model=list[BaseSearchModel],
     summary="Type",
     description="Search Mangas with types. eg: `/type/manga/` - returns a list of Mangas with type `manga`. Also has `page` query which get each pages of Mangas ( 1 page contains 18 Mangas ): valid `type` queries - `manga`, `one-shot`, `doujinshi`, `light-novel`, `manhwa`, `manhua`, `comic`.",
 )
-def type(
+@return_on_404()
+async def type(
     type: str,
     page: int = 1,
     sort: str = "default",
@@ -154,4 +173,7 @@ def type(
     slugified_sort = string_helper.slugify(sort, "-")
     url = f"https://mangareader.to/type/{slugified_type}?sort={slugified_sort}&page={page}"
     response = BaseSearchScraper(url).scrape()
+
+    if not response:
+        raise HTTPException(status_code=404, detail=f"Manga of type {type} was not found")
     return response[offset : offset + limit]
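
A quick way to exercise the new error contract end to end — a hedged sketch, assuming the app object lives at app.main:app and that the scrapers can reach mangareader.to:

    from fastapi.testclient import TestClient

    from app.main import app  # assumption: the actual module path may differ

    client = TestClient(app)

    # an unknown slug now yields a clean 404 (either via the explicit check in
    # get_manga or via the catch-all in return_on_404) instead of a bare 500
    assert client.get("/manga/definitely-not-a-real-slug").status_code == 404

    # an invalid chart is now a 404 as well, where it used to be a 400
    assert client.get("/most-viewed/yearly").status_code == 404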
2 changes: 1 addition & 1 deletion app/api/scrapers/base_manga.py
@@ -138,7 +138,7 @@ def get_volumes(self):
         return item_list

     @return_on_error({})
-    def build_dict(self) -> dict:
+    def scrape(self) -> dict:
         manga_dict = {
             "manga_id": self.get_manga_id,
             "title": self.get_title,
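
For contrast with return_on_404: the pre-existing return_on_error decorator shown at the top of this diff swallows AttributeError and returns the supplied fallback, which is why the renamed scrape() can still promise a dict when a selector comes back empty. A standalone sketch of that behavior (parse_title and the bs4 usage are illustrative assumptions, not code from this repo):

    from bs4 import BeautifulSoup  # assumption: the scrapers parse HTML with bs4

    from app.api.decorators.return_decorator import return_on_error


    @return_on_error({})
    def parse_title(html: str) -> dict:
        soup = BeautifulSoup(html, "html.parser")
        # select_one() returns None when the node is missing, so .text raises
        # AttributeError, which return_on_error converts into the {} fallback
        return {"title": soup.select_one("h1.manga-title").text}


    print(parse_title("<html></html>"))  # {} rather than a traceback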
