
linting
NHagar committed Apr 3, 2024
1 parent f31a77f commit 29dfd86
Showing 2 changed files with 22 additions and 22 deletions.
36 changes: 18 additions & 18 deletions substack_api/newsletter.py
@@ -16,12 +16,12 @@ def list_all_categories() -> List[Tuple[str, int]]:
     Get name / id representations of all newsletter categories
     """
     endpoint_cat = "https://substack.com/api/v1/categories"
-    r = requests.get(endpoint_cat, headers=HEADERS)
+    r = requests.get(endpoint_cat, headers=HEADERS, timeout=30)
     categories = [(i["name"], i["id"]) for i in r.json()]
     return categories
 
 
-def category_id_to_name(id: int) -> str:
+def category_id_to_name(user_id: int) -> str:
     """
     Map a numerical category id to a name
@@ -30,11 +30,11 @@ def category_id_to_name(id: int) -> str:
     id : Numerical category identifier
     """
     categories = list_all_categories()
-    category_name = [i[0] for i in categories if i[1] == id]
+    category_name = [i[0] for i in categories if i[1] == user_id]
     if len(category_name) > 0:
         return category_name[0]
-    else:
-        raise ValueError(f"{id} is not in Substack's list of categories")
+
+    raise ValueError(f"{user_id} is not in Substack's list of categories")
 
 
 def category_name_to_id(name: str) -> int:
@@ -65,7 +65,7 @@ def get_newsletters_in_category(
     Parameters
     ----------
     category_id : Numerical category identifier
-    subdomains_only : Whether to return only newsletter subdomains (needed for post collection), or to return all metadata
+    subdomains_only : Whether to return only newsletter subdomains (needed for post collection)
     start_page : Start page for paginated API results
     end_page : End page for paginated API results
     """
@@ -78,7 +78,7 @@ def get_newsletters_in_category(
     all_pubs = []
     while more and page_num < page_num_end:
         full_url = base_url + str(page_num)
-        pubs = requests.get(full_url, headers=HEADERS).json()
+        pubs = requests.get(full_url, headers=HEADERS, timeout=30).json()
         more = pubs["more"]
         if subdomains_only:
             pubs = [i["id"] for i in pubs["publications"]]
@@ -103,8 +103,8 @@ def get_newsletter_post_metadata(
     Parameters
     ----------
-    newsletter_subdomain : Substack subdomain of newsletter (can be retrieved from `get_newsletters_in_category`)
-    slugs_only : Whether to return only post slugs (needed for post content collection), or to return all metadata
+    newsletter_subdomain : Substack subdomain of newsletter
+    slugs_only : Whether to return only post slugs (needed for post content collection)
     start_page : Start page for paginated API results
     end_page : End page for paginated API results
     """
@@ -115,16 +115,16 @@ def get_newsletter_post_metadata(
     all_posts = []
     while offset_start < offset_end:
         full_url = f"https://{newsletter_subdomain}.substack.com/api/v1/archive?sort=new&search=&offset={offset_start}&limit=10"
-        posts = requests.get(full_url, headers=HEADERS).json()
+        posts = requests.get(full_url, headers=HEADERS, timeout=30).json()
 
         if len(posts) == 0:
             break
 
         last_id = posts[-1]["id"]
         if last_id == last_id_ref:
             break
-        else:
-            last_id_ref = last_id
+
+        last_id_ref = last_id
 
         if slugs_only:
             all_posts.extend([i["slug"] for i in posts])
@@ -145,16 +145,16 @@ def get_post_contents(
     Parameters
     ----------
-    newsletter_subdomain : Substack subdomain of newsletter (can be retrieved from `get_newsletters_in_category`)
+    newsletter_subdomain : Substack subdomain of newsletter
     slug : Slug of post to retrieve (can be retrieved from `get_newsletter_post_metadata`)
     html_only : Whether to get only HTML of body text, or all metadata/content
     """
     endpoint = f"https://{newsletter_subdomain}.substack.com/api/v1/posts/{slug}"
-    post_info = requests.get(endpoint, headers=HEADERS).json()
+    post_info = requests.get(endpoint, headers=HEADERS, timeout=30).json()
     if html_only:
         return post_info["body_html"]
-    else:
-        return post_info
+
+    return post_info
 
 
 def get_newsletter_recommendations(newsletter_subdomain: str) -> List[Dict[str, str]]:
@@ -163,10 +163,10 @@ def get_newsletter_recommendations(newsletter_subdomain: str) -> List[Dict[str,
     Parameters
     ----------
-    newsletter_subdomain : Substack subdomain of newsletter (can be retrieved from `get_newsletters_in_category`)
+    newsletter_subdomain : Substack subdomain of newsletter
     """
     endpoint = f"https://{newsletter_subdomain}.substack.com/recommendations"
-    r = requests.get(endpoint, headers=HEADERS)
+    r = requests.get(endpoint, headers=HEADERS, timeout=30)
     recs = r.text
     soup = BeautifulSoup(recs, "html.parser")
     div_elements = soup.find_all("div", class_="publication-content")
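
For orientation, here is a minimal usage sketch of the newsletter functions touched above, chained with the parameter names from their docstrings; the category id (4) and the keyword-argument call style are illustrative assumptions, not part of this commit.

# Illustrative sketch only: category id 4 is a placeholder value.
from substack_api.newsletter import (
    get_newsletters_in_category,
    get_newsletter_post_metadata,
    get_post_contents,
)

# One page of newsletter subdomains in a category.
subdomains = get_newsletters_in_category(4, subdomains_only=True, start_page=0, end_page=1)

# Post slugs for the first newsletter, then the HTML body of its first post.
slugs = get_newsletter_post_metadata(subdomains[0], slugs_only=True, start_page=0, end_page=1)
body_html = get_post_contents(subdomains[0], slugs[0], html_only=True)

With the timeouts added in this commit, each underlying requests.get call now raises after 30 seconds instead of hanging indefinitely.
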
8 changes: 4 additions & 4 deletions substack_api/user.py
@@ -17,7 +17,7 @@ def get_user_id(username: str) -> int:
     The username of the Substack user.
     """
     endpoint = f"https://substack.com/api/v1/user/{username}/public_profile"
-    r = requests.get(endpoint, headers=HEADERS)
+    r = requests.get(endpoint, headers=HEADERS, timeout=30)
     user_id = r.json()["id"]
     return user_id
 
@@ -32,7 +32,7 @@ def get_user_reads(username: str) -> List[Dict[str, str]]:
     The username of the Substack user.
     """
     endpoint = f"https://substack.com/api/v1/user/{username}/public_profile"
-    r = requests.get(endpoint, headers=HEADERS)
+    r = requests.get(endpoint, headers=HEADERS, timeout=30)
     user_data = r.json()
     reads = [
         {
@@ -57,7 +57,7 @@ def get_user_likes(user_id: int):
     endpoint = (
         f"https://substack.com/api/v1/reader/feed/profile/{user_id}?types%5B%5D=like"
     )
-    r = requests.get(endpoint, headers=HEADERS)
+    r = requests.get(endpoint, headers=HEADERS, timeout=30)
     likes = r.json()["items"]
     return likes
 
@@ -72,6 +72,6 @@ def get_user_notes(user_id: int):
     The user ID of the Substack user.
     """
     endpoint = f"https://substack.com/api/v1/reader/feed/profile/{user_id}"
-    r = requests.get(endpoint, headers=HEADERS)
+    r = requests.get(endpoint, headers=HEADERS, timeout=30)
     notes = r.json()["items"]
     return notes
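
Similarly, a minimal sketch of the user.py helpers changed above, based on the endpoints visible in the diff; the username "example" is a placeholder, not a value from this commit.

# Illustrative sketch only: "example" is a placeholder username.
from substack_api.user import get_user_id, get_user_likes, get_user_notes, get_user_reads

username = "example"
user_id = get_user_id(username)    # numeric id from the public_profile endpoint
reads = get_user_reads(username)   # also served from the public_profile endpoint
likes = get_user_likes(user_id)    # reader feed items filtered to likes
notes = get_user_notes(user_id)    # reader feed items with no type filter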
