
Commit

Fix for python 2.7
Nicnl committed Jan 26, 2019
1 parent 632a6a1 commit df65566
Showing 1 changed file with 7 additions and 7 deletions.
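Context for the change: xml_page is produced by .text.encode("utf-8"), so under Python 2.7 it is already a UTF-8 byte str, and the commit drops several redundant .decode("utf-8") calls before the regex lookups, presumably to avoid bouncing between unicode and byte strings. A minimal sketch of the resulting pattern on Python 2.7, with a placeholder URL and payload rather than the real Crunchyroll endpoint:

    # Python 2.7 sketch of the pattern after this commit (placeholder URL/XML, not the real API).
    import re
    import requests

    response = requests.get("https://example.com/media_info.xml")  # hypothetical endpoint
    xml_page = response.text.encode("utf-8")  # unicode -> UTF-8 byte str on Python 2.7

    # Search the byte str directly; no extra .decode("utf-8") round trip before str()/re.sub().
    title_match = re.search(r'<series_title>(.*?)</series_title>', xml_page)
    if title_match:
        anime_name = re.sub(r'[^A-Za-z0-9\ \-\' \\]+', '', str(title_match.group(1))).title().strip()
        print(anime_name)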
14 changes: 7 additions & 7 deletions anime_dl/sites/crunchyroll.py
@@ -196,21 +196,21 @@ def singleEpisode(self, url, cookies, token, resolution):
         xml_page = xml_page_connect.text.encode("utf-8")
 
         try:
-            m3u8_file_link = str(re.search(r'<file>(.*?)</file>', xml_page.decode("utf-8")).group(1)).replace("&amp;", "&")
+            m3u8_file_link = str(re.search(r'<file>(.*?)</file>', xml_page).group(1)).replace("&amp;", "&")
             logging.debug("m3u8_file_link : %s", m3u8_file_link)
 
             if not m3u8_file_link:
                 # If no m3u8 found, try the rtmpdump...
                 try:
-                    host_link = re.search(r'<host>(.*?)</host>', xml_page.decode("utf-8")).group(1)
+                    host_link = re.search(r'<host>(.*?)</host>', xml_page).group(1)
                     logging.debug("Found RTMP DUMP!")
                     print("RTMP streams not supported currently...")
                 except Exception as NoRtmpDump:
                     print("No RTMP Streams Found...")
                     print(NoRtmpDump)
             else:
                 anime_name = re.sub(r'[^A-Za-z0-9\ \-\' \\]+', '', str(
-                    re.search(r'<series_title>(.*?)</series_title>', xml_page.decode("utf-8")).group(1))).title().strip()
+                    re.search(r'<series_title>(.*?)</series_title>', xml_page).group(1))).title().strip()
                 episode_number = re.search(r'<episode_number>(.*?)</episode_number>',
                                            xml_page.decode("utf-8")).group(1)
                 video_width = re.search(r'<width>(.*?)</width>', xml_page.decode("utf-8")).group(1)
@@ -241,7 +241,7 @@ def singleEpisode(self, url, cookies, token, resolution):
                     pass
                 else:
                     self.subFetcher(
-                        xml=str(xml_page.decode("utf-8")),
+                        xml=str(xml_page),
                         episode_number=episode_number,
                         file_name=file_name)
 
@@ -527,9 +527,9 @@ def onlySubs(self, url, cookies):
            video_id, url)
        xml_page = sess.get(url=infoURL, headers=headers, cookies=cookies).text.encode("utf-8")
 
-        # anime_name = re.search(r'<series_title>(.*?)</series_title>', xml_page.decode("utf-8")).group(1)
+        # anime_name = re.search(r'<series_title>(.*?)</series_title>', xml_page).group(1)
        anime_name = re.sub(r'[^A-Za-z0-9\ \-\' \\]+', '',
-                            str(re.search(r'<series_title>(.*?)</series_title>', xml_page.decode("utf-8")).group(1))).title().strip()
+                            str(re.search(r'<series_title>(.*?)</series_title>', xml_page).group(1))).title().strip()
 
        episode_number = re.search(r'<episode_number>(.*?)</episode_number>', xml_page.decode("utf-8")).group(1)
        video_width = re.search(r'<width>(.*?)</width>', xml_page.decode("utf-8")).group(1)
@@ -548,7 +548,7 @@ def onlySubs(self, url, cookies):
        if not os.path.exists(output_directory):
            os.makedirs(output_directory)
 
-        self.subFetcher(xml=xml_page.decode('utf-8'), episode_number=episode_number, file_name=file_name)
+        self.subFetcher(xml=xml_page, episode_number=episode_number, file_name=file_name)
 
        for sub_file in glob("*.ass"):
            try:
