diff --git a/Changelog.md b/Changelog.md
index 96105be..f317806 100644
--- a/Changelog.md
+++ b/Changelog.md
@@ -29,4 +29,6 @@
 - Fix for #39 [2018.01.27]
 - Fix for #46 [2018.01.29]
 - Fix for #45 [2018.01.29]
-- Temp fix for login #65, #66 [2018.10.11]
\ No newline at end of file
+- Temp fix for login #65, #66 [2018.10.11]
+- Fixed login issue [2019.05.16]
+- Re-structured the code for better maintainability and reusability. [2019.05.16]
\ No newline at end of file
diff --git a/anime_dl/AnimeDL.py b/anime_dl/Anime_dl.py
similarity index 81%
rename from anime_dl/AnimeDL.py
rename to anime_dl/Anime_dl.py
index aac1969..1d13d7d 100644
--- a/anime_dl/AnimeDL.py
+++ b/anime_dl/Anime_dl.py
@@ -25,12 +25,13 @@ def __init__(self, url, username, password, resolution, language, skipper, logge
             exit()
         else:
-            sites.crunchyroll.CrunchyRoll(
+            sites.crunchyroll.Crunchyroll(
                 url=url[0], password=password, username=username, resolution=resolution, language=language,
                 skipper=skipper, logger=logger, episode_range=episode_range)
     elif website == "VRV":
-        print("Under development...")
+        print("Not Implemented")
+        exit(1)
        # if not url[0] or not username[0] or not password[0]:
        # print("Please enter the required arguments. Run __main__.py --help")
        # exit()
@@ -39,11 +40,13 @@ def __init__(self, url, username, password, resolution, language, skipper, logge
        # sites.vrv.Vrv(url=url, password=password, username=username, resolution=resolution)
     elif website == "Funimation":
-        if not url[0] or not username[0] or not password[0]:
-            print("Please enter the required arguments. Run __main__.py --help")
-            exit()
-        else:
-            sites.funimation.Funimation(url[0], username, password, resolution, language)
+        print("Not Implemented")
+        exit(1)
+        # if not url[0] or not username[0] or not password[0]:
+        # print("Please enter the required arguments. 
Run __main__.py --help") + # exit() + # else: + # sites.funimation.Funimation(url[0], username, password, resolution, language) def honcho(self, url): # Verify that we have a sane url and return which website it belongs @@ -67,4 +70,4 @@ def honcho(self, url): return "Crunchyroll" elif domain in ["www.vrv.co", "vrv.co"]: - return "VRV" + return "VRV" \ No newline at end of file diff --git a/anime_dl/__init__.py b/anime_dl/__init__.py index b0579d9..74ad458 100644 --- a/anime_dl/__init__.py +++ b/anime_dl/__init__.py @@ -1,2 +1,4 @@ +import common +import external import sites -import external \ No newline at end of file + diff --git a/anime_dl/__main__.py b/anime_dl/__main__.py index a2c785c..f2abd81 100644 --- a/anime_dl/__main__.py +++ b/anime_dl/__main__.py @@ -3,11 +3,9 @@ """ __author__ = "Xonshiz" -__email__ = "xonshiz@psychoticelites.com" +__email__ = "xonshiz@gmail.com" """ -from AnimeDL import * -# from AnimeDL import * -# from anime_dl import AnimeDL +from Anime_dl import * from sys import exit from version import __version__ import argparse @@ -15,9 +13,9 @@ import platform -class main(): +class Main(): if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Anime_DL downloads anime from CrunchyRoll and Funimation.') + parser = argparse.ArgumentParser(description='anime_dl downloads anime from CrunchyRoll and Funimation.') parser.add_argument('--version', action='store_true', help='Shows version and exits.') @@ -39,14 +37,14 @@ class main(): if args.verbose: logging.basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=logging.DEBUG) logging.debug('You have successfully set the Debugging On.') - logging.debug("Arguments Provided : %s" % (args)) + logging.debug("Arguments Provided : {0}".format(args)) logging.debug( - "Operating System : %s - %s - %s" % (platform.system(), platform.release(), platform.version())) - logging.debug("Python Version : %s (%s)" % (platform.python_version(), platform.architecture()[0])) + "Operating System : {0} - {1} - {2}".format(platform.system(), platform.release(), platform.version())) + logging.debug("Python Version : {0} ({1})".format(platform.python_version(), platform.architecture()[0])) logger = "True" if args.version: - print("Current Version : %s" % __version__) + print("Current Version : {0}".format(__version__)) exit() if args.skip: @@ -72,4 +70,4 @@ class main(): AnimeDL(url=args.input, username=args.username, password=args.password, resolution=args.resolution, language=args.language, skipper=skipper, - logger=logger, episode_range=args.range) + logger=logger, episode_range=args.range) \ No newline at end of file diff --git a/anime_dl/animeName.py b/anime_dl/animeName.py deleted file mode 100644 index def5bb4..0000000 --- a/anime_dl/animeName.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -import re -import subprocess - -class animeName(object): - - def nameEdit(self, animeName, episodeNumber, resolution): - animeName = str(animeName).replace("039T", "'") - rawName = str(animeName).title().strip().replace("Season ", "S") + " - " +\ - str(episodeNumber).strip() + " [" + str(resolution) + "]" - file_name = str(re.sub(r'[^A-Za-z0-9\ \-\' \\]+', '', str(animeName))).title().strip().replace("Season ", "S")\ - + " - " + str(episodeNumber.zfill(2)).strip() + " [" + str(resolution) + "].mp4" - - try: - max_path = int(subprocess.check_output(['getconf', 'PATH_MAX', '/'])) - # print(MAX_PATH) - except (Exception): - max_path = 4096 - - if len(file_name) > max_path: - 
file_name = file_name[:max_path] - - return file_name - - def nameEditFuni(self, animeName, seasonNumber, episodeNumber, resolution): - rawName = str(animeName).title().strip().replace("Season ", "S") + " - " + "S%s E%s" %\ - (str(seasonNumber).strip(), - str(episodeNumber).strip())\ - + " [" + str(resolution) + "]" - file_name = str(re.sub(r'[^A-Za-z0-9\ \-\' \\]+', '', str(animeName))).title().strip().replace("Season ", "S") \ - + " - " + str(episodeNumber.zfill(2)).strip() + " [" + str(resolution) + "].mp4" - - try: - max_path = int(subprocess.check_output(['getconf', 'PATH_MAX', '/'])) - # print(MAX_PATH) - except (Exception): - max_path = 4096 - - if len(file_name) > max_path: - file_name = file_name[:max_path] - - return file_name diff --git a/anime_dl/common/__init__.py b/anime_dl/common/__init__.py new file mode 100644 index 0000000..4c2a07f --- /dev/null +++ b/anime_dl/common/__init__.py @@ -0,0 +1,3 @@ +import browser_instance +import downloader +import misc \ No newline at end of file diff --git a/anime_dl/common/browser_instance.py b/anime_dl/common/browser_instance.py new file mode 100644 index 0000000..4004e0c --- /dev/null +++ b/anime_dl/common/browser_instance.py @@ -0,0 +1,95 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from cfscrape import create_scraper +from requests import session +from bs4 import BeautifulSoup +import re + + +def page_downloader(url, scrapper_delay=5, **kwargs): + headers = kwargs.get("headers") + if not headers: + headers = { + 'User-Agent': + 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36', + 'Accept-Encoding': 'gzip, deflate' + } + + sess = session() + sess = create_scraper(sess, delay=scrapper_delay) + + connection = sess.get(url, headers=headers, cookies=kwargs.get("cookies")) + + if connection.status_code != 200: + print("Whoops! 
Seems like I can't connect to website.") + print("It's showing : %s" % connection) + print("Run this script with the --verbose argument and report the issue along with log file on Github.") + # raise Warning("can't connect to website %s" % manga_url) + return False, None, None + else: + page_source = BeautifulSoup(connection.text.encode("utf-8"), "html.parser") + connection_cookies = sess.cookies + + return True, page_source, connection_cookies + + +def login_crunchyroll(url, username, password, country): + headers = { + 'User-Agent': + 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36', + 'Referer': + 'https://www.crunchyroll.com/' + country + 'login' + } + + sess = session() + sess = create_scraper(sess) + print("Trying to login...") + + initial_page_fetch = sess.get(url='https://www.crunchyroll.com/' + country + 'login', headers=headers) + + if initial_page_fetch.status_code == 200: + initial_page_source = initial_page_fetch.text.encode("utf-8") + initial_cookies = sess.cookies + csrf_token = re.search(r'login_form\[\_token\]\"\ value\=\"(.*?)\"', + str(initial_page_source)).group(1) + + payload = { + 'login_form[name]': '%s' % username, + 'login_form[password]': '%s' % password, + 'login_form[redirect_url]': '/', + 'login_form[_token]': '%s' % csrf_token + } + + login_post = sess.post( + url='https://www.crunchyroll.com/' + country + 'login', + data=payload, + headers=headers, + cookies=initial_cookies) + + login_check_response, login_cookies = login_check(html_source=login_post.text.encode('utf-8'), cookies=login_post.cookies) + if login_check_response: + print("Logged in successfully...") + return True, login_cookies, csrf_token + else: + print("Unable to Log you in. Check credentials again.") + return False, None, None + else: + # print("Couldn't connect to the login page...") + # print("Website returned : %s" % str(initial_page_fetch.status_code)) + return False, None, None + + +def login_check(html_source, cookies=None): + # Open the page and check the title. CrunchyRoll redirects the user and the title has the text "Redirecting...". + # If this is not found, you're probably not logged in and you'll just get 360p or 480p. + if "href=\"/logout\"" in html_source: + return True, cookies + else: + print("Let me check again...") + second_try_response, html_source, cookies = page_downloader(url="https://www.crunchyroll.com/", cookies=cookies) + if second_try_response: + if "href=\"/logout\"" in html_source: + return True, cookies + else: + return False, cookies diff --git a/anime_dl/downloader.py b/anime_dl/common/downloader.py similarity index 76% rename from anime_dl/downloader.py rename to anime_dl/common/downloader.py index dbb1baa..2d0d971 100644 --- a/anime_dl/downloader.py +++ b/anime_dl/common/downloader.py @@ -6,9 +6,9 @@ from tqdm import tqdm -class downloader(object): +class Downloader(object): - def File_Downloader(self, ddl, fileName, referer, cookies): + def file_downloader(self, ddl, file_name, referer, cookies): headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36', 'Territory': 'US', @@ -19,9 +19,7 @@ def File_Downloader(self, ddl, fileName, referer, cookies): sess = create_scraper(sess) dlr = sess.get(ddl, stream=True, cookies = cookies, headers = headers) # Downloading the content using python. 
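# stream=True keeps the response body on the socket until it is consumed, so the
# loop below can write the video out in 1 KiB pieces via iter_content(chunk_size=1024)
# instead of buffering the whole file in memory; tqdm wraps that iterator only to
# render a progress bar.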
-        with open(fileName, "wb") as handle:
+        with open(file_name, "wb") as handle:
             for data in tqdm(dlr.iter_content(chunk_size=1024)):  # Added chunk size to speed up the downloads
                 handle.write(data)
-        print("Download has been completed.")  # Viola
-
-        # coding: utf8
+        print("Download has been completed.")  # Voila
\ No newline at end of file
diff --git a/anime_dl/common/misc.py b/anime_dl/common/misc.py
new file mode 100644
index 0000000..08169dc
--- /dev/null
+++ b/anime_dl/common/misc.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+
+def duplicate_remover(seq):
+    # https://stackoverflow.com/a/480227
+    seen = set()
+    seen_add = seen.add
+    return [x for x in seq if not (x in seen or seen_add(x))]
diff --git a/anime_dl/external/__init__.py b/anime_dl/external/__init__.py
index 45cbf49..80fb4c9 100644
--- a/anime_dl/external/__init__.py
+++ b/anime_dl/external/__init__.py
@@ -1,4 +1,4 @@
-import external.aes
-import external.compat
-import external.socks
-import external.utils
\ No newline at end of file
+import aes
+import compat
+import socks
+import utils
diff --git a/anime_dl/external/aes.py b/anime_dl/external/aes.py
index bd76c43..356aec9 100644
--- a/anime_dl/external/aes.py
+++ b/anime_dl/external/aes.py
@@ -330,4 +330,4 @@ def inc(data):
     return data
-__all__ = ['aes_encrypt', 'key_expansion', 'aes_ctr_decrypt', 'aes_cbc_decrypt', 'aes_decrypt_text']
+__all__ = ['aes_encrypt', 'key_expansion', 'aes_ctr_decrypt', 'aes_cbc_decrypt', 'aes_decrypt_text']
\ No newline at end of file
diff --git a/anime_dl/external/compat.py b/anime_dl/external/compat.py
index b257e2e..b0d071d 100644
--- a/anime_dl/external/compat.py
+++ b/anime_dl/external/compat.py
@@ -2928,4 +2928,4 @@ def compat_struct_unpack(spec, *args):
     'compat_xml_parse_error',
     'compat_xpath',
     'workaround_optparse_bug9161',
-]
+]
\ No newline at end of file
diff --git a/anime_dl/external/socks.py b/anime_dl/external/socks.py
index f78f665..700acb5 100644
--- a/anime_dl/external/socks.py
+++ b/anime_dl/external/socks.py
@@ -269,4 +269,4 @@ def connect(self, address):
         self._make_proxy(socket.socket.connect, address)
     def connect_ex(self, address):
-        return self._make_proxy(socket.socket.connect_ex, address)
+        return self._make_proxy(socket.socket.connect_ex, address)
\ No newline at end of file
diff --git a/anime_dl/external/utils.py b/anime_dl/external/utils.py
index 823c159..08ca1d5 100644
--- a/anime_dl/external/utils.py
+++ b/anime_dl/external/utils.py
@@ -3574,4 +3574,4 @@ def write_xattr(path, key, value):
         raise XAttrUnavailableError(
             "Couldn't find a tool to set the xattrs. 
" "Install either the python 'xattr' module, " - "or the 'xattr' binary.") + "or the 'xattr' binary.") \ No newline at end of file diff --git a/anime_dl/sites/__init__.py b/anime_dl/sites/__init__.py index fb785d5..baebec3 100644 --- a/anime_dl/sites/__init__.py +++ b/anime_dl/sites/__init__.py @@ -1,3 +1 @@ -import sites.crunchyroll -import sites.funimation -import sites.vrv +import crunchyroll diff --git a/anime_dl/sites/crunchyroll.py b/anime_dl/sites/crunchyroll.py index 2a6198c..da35da4 100644 --- a/anime_dl/sites/crunchyroll.py +++ b/anime_dl/sites/crunchyroll.py @@ -1,683 +1,475 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -import base64 -import logging -import os -import re -import subprocess -import sys -import zlib -from glob import glob -from hashlib import sha1 -from math import pow, sqrt, floor -from shutil import move - -import animeName -import cfscrape -import requests -from external.aes import aes_cbc_decrypt -from external.compat import compat_etree_fromstring -# External libs have been taken from youtube-dl for decoding the subtitles. -from external.utils import bytes_to_intlist, intlist_to_bytes - -'''This code Stinx. I'll write a better, faster and compact code when I get time after my exams or in mid. -I literally have NO idea what I was thinking when I wrote this piece of code. -THIS REALLY STINX! -Also, some strangers went here and wrote some more code that REALLY REALLY STINX. -Read the code at your own risk. -''' - - -class CrunchyRoll(object): - def __init__(self, url, password, username, resolution, language, skipper, logger, episode_range): - # print("Username : {0}".format(username)) - # print("Type Username : {0}".format(type(username))) - # print("Type Username : {0}".format(type(password))) - # print("Password : {0}".format(password)) - if logger == "True": - logging.basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=logging.DEBUG, - encoding="utf-8") - - # Extract the language from the input URL - Crunchy_Language = re.search(r'.+\/([a-z]{2}|[a-z]{2}-[a-z]{2})\/.+', url) - if not Crunchy_Language: - Crunchy_Language = "/" - else: - Crunchy_Language = Crunchy_Language.group(1) + "/" - - - Crunchy_Show_regex = r'https?:\/\/(?:(?Pwww|m)\.)?(?Pcrunchyroll\.com(\/[a-z]{2}|\/[a-z]{2}-[a-z]{2})?\/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P[\w\-]+))\/?(?:\?|$)' - Crunchy_Video_regex = r'https?:\/\/(?:(?Pwww|m)\.)?(?Pcrunchyroll\.(?:com|fr)(\/[a-z]{2}|\/[a-z]{2}-[a-z]{2})?\/(?:media(?:-|\/\?id=)|[^\/]*\/[^\/?&]*?)(?P[0-9]+))(?:[\/?&]|$)' - - Crunchy_Show = re.match(Crunchy_Show_regex, url) - Crunchy_Video = re.match(Crunchy_Video_regex, url) - - if Crunchy_Video: - cookies, Token = self.webpagedownloader(url=url, username=username[0], password=password[0], country=Crunchy_Language) - if skipper == "yes": - self.onlySubs(url=url, cookies=cookies) - else: - self.singleEpisode( - url=url, cookies=cookies, token=Token, resolution=resolution) - elif Crunchy_Show: - cookies, Token = self.webpagedownloader(url=url, username=username[0], password=password[0], country=Crunchy_Language) - self.wholeShow(url=url, cookie=cookies, token=Token, language=language, resolution=resolution, - skipper=skipper, episode_range=episode_range) - else: - print("URL does not look like a show or a video, stopping.") - - def login_check(self, htmlsource): - # Open the page and check the title. CrunchyRoll redirects the user and the title has the text "Redirecting...". 
- # If this is not found, you're probably not logged in and you'll just get 360p or 480p. - - # titleCheck = re.search(r'\(.*?)\', - # str(htmlsource)).group(1) - # if str(titleCheck) == "Redirecting...": - # return True - # else: - # return False - return True - - def webpagedownloader(self, url, username, password, country): - - headers = { - 'User-Agent': - 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36', - 'Referer': - 'https://www.crunchyroll.com/' + country + 'login' - } - - sess = requests.session() - sess = cfscrape.create_scraper(sess) - print("Trying to login...") - - initial_page_fetch = sess.get(url='https://www.crunchyroll.com/' + country + 'login', headers=headers) - - if initial_page_fetch.status_code == 200: - initial_page_source = initial_page_fetch.text.encode("utf-8") - initial_cookies = sess.cookies - csrf_token = re.search(r'login_form\[\_token\]\"\ value\=\"(.*?)\"', - str(initial_page_source)).group(1) - - payload = { - 'login_form[name]': '%s' % username, - 'login_form[password]': '%s' % password, - 'login_form[redirect_url]': '/', - 'login_form[_token]': '%s' % csrf_token - } - - login_post = sess.post( - url='https://www.crunchyroll.com/' + country + 'login', - data=payload, - headers=headers, - cookies=initial_cookies) - - # with open("login_source.html", "w") as wf: - # wf.write(login_post.text.encode('utf-8')) - - if self.login_check(htmlsource=login_post.text.encode('utf-8')): - print("Logged in successfully...") - resp = sess.get( - url=url, headers=headers, - cookies=initial_cookies) - # video_id = int(str(re.search(r'div\[media_id\=(.*?)\]', str(resp)).group(1)).strip()) - # - return initial_cookies, csrf_token - else: - print("Unable to Log you in. Check credentials again.") - else: - print("Couldn't connect to the login page...") - print("Website returned : %s" % str(initial_page_fetch.status_code)) - - def rtmpDump(self, host, file, url, filename): - # print("Downloading RTMP DUMP STREAM!") - logging.debug("Host : %s", host) - logging.debug("file : %s", file) - logging.debug("url : %s", url) - serverAddress = str(host.split("/ondemand/")[0]) + "/ondemand/" - authentication = "ondemand/" + str(host.split("/ondemand/")[1]) - - rtmpDumpCommand = "rtmpdump -r \"%s\" -a \"%s\" -f \"WIN 25,0,0,148\" -W \"http://www.crunchyroll.com/vendor/ChromelessPlayerApp-c0d121b.swf\" -p \"%s\" -y \"%s\" -o \"%s\"" % ( - serverAddress, authentication, url, file, filename) - logging.debug("rtmpDumpCommand : %s" % rtmpDumpCommand) - - try: - subprocess.call(rtmpDumpCommand, shell=True) - except Exception: - print("Please make sure that rtmpdump is present in the PATH or THIS DIRECTORY!") - sys.exit() - - def duplicate_remover(self, seq): - # https://stackoverflow.com/a/480227 - seen = set() - seen_add = seen.add - return [x for x in seq if not (x in seen or seen_add(x))] - - def singleEpisode(self, url, cookies, token, resolution): - - video_id = str(url.split('-')[-1]).replace("/", "") - logging.debug("video_id : %s", video_id) - headers = { - 'User-Agent': - 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36', - 'Upgrade-Insecure-Requests': - '1', - 'Accept-Encoding': - 'gzip, deflate' - } - - sess = requests.session() - sess = cfscrape.create_scraper(sess) - - info_url = "" - - resolution_to_find = None - - if str(resolution).lower() in ['1080p', '1080', 'fhd', 'best']: - info_url = 
"http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=108&video_quality=80¤t_page=%s" % ( - video_id, url) - resolution_to_find = "1920x1080" - - elif str(resolution).lower() in ['720p', '720', 'hd']: - info_url = "http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=106&video_quality=62¤t_page=%s" % ( - video_id, url) - resolution_to_find = "1280x720" - - elif str(resolution).lower() in ['480p', '480', 'sd']: - info_url = "http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=106&video_quality=61¤t_page=%s" % ( - video_id, url) - resolution_to_find = "848x480" - elif str(resolution).lower() in ['360p', '360', 'cancer']: - info_url = "http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=106&video_quality=60¤t_page=%s" % ( - video_id, url) - resolution_to_find = "640x360" - elif str(resolution).lower() in ['240p', '240', 'supracancer']: - info_url = "http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=106&video_quality=60¤t_page=%s" % ( - video_id, url) - resolution_to_find = "428x240" - - logging.debug("info_url : %s", info_url) - - if resolution_to_find is None: - print('Unknown requested resolution %s' % str(resolution).lower()) - return - - xml_page_connect = sess.get(url=info_url, headers=headers, cookies=cookies) - - if xml_page_connect.status_code == 200: - xml_page = xml_page_connect.text.encode("utf-8") - - try: - m3u8_file_link = str(re.search(r'(.*?)', xml_page).group(1)).replace("&", "&") - logging.debug("m3u8_file_link : %s", m3u8_file_link) - - if not m3u8_file_link: - # If no m3u8 found, try the rtmpdump... - try: - host_link = re.search(r'(.*?)', xml_page).group(1) - logging.debug("Found RTMP DUMP!") - print("RTMP streams not supported currently...") - except Exception as NoRtmpDump: - print("No RTMP Streams Found...") - print(NoRtmpDump) - else: - anime_name = re.sub(r'[^A-Za-z0-9\ \-\' \\]+', '', str( - re.search(r'(.*?)', xml_page).group(1))).title().strip() - episode_number = re.search(r'(.*?)', - xml_page.decode("utf-8")).group(1) - - #video_width = re.search(r'(.*?)', xml_page.decode("utf-8")).group(1) - #video_height = re.search(r'(.*?)', xml_page.decode("utf-8")).group(1) - video_width, video_height = resolution_to_find.split("x") - - video_resolution = str(video_width) + "x" + str(video_height) - - file_name = animeName.animeName().nameEdit(animeName=anime_name, episodeNumber=episode_number, - resolution=video_resolution) - - output_directory = os.path.abspath("Output" + os.sep + str(anime_name) + "/") - # print("output_directory : {0}".format(output_directory)) - - if not os.path.exists("Output"): - os.makedirs("Output") - if not os.path.exists(output_directory): - os.makedirs(output_directory) - - file_location = str(output_directory) + os.sep + str(file_name).replace(".mp4", ".mkv") - - logging.debug("anime_name : %s", anime_name) - logging.debug("episode_number : %s", episode_number) - logging.debug("video_resolution : %s", video_resolution) - logging.debug("file_name : %s", file_name) - - if os.path.isfile(file_location): - print('[Anime-dl] File Exists! 
Skipping %s\n' % file_name) - pass - else: - self.subFetcher( - xml=str(xml_page), - episode_number=episode_number, - file_name=file_name) - - m3u8_file_connect = sess.get(url=m3u8_file_link, cookies=cookies, headers=headers) - try: - #m3u8_file_text = m3u8_file_connect.text.splitlines()[2] - m3u8_file_text = None - - next_line_is_good = False - for i, currentLine in enumerate(m3u8_file_connect.text.splitlines()): - if next_line_is_good: - m3u8_file_text = currentLine - logging.debug("file to download : %s", m3u8_file_text) - break - elif currentLine.startswith("#EXT-X") and resolution_to_find in currentLine: - next_line_is_good = True - - if m3u8_file_text is None: - print('Could not find the requested resolution %s in the m3u8 file\n' % file_name) - return - - logging.debug("m3u8_file_text : %s", m3u8_file_text) - - ffmpeg_command = 'ffmpeg -i "{0}" -c copy -bsf:a aac_adtstoasc "{1}/{2}"'.format( - m3u8_file_text, - os.getcwd(), - file_name) - logging.debug("ffmpeg_command : %s", ffmpeg_command) - subprocess.call(ffmpeg_command, shell=True) - - subtitles_files = [] - for sub_file in glob("*.ass"): - if sub_file.endswith(".enUS.ass"): - subtitles_files.insert(0, - "--track-name 0:English(US) --ui-language en --language 0:eng --default-track 0:yes --sub-charset 0:utf-8 " + '"' + str( - os.path.realpath(sub_file)) + '" ') - - elif sub_file.endswith(".enGB.ass"): - subtitles_files.append( - "--track-name 0:English(UK) --ui-language en --language 0:eng --default-track 0:no --sub-charset 0:utf-8 " + '"' + str( - os.path.realpath(sub_file)) + '" ') - - elif sub_file.endswith(".esLA.ass"): - subtitles_files.append( - "--track-name 0:Espanol --ui-language es --language 0:spa --default-track 0:no --sub-charset 0:utf-8 " + '"' + str( - os.path.realpath(sub_file)) + '" ') - elif sub_file.endswith(".esES.ass"): - subtitles_files.append( - "--track-name 0:Espanol(Espana) --ui-language es --language 0:spa --default-track 0:no --sub-charset 0:utf-8 " + '"' + str( - os.path.realpath(sub_file)) + '" ') - elif sub_file.endswith(".ptBR.ass"): - subtitles_files.append( - "--track-name 0:Portugues(Brasil) --ui-language pt --language 0:por --default-track 0:no --sub-charset 0:utf-8 " + '"' + str( - os.path.realpath(sub_file)) + '" ') - elif sub_file.endswith(".ptPT.ass"): - subtitles_files.append( - "--track-name 0:Portugues(Portugal) --ui-language pt --language 0:por --default-track 0:no --sub-charset 0:utf-8 " + '"' + str( - os.path.realpath(sub_file)) + '" ') - elif sub_file.endswith(".frFR.ass"): - subtitles_files.append( - "--track-name 0:Francais(France) --ui-language fr --language 0:fre --default-track 0:no --sub-charset 0:utf-8 " + '"' + str( - os.path.realpath(sub_file)) + '" ') - elif sub_file.endswith(".deDE.ass"): - subtitles_files.append( - "--track-name 0:Deutsch --ui-language de --language 0:ger --default-track 0:no --sub-charset 0:utf-8 " + '"' + str( - os.path.realpath(sub_file)) + '" ') - elif sub_file.endswith(".arME.ass"): - subtitles_files.append( - "--track-name 0:Arabic --language 0:ara --default-track 0:no --sub-charset 0:utf-8 " + '"' + str( - os.path.realpath(sub_file)) + '" ') - elif sub_file.endswith(".itIT.ass"): - subtitles_files.append( - "--track-name 0:Italiano --ui-language it --language 0:ita --default-track 0:no --sub-charset 0:utf-8 " + '"' + str( - os.path.realpath(sub_file)) + '" ') - elif sub_file.endswith(".trTR.ass"): - subtitles_files.append( - "--track-name 0:Turkce --ui-language tr --language 0:tur --default-track 0:no --sub-charset 0:utf-8 " + '"' + str( - 
os.path.realpath(sub_file)) + '" ') - else: - subtitles_files.append( - "--track-name 0:und --default-track 0:no --sub-charset 0:utf-8 " + '"' + str( - os.path.realpath(sub_file)) + '" ') - - subs_files = self.duplicate_remover(subtitles_files) - logging.debug("subs_files : %s", subs_files) - - font_files = [os.path.realpath(font_file) for font_file in - glob(str(os.getcwd()) + "/Fonts/*.*")] - - fonts = '--attachment-mime-type application/x-truetype-font --attach-file "' + str( - '" --attachment-mime-type application/x-truetype-font --attach-file "'.join( - font_files)) + '"' - - if len(font_files) == 0: - fonts = '' - - mkv_merge_command = 'mkvmerge --ui-language en --output "%s" ' % str(file_name).replace( - ".mp4", - ".mkv") + '"' + str( - file_name) + '" ' + ' '.join(subs_files) + ' ' + str(fonts) - - logging.debug("mkv_merge_command : %s", mkv_merge_command) - - try: - subprocess.call(mkv_merge_command, shell=True) - - for video_file in glob("*.mkv"): - try: - move(video_file, output_directory) - except Exception as e: - print(str(e)) - pass - - for video in glob("*.mp4"): - os.remove(os.path.realpath(video)) - - for sub_file_delete in glob("*.ass"): - os.remove(os.path.realpath(sub_file_delete)) - - except Exception as FileMuxingException: - print("Sees like I couldn't mux the files.") - print("Check whether the MKVMERGE.exe is in PATH or not.") - print(FileMuxingException) - - for video_file in glob("*.mp4"): - try: - move(video_file, output_directory) - except Exception as e: - print(str(e)) - pass - for sub_files in glob("*.ass"): - try: - move(sub_files, output_directory) - except Exception as e: - print(str(e)) - pass - except Exception as NoM3u8File: - print("Couldn't connect to the m3u8 file download link...") - print(NoM3u8File) - sys.exit(1) - - except Exception as NotAvailable: - print("Seems like this video isn't available...") - print(NotAvailable) - else: - print("Could not connect to Crunchyroll's media page.") - print("It reurned : {0}".format(xml_page_connect.status_code)) - - def wholeShow(self, url, cookie, token, language, resolution, skipper, episode_range): - # print("Check my patreon for this : http://patreon.com/Xonshiz") - - headers = { - 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_1) AppleWebKit/601.2.7 (KHTML, like Gecko) Version/9.0.1 Safari/601.2.7', - 'Upgrade-Insecure-Requests': '1', - 'Accept-Encoding': 'gzip, deflate' - } - - sess = requests.session() - sess = cfscrape.create_scraper(sess) - page_source = sess.get(url=url, headers=headers, cookies=cookie).text.encode("utf-8") - # with open("New_way.html", "w") as wf: - # wf.write(page_source) - - dub_list = [] - ep_sub_list = [] - for episode_link, episode_type in re.findall( - r'\(.*?)\<\/iv\>', xml_return).group(1)).strip() - - data = str( - re.search(r'\(.*?)\<\/data\>', xml_return).group( - 1)).strip() - # logging.debug("data : %s", data) - logging.debug("iv : %s", iv) - - # print("Sub ID : %s\t| iv : %s\t| data : %s" % (sub_id, iv, data)) - subtitle = self._decrypt_subtitles(data, iv, - sub_id).decode('utf-8') - # print(subtitle) - sub_root = compat_etree_fromstring(subtitle) - sub_data = self._convert_subtitles_to_ass(sub_root) - # print(sub_root) - lang_code = str( - re.search(r'lang_code\=\"(.*?)\"', str(subtitle)).group( - 1)).strip() - sub_file_name = str(file_name).replace(".mp4", ".") + str(lang_code) + ".ass" - - print("Downloading {0} ...".format(sub_file_name)) - - try: - with open(str(os.getcwd()) + "/" + str(sub_file_name), "wb") as sub_file: - 
sub_file.write(sub_data.encode("utf-8")) - except Exception as EncodingException: - print("Couldn't write the subtitle file...skipping.") - pass - logging.debug("\n----- Subs Downloaded -----\n") - - def onlySubs(self, url, cookies): - # print("Running only subs") - current_directory = os.getcwd() - video_id = str(url.split('-')[-1]).replace("/", "") - # print("URL : %s\nCookies : %s\nToken : %s\nResolution : %s\nMedia ID : %s" % (url, cookies, token, resolution, video_id)) - headers = { - 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_1) AppleWebKit/601.2.7 (KHTML, like Gecko) Version/9.0.1 Safari/601.2.7', - 'Upgrade-Insecure-Requests': '1', - 'Accept-Encoding': 'gzip, deflate' - } - - sess = requests.session() - sess = cfscrape.create_scraper(sess) - infoURL = "http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=108&video_quality=80¤t_page=%s" % ( - video_id, url) - xml_page = sess.get(url=infoURL, headers=headers, cookies=cookies).text.encode("utf-8") - - # anime_name = re.search(r'(.*?)', xml_page).group(1) - anime_name = re.sub(r'[^A-Za-z0-9\ \-\' \\]+', '', - str(re.search(r'(.*?)', xml_page).group(1))).title().strip() - - episode_number = re.search(r'(.*?)', xml_page.decode("utf-8")).group(1) - video_width = re.search(r'(.*?)', xml_page.decode("utf-8")).group(1) - video_height = re.search(r'(.*?)', xml_page.decode("utf-8")).group(1) - - video_resolution = str(video_width) + "x" + str(video_height) - - file_name = animeName.animeName().nameEdit(animeName=anime_name, episodeNumber=episode_number, - resolution=video_resolution) - - output_directory = os.path.abspath("Output" + os.sep + str(anime_name) + os.sep) - - if not os.path.exists("Output"): - os.makedirs("Output") - - if not os.path.exists(output_directory): - os.makedirs(output_directory) - - self.subFetcher(xml=xml_page, episode_number=episode_number, file_name=file_name) - - for sub_file in glob("*.ass"): - try: - move(sub_file, current_directory + os.sep + "Output" + os.sep) - except Exception as e: - print("Couldn't move the file. 
Got following error : \n") - print(e) - pass - - def _decrypt_subtitles(self, data, iv, id): - data = bytes_to_intlist(base64.b64decode(data.encode('utf-8'))) - iv = bytes_to_intlist(base64.b64decode(iv.encode('utf-8'))) - id = int(id) - - def obfuscate_key_aux(count, modulo, start): - output = list(start) - for _ in range(count): - output.append(output[-1] + output[-2]) - # cut off start values - output = output[2:] - output = list(map(lambda x: x % modulo + 33, output)) - return output - - def obfuscate_key(key): - num1 = int(floor(pow(2, 25) * sqrt(6.9))) - num2 = (num1 ^ key) << 5 - num3 = key ^ num1 - num4 = num3 ^ (num3 >> 3) ^ num2 - prefix = intlist_to_bytes(obfuscate_key_aux(20, 97, (1, 2))) - shaHash = bytes_to_intlist( - sha1(prefix + str(num4).encode('ascii')).digest()) - # Extend 160 Bit hash to 256 Bit - return shaHash + [0] * 12 - - key = obfuscate_key(id) - - decrypted_data = intlist_to_bytes(aes_cbc_decrypt(data, key, iv)) - return zlib.decompress(decrypted_data) - - def _convert_subtitles_to_ass(self, sub_root): - output = '' - - def ass_bool(strvalue): - assvalue = '0' - if strvalue == '1': - assvalue = '-1' - return assvalue - - output = '[Script Info]\n' - output += 'Title: %s\n' % sub_root.attrib['title'] - output += 'ScriptType: v4.00+\n' - output += 'WrapStyle: %s\n' % sub_root.attrib['wrap_style'] - output += 'PlayResX: %s\n' % sub_root.attrib['play_res_x'] - output += 'PlayResY: %s\n' % sub_root.attrib['play_res_y'] - output += """ -[V4+ Styles] -Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding -""" - for style in sub_root.findall('./styles/style'): - output += 'Style: ' + style.attrib['name'] - output += ',' + style.attrib['font_name'] - output += ',' + style.attrib['font_size'] - output += ',' + style.attrib['primary_colour'] - output += ',' + style.attrib['secondary_colour'] - output += ',' + style.attrib['outline_colour'] - output += ',' + style.attrib['back_colour'] - output += ',' + ass_bool(style.attrib['bold']) - output += ',' + ass_bool(style.attrib['italic']) - output += ',' + ass_bool(style.attrib['underline']) - output += ',' + ass_bool(style.attrib['strikeout']) - output += ',' + style.attrib['scale_x'] - output += ',' + style.attrib['scale_y'] - output += ',' + style.attrib['spacing'] - output += ',' + style.attrib['angle'] - output += ',' + style.attrib['border_style'] - output += ',' + style.attrib['outline'] - output += ',' + style.attrib['shadow'] - output += ',' + style.attrib['alignment'] - output += ',' + style.attrib['margin_l'] - output += ',' + style.attrib['margin_r'] - output += ',' + style.attrib['margin_v'] - output += ',' + style.attrib['encoding'] - output += '\n' - - output += """ -[Events] -Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text -""" - for event in sub_root.findall('./events/event'): - output += 'Dialogue: 0' - output += ',' + event.attrib['start'] - output += ',' + event.attrib['end'] - output += ',' + event.attrib['style'] - output += ',' + event.attrib['name'] - output += ',' + event.attrib['margin_l'] - output += ',' + event.attrib['margin_r'] - output += ',' + event.attrib['margin_v'] - output += ',' + event.attrib['effect'] - output += ',' + event.attrib['text'] - output += '\n' - - return output +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import logging +import re +import anime_dl +import 
supporters +import os +import subprocess +from glob import glob +from shutil import move + + +class Crunchyroll(object): + def __init__(self, url, password, username, resolution, language, skipper, logger, episode_range): + if logger == "True": + logging.basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=logging.DEBUG, + encoding="utf-8") + + # Extract the language from the input URL + crunchy_language = re.search(r'.+\/([a-z]{2}|[a-z]{2}-[a-z]{2})\/.+', url) + if not crunchy_language: + crunchy_language = "/" + else: + crunchy_language = crunchy_language.group(1) + "/" + + crunchy_show_regex = r'https?:\/\/(?:(?Pwww|m)\.)?(?Pcrunchyroll\.com(\/[a-z]{2}|\/[a-z]{2}-[a-z]{2})?\/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P[\w\-]+))\/?(?:\?|$)' + crunchy_video_regex = r'https?:\/\/(?:(?Pwww|m)\.)?(?Pcrunchyroll\.(?:com|fr)(\/[a-z]{2}|\/[a-z]{2}-[a-z]{2})?\/(?:media(?:-|\/\?id=)|[^\/]*\/[^\/?&]*?)(?P[0-9]+))(?:[\/?&]|$)' + + crunchy_show = re.match(crunchy_show_regex, url) + crunchy_video = re.match(crunchy_video_regex, url) + + login_response, cookies, token = anime_dl.common.browser_instance.login_crunchyroll(url=url, + username=username[0], + password=password[0], + country=crunchy_language) + if login_response: + if crunchy_video: + if skipper == "yes": + self.only_subs(url=url, cookies=cookies, resolution=resolution) + else: + self.single_episode(url=url, cookies=cookies, token=token, resolution=resolution) + elif crunchy_show: + self.whole_show(url=url, cookie=cookies, token=token, language=language, resolution=resolution, skipper=skipper, episode_range=episode_range) + else: + print("URL does not look like a show or a video, stopping.") + else: + print("Failed Login!!!") + exit(1) + + def single_episode(self, url, cookies, token, resolution): + video_id = str(url.split('-')[-1]).replace("/", "") + logging.debug("video_id : {0}".format(video_id)) + + response, resolution_to_find, info_url = self.resolution_finder(resolution=resolution, video_id=video_id, url=url) + + if not response: + print("No Resolution Found") + exit(1) + + response_value, xml_page_connect, xml_cookies = anime_dl.common.browser_instance.page_downloader(url=info_url, cookies=cookies) + + if xml_page_connect: + xml_page_connect = str(xml_page_connect) + stream_exists, m3u8_file_link = self.m3u8_finder(xml_page_source=xml_page_connect) + + if stream_exists: + anime_name, episode_number, video_resolution = self.episode_information_extractor(page_source=xml_page_connect, resolution_to_find=resolution_to_find) + file_name = supporters.anime_name.crunchyroll_name(anime_name=anime_name, episode_number=episode_number, resolution=video_resolution) + output_directory = supporters.path_works.path_creator(anime_name=anime_name) + file_location = str(output_directory) + os.sep + str(file_name).replace(".mp4", ".mkv") + + if os.path.isfile(file_location): + print('[anime-dl] File Exists! 
Skipping {0}\n'.format(file_name)) + pass + else: + subs_downloaded = supporters.sub_fetcher.crunchyroll_subs(xml=str(xml_page_connect), episode_number=episode_number, file_name=file_name) + if not subs_downloaded: + pass + m3u8_downloaded = self.m3u8_downloader(url=m3u8_file_link, cookies=xml_cookies, resolution_to_find=resolution_to_find, file_name=file_name) + if m3u8_downloaded: + sub_files = self.sub_prepare() + font_files = [os.path.realpath(font_file) for font_file in + glob(str(os.getcwd()) + "/Fonts/*.*")] + + fonts = '--attachment-mime-type application/x-truetype-font --attach-file "' + str( + '" --attachment-mime-type application/x-truetype-font --attach-file "'.join( + font_files)) + '"' + + if len(font_files) == 0: + fonts = '' + + is_stream_muxed = self.stream_muxing(file_name=file_name, subs_files=sub_files, fonts=fonts, output_directory=output_directory) + if is_stream_muxed: + is_file_moved = self.move_video_file(output_directory = output_directory) + if is_file_moved: + is_cleaned = self.material_cleaner() + if is_cleaned: + print("{0} - {1} successfully downloaded.\n".format(anime_name, episode_number)) + else: + print("Couldn't remove the leftover files.") + pass + else: + print("Couldn't move the file.") + pass + else: + print("Stream couldn't be muxed. Make sure MKVMERGE is in the path.") + pass + else: + print("Couldn't download the m3u8 file.") + pass + else: + print("Couldn't find the stream.") + pass + else: + print("Couldn't Connect To XML Page.") + pass + + def whole_show(self, url, cookie, token, language, resolution, skipper, episode_range): + response, page_source, episode_list_cookies = anime_dl.common.browser_instance.page_downloader(url=url, cookies=cookie) + + if response: + dub_list, ep_sub_list = self.episode_list_extractor(page_source=page_source, url=url) + ep_sub_list = self.sub_list_editor(episode_range=episode_range, ep_sub_list=ep_sub_list) + + if skipper == "yes": + # print("DLing everything") + print("Total Subs to download : %s" % len(ep_sub_list)) + for episode_url in ep_sub_list[::-1]: + # cookies, Token = self.webpagedownloader(url=url) + # print("Sub list : %s" % sub_list) + self.only_subs(url=episode_url, cookies=cookie, resolution=resolution) + + print("-----------------------------------------------------------") + print("\n") + else: + if str(language).lower() in ["english", "eng", "dub"]: + # If the "dub_list" is empty, that means there are no English Dubs for the show, or CR changed something. 
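                    # (dub_list is filled by episode_list_extractor() further down, which routes
                    # every episode whose title contains "(Dub)" into it and every remaining
                    # link into ep_sub_list.)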
+                    if len(dub_list) == 0:
+                        print("No English Dub Available For This Series.")
+                        print(
+                            "If you can see the Dubs, please open an Issue on https://github.com/Xonshiz/anime-dl/issues/new")
+                        exit(1)
+                    else:
+                        print("Total Episodes to download : %s" % len(dub_list))
+                        for episode_url in dub_list[::-1]:
+                            # cookies, Token = self.webpagedownloader(url=url)
+                            # print("Dub list : %s" % dub_list)
+                            try:
+                                self.single_episode(url=episode_url, cookies=cookie, token=token, resolution=resolution)
+                            except Exception as SomeError:
+                                print("Error Downloading : {0}".format(SomeError))
+                                pass
+                            print("-----------------------------------------------------------")
+                            print("\n")
+                else:
+                    print("Total Episodes to download : %s" % len(ep_sub_list))
+
+                    for episode_url in ep_sub_list[::-1]:
+                        # cookies, Token = self.webpagedownloader(url=url)
+                        # print("Sub list : %s" % sub_list)
+                        try:
+                            self.single_episode(url=episode_url, cookies=cookie, token=token, resolution=resolution)
+                        except Exception as SomeError:
+                            print("Error Downloading : {0}".format(SomeError))
+                            pass
+                        print("-----------------------------------------------------------")
+                        print("\n")
+        else:
+            print("Couldn't connect to Crunchyroll. Failed.")
+            exit(1)
+
+    def episode_list_extractor(self, page_source, url):
+        dub_list = []
+        ep_sub_list = []
+        chap_holder_div = page_source.find_all('a', {'class': 'portrait-element block-link titlefix episode'})
+
+        for single_node in chap_holder_div:
+            href_value = single_node["href"]
+            title_value = single_node["title"]
+            if "(Dub)" in str(title_value):
+                dub_list.append(str(url) + "/" + str(str(href_value).split("/")[-1]))
+            else:
+                ep_sub_list.append(str(url) + "/" + str(str(href_value).split("/")[-1]))
+
+        if len(dub_list) == 0 and len(ep_sub_list) == 0:
+            print("Could not find the show links. Report on https://github.com/Xonshiz/anime-dl/issues/new")
+            exit(0)
+        else:
+            return dub_list, ep_sub_list
+
+    def sub_list_editor(self, episode_range, ep_sub_list):
+        if episode_range != "All":
+            # -1 shifts the episode number to its list index, since lists start from 0.
+            starting = int(str(episode_range).split("-")[0]) - 1
+            ending = int(str(episode_range).split("-")[1])
+            indexes = [x for x in range(starting, ending)]
+            # The first [::-1] reorders the list to start from the 1st episode; the result is
+            # reversed again at the end because the callers revert it once more.
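            # A quick sanity check of the slicing below, assuming a hypothetical
            # six-episode show whose links arrive newest-first and episode_range "2-4":
            #   >>> eps = ['ep6', 'ep5', 'ep4', 'ep3', 'ep2', 'ep1']
            #   >>> [eps[::-1][x] for x in range(2 - 1, 4)][::-1]
            #   ['ep4', 'ep3', 'ep2']
            # The slice comes back newest-first, so the [::-1] the callers in
            # whole_show() apply then yields ep2, ep3, ep4 in airing order.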
+ return [ep_sub_list[::-1][x] for x in indexes][::-1] + else: + return ep_sub_list + + def episode_information_extractor(self, page_source, resolution_to_find): + anime_name = re.sub(r'[^A-Za-z0-9\ \-\' \\]+', '', str(re.search(r'(.*?)', page_source).group(1))).title().strip() + episode_number = re.search(r'(.*?)', page_source.decode("utf-8")).group(1) + video_width, video_height = resolution_to_find.split("x") + video_resolution = str(video_width) + "x" + str(video_height) + + return anime_name, episode_number, video_resolution + + def stream_muxing(self, file_name, subs_files, fonts, output_directory): + mkv_merge_command = 'mkvmerge --ui-language en_US --output "%s" ' % str(file_name).replace(".mp4", ".mkv") + '"' + str(file_name) + '" ' + ' '.join(subs_files) + ' ' + str(fonts) + + logging.debug("mkv_merge_command : %s", mkv_merge_command) + + try: + subprocess.check_call(mkv_merge_command, shell=True) + return True + # if call: + # return True + # else: + # return False + except Exception as FileMuxingException: + print("Sees like I couldn't mux the files.\n") + print("Check whether the MKVMERGE.exe is in PATH or not.\n") + print(str(FileMuxingException) + "\n") + fallback = self.stream_not_muxed_fallback(output_directory=output_directory) + return False + + def stream_not_muxed_fallback(self, output_directory): + try: + for video_file in glob("*.mp4"): + try: + move(video_file, output_directory) + except Exception as e: + print(str(e)) + pass + for sub_files in glob("*.ass"): + try: + move(sub_files, output_directory) + except Exception as e: + print(str(e)) + pass + return True + except Exception: + return False + + def move_video_file(self, output_directory): + try: + for video_file in glob("*.mkv"): + try: + move(video_file, output_directory) + except Exception as e: + # print(str(e)) + pass + return True + except Exception: + exit(1) + + def move_subtitle_file(self, output_directory): + try: + for video_file in glob("*.ass"): + try: + move(video_file, output_directory) + except Exception as e: + # print(str(e)) + pass + return True + except Exception: + exit(1) + + def material_cleaner(self): + try: + for video in glob("*.mp4"): + os.remove(os.path.realpath(video)) + + for sub_file_delete in glob("*.ass"): + os.remove(os.path.realpath(sub_file_delete)) + return True + except Exception: + exit(1) + + def m3u8_downloader(self, url, cookies, resolution_to_find, file_name): + response_value, m3u8_file_connect, updated_cookies = anime_dl.common.browser_instance.page_downloader(url=url, cookies=cookies) + try: + m3u8_file_text = None + + next_line_is_good = False + for i, currentLine in enumerate(m3u8_file_connect.text.splitlines()): + if next_line_is_good: + m3u8_file_text = currentLine + logging.debug("file to download : {0}".format(m3u8_file_text)) + break + elif currentLine.startswith("#EXT-X") and resolution_to_find in currentLine: + next_line_is_good = True + + if m3u8_file_text is None: + print('Could not find the requested resolution {0} in the m3u8 file\n'.format(file_name)) + exit(1) + + self.ffmpeg_call(m3u8_file_text, file_name) + return True + + except Exception: + print("Exception Occurred In m3u8 File.") + exit(1) + + def sub_prepare(self): + subtitles_files = [] + for sub_file in glob("*.ass"): + if sub_file.endswith(".enUS.ass"): + subtitles_files.insert(0, + "--track-name 0:English_US --ui-language en_US --language 0:eng --default-track 0:yes --sub-charset 0:utf-8 " + '"' + str( + os.path.realpath(sub_file)) + '" ') + + elif sub_file.endswith(".enGB.ass"): + 
subtitles_files.append( + "--track-name 0:English_UK --ui-language en_US --language 0:eng --default-track 0:no --sub-charset 0:utf-8 " + '"' + str( + os.path.realpath(sub_file)) + '" ') + + elif sub_file.endswith(".esLA.ass"): + subtitles_files.append( + "--track-name 0:Espanol --ui-language es_ES --language 0:spa --default-track 0:no --sub-charset 0:utf-8 " + '"' + str( + os.path.realpath(sub_file)) + '" ') + elif sub_file.endswith(".esES.ass"): + subtitles_files.append( + "--track-name 0:Espanol_Espana --ui-language es_ES --language 0:spa --default-track 0:no --sub-charset 0:utf-8 " + '"' + str( + os.path.realpath(sub_file)) + '" ') + elif sub_file.endswith(".ptBR.ass"): + subtitles_files.append( + "--track-name 0:Portugues_Brasil --ui-language pt_BR --language 0:por --default-track 0:no --sub-charset 0:utf-8 " + '"' + str( + os.path.realpath(sub_file)) + '" ') + elif sub_file.endswith(".ptPT.ass"): + subtitles_files.append( + "--track-name 0:Portugues_Portugal --ui-language pt_PT --language 0:por --default-track 0:no --sub-charset 0:utf-8 " + '"' + str( + os.path.realpath(sub_file)) + '" ') + elif sub_file.endswith(".frFR.ass"): + subtitles_files.append( + "--track-name 0:Francais_France --ui-language fr_FR --language 0:fre --default-track 0:no --sub-charset 0:utf-8 " + '"' + str( + os.path.realpath(sub_file)) + '" ') + elif sub_file.endswith(".deDE.ass"): + subtitles_files.append( + "--track-name 0:Deutsch --ui-language de_DE --language 0:ger --default-track 0:no --sub-charset 0:utf-8 " + '"' + str( + os.path.realpath(sub_file)) + '" ') + elif sub_file.endswith(".arME.ass"): + subtitles_files.append( + "--track-name 0:Arabic --language 0:ara --default-track 0:no --sub-charset 0:utf-8 " + '"' + str( + os.path.realpath(sub_file)) + '" ') + elif sub_file.endswith(".itIT.ass"): + subtitles_files.append( + "--track-name 0:Italiano --ui-language it_IT --language 0:ita --default-track 0:no --sub-charset 0:utf-8 " + '"' + str( + os.path.realpath(sub_file)) + '" ') + elif sub_file.endswith(".trTR.ass"): + subtitles_files.append( + "--track-name 0:Turkce --ui-language tr_TR --language 0:tur --default-track 0:no --sub-charset 0:utf-8 " + '"' + str( + os.path.realpath(sub_file)) + '" ') + else: + subtitles_files.append( + "--track-name 0:und --default-track 0:no --sub-charset 0:utf-8 " + '"' + str( + os.path.realpath(sub_file)) + '" ') + + subs_files = anime_dl.common.misc.duplicate_remover(subtitles_files) + logging.debug("subs_files : {0}".format(subs_files)) + return subs_files + + def only_subs(self, url, cookies, resolution): + video_id = str(url.split('-')[-1]).replace("/", "") + logging.debug("video_id : {0}".format(video_id)) + + response, resolution_to_find, info_url = self.resolution_finder(resolution=resolution, video_id=video_id, url=url) + + if not response: + print("No Resolution Found") + exit(1) + + response_value, xml_page_connect, xml_cookies = anime_dl.common.browser_instance.page_downloader(url=info_url, + cookies=cookies) + + if xml_page_connect: + xml_page_connect = str(xml_page_connect) + stream_exists, m3u8_file_link = self.m3u8_finder(xml_page_source=xml_page_connect) + + if stream_exists: + anime_name, episode_number, video_resolution = self.episode_information_extractor( + page_source=xml_page_connect, resolution_to_find=resolution_to_find) + file_name = supporters.anime_name.crunchyroll_name(anime_name=anime_name, episode_number=episode_number, + resolution=video_resolution) + output_directory = supporters.path_works.path_creator(anime_name=anime_name) + file_location = 
str(output_directory) + os.sep + str(file_name).replace(".mp4", ".ass") + + if os.path.isfile(file_location): + print('[anime-dl] File Exists! Skipping {0}\n'.format(file_name)) + pass + else: + subs_downloaded = supporters.sub_fetcher.crunchyroll_subs(xml=str(xml_page_connect), + episode_number=episode_number, + file_name=file_name) + if not subs_downloaded: + pass + else: + subtitles_moved = self.move_subtitle_file(output_directory) + if subtitles_moved: + return True + else: + return False + else: + print("Stream Not Found. Subtitle Downloading Failed.") + return False + + + def ffmpeg_call(self, m3u8_text, file_name): + try: + ffmpeg_command = 'ffmpeg -i "{0}" -c copy -bsf:a aac_adtstoasc "{1}/{2}"'.format(m3u8_text, os.getcwd(), + file_name) + logging.debug("ffmpeg_command : {0}\n".format(ffmpeg_command)) + call = subprocess.check_call(ffmpeg_command, shell=True) + if call: + return True + else: + return False + except Exception: + return False + + def m3u8_finder(self, xml_page_source): + m3u8_file_link = str(re.search(r'(.*?)', xml_page_source).group(1)).replace("&", "&") + logging.debug("m3u8_file_link : %s", m3u8_file_link) + + if not m3u8_file_link: + # If no m3u8 found, try the rtmpdump... + try: + host_link = re.search(r'(.*?)', xml_page_source).group(1) + logging.debug("Found RTMP DUMP!") + print("RTMP streams not supported currently...") + return False, None + except Exception as NoRtmpDump: + print("No RTMP Streams Found...") + print(NoRtmpDump) + else: + return True, m3u8_file_link + + def resolution_finder(self, resolution, video_id, url): + resolution_to_find = None + info_url = "" + + if str(resolution).lower() in ['1080p', '1080', 'fhd', 'best']: + info_url = "http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=108&video_quality=80¤t_page=%s" % ( + video_id, url) + resolution_to_find = "1920x1080" + + elif str(resolution).lower() in ['720p', '720', 'hd']: + info_url = "http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=106&video_quality=62¤t_page=%s" % ( + video_id, url) + resolution_to_find = "1280x720" + + elif str(resolution).lower() in ['480p', '480', 'sd']: + info_url = "http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=106&video_quality=61¤t_page=%s" % ( + video_id, url) + resolution_to_find = "848x480" + elif str(resolution).lower() in ['360p', '360', 'cancer']: + info_url = "http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=106&video_quality=60¤t_page=%s" % ( + video_id, url) + resolution_to_find = "640x360" + elif str(resolution).lower() in ['240p', '240', 'supracancer']: + info_url = "http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=106&video_quality=60¤t_page=%s" % ( + video_id, url) + resolution_to_find = "428x240" + + logging.debug("info_url : {0}".format(info_url)) + + if resolution_to_find is None: + print('Unknown requested resolution %s' % str(resolution).lower()) + return False, None, None + + else: + return True, resolution_to_find, info_url diff --git a/anime_dl/sites/funimation.py b/anime_dl/sites/funimation.py deleted file mode 100644 index c265a3d..0000000 --- a/anime_dl/sites/funimation.py +++ /dev/null @@ -1,165 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -from cfscrape import create_scraper -from requests import session -import re -import downloader, animeName -import json - - -class 
Funimation(object): - def __init__(self, url, username, password, resolution, language): - - self.cookies = self.login(userUserName = username, userPassword = password) - # print("Cookies : %s\n\n\nSource : \n%s" % (self.cookies, self.pageSource)) - self.singleEpisode(url, self.cookies, resolution, language) - - def login(self, userUserName, userPassword): - headers = { - 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36', - 'Territory': 'US' - - } - payload = {'username': '%s' % userUserName, 'password': '%s' % userPassword} - sess = session() - sess = create_scraper(sess) - - loginPost = sess.post(url='https://prod-api-funimationnow.dadcdigital.com/api/auth/login/', data=payload, - headers=headers) - - initialCookies = sess.cookies - - return initialCookies - - - def singleEpisode(self, url, userCookies, resolution, language): - headers = { - 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36', - 'Territory': 'US', - 'Referer': 'https://www.funimation.com', - 'Origin': 'https://www.funimation.com' - - } - - sess = session() - sess = create_scraper(sess) - print("This lang : ", language) - - # print(url) - - if str(language).lower() in ["english", "dub", "eng"]: - if "?lang=" in str(url): - videoUrl = str(url).lower().replace("japanese", "english") - - else: - videoUrl = str(url) + "?lang=english" - - elif str(language).lower() in ["japanese", "sub", "jpn"]: - if "?lang=" in str(url): - videoUrl = str(url).lower().replace("english", "japanese") - else: - videoUrl = str(url) + "?lang=japanese" - else: - videoUrl = str(url) - - - fetchConnect = sess.get(videoUrl, headers = headers, cookies = userCookies).text.encode("utf-8") - - anime_name = str(str(url).split("/")[4]).strip() - - playerID = str(re.search(r'\"\/player\/(.*?)\"\>', str(fetchConnect)).group(1)) - - episodeNumber = str(re.search(r'episodeNum\:\ (.*?)\,', str(fetchConnect)).group(1)) - seasonNumber = str(re.search(r'seasonNum\:\ (.*?)\,', str(fetchConnect)).group(1)) - - - # https://prod-api-funimationnow.dadcdigital.com/api/source/catalog/video//signed/ - - headersNew = { - 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36', - 'Referer' : 'https://www.funimation.com/player/%s/?bdub=0' % playerID, - 'Origin' : 'https://www.funimation.com' - - } - - pageReferer = 'https://www.funimation.com/player/%s/?bdub=0' % playerID - - apiUrl = "https://prod-api-funimationnow.dadcdigital.com/api/source/catalog/video/%s/signed/" % playerID - jsonReply = sess.get(apiUrl, headers = headersNew, cookies = userCookies).text - - jsonLoad = json.loads(jsonReply) - mp4Link = str(jsonLoad['items'][0]['src']) - m3u8Link = str(jsonLoad['items'][1]['src']) - - - reso = str(resolution) - fileName = animeName.animeName().nameEditFuni(anime_name, seasonNumber, episodeNumber, reso) - - # downloader.downloader().File_Downloader(mp4Link.replace("English", "Japanese"), fileName, pageReferer, userCookies) - downloader.downloader().parseurl(m3u8Link) - - - - # if language.lower() in ["japanese", "sub", "jpn"]: - # finalUrl = str(url) + "?lang=english" - # s = sess.get(finalUrl, headers=headers, cookies=userCookies) - # - # elif language.lower() in ["english", "dub", "eng"]: - # finalUrl = str(url).replace("simulcast", "uncut") + "?lang=english" - # print(finalUrl) - # s = sess.get(finalUrl, headers=headers, cookies=userCookies) - # print("Got 
this") - # else: - # s = sess.get(url + "?lang=english", headers=headers, cookies=userCookies) - # - # cookies = sess.cookies - # - # page_source = s.text.encode('utf-8') - # htmlSource = str(BeautifulSoup(page_source, "lxml")) - # - # videoID = int(str(re.search('id\:\ \'(.*?)\'\,', htmlSource).group(1)).strip()) - # seasonNumber = int(str(re.search('seasonNum: (.*?),', htmlSource).group(1)).strip()) - # episodeNumber = int(str(re.search('episodeNum: (.*?),', htmlSource).group(1)).strip()) - # showName = str( - # re.search('KANE_customdimensions.showName\ \=\ \'(.*?)\'\;', htmlSource).group(1)).strip().replace("'", - # "'").replace( - # "&", "$") - # fileName = str(showName) + " - " + str(episodeNumber) + ".mkv" - # bDubNumber = int(str(re.search('"/player/(.*?)/\?bdub=0', htmlSource).group(1)).strip()) - # print(videoID, seasonNumber, episodeNumber, showName, bDubNumber) - # videoPlayerLink = "https://www.funimation.com/player/%s/?bdub=0" % bDubNumber - # print(videoPlayerLink) - # sleep(10) - # headersNew = { - # 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36', - # 'Territory': 'US', - # 'Referer' : '%s' % finalUrl - # - # } - # playerSource = sess.get(videoPlayerLink, headers=headersNew).text - # print(playerSource) - # main_m3u8Link = str(re.search('"screenshots":(.*?)"],', playerSource).group(1)).strip().replace("[\"", "").replace("exp/", "") - # print(main_m3u8Link) - # try: - # srtLink = str(re.search('"src":(.*?)\.srt"', playerSource).group(1)).strip().replace("[\"", "").replace("exp/", "") - # print(srtLink) - # except: - # pass - # - # if resolution.lower() in ["1080p", "fhd", "1080"]: - # m3u8LinkFinal = main_m3u8Link.replace(".m3u8", "_Layer9.m3u8") - # elif resolution.lower() in ["720p", "hd", "720"]: - # m3u8LinkFinal = main_m3u8Link.replace(".m3u8", "_Layer7.m3u8") - # elif resolution.lower() in ["540p", "sd", "540"]: - # m3u8LinkFinal = main_m3u8Link.replace(".m3u8", "_Layer5.m3u8") - # elif resolution.lower() in ["360p", "crap", "360"]: - # m3u8LinkFinal = main_m3u8Link.replace(".m3u8", "_Layer4.m3u8") - # elif resolution.lower() in ["270p", "cancer", "270"]: - # m3u8LinkFinal = main_m3u8Link.replace(".m3u8", "_Layer2.m3u8") - # elif resolution.lower() in ["234p", "killme", "234"]: - # m3u8LinkFinal = main_m3u8Link.replace(".m3u8", "_Layer1.m3u8") - # - # print(m3u8LinkFinal) - # ffmpegCommand = "ffmpeg -i \"%s\" -c copy \"%s\"" % (m3u8LinkFinal, fileName) - # call(ffmpegCommand) diff --git a/anime_dl/sites/supporters/__init__.py b/anime_dl/sites/supporters/__init__.py new file mode 100644 index 0000000..383d488 --- /dev/null +++ b/anime_dl/sites/supporters/__init__.py @@ -0,0 +1,3 @@ +import anime_name +import path_works +import sub_fetcher diff --git a/anime_dl/sites/supporters/anime_name.py b/anime_dl/sites/supporters/anime_name.py new file mode 100644 index 0000000..473ac98 --- /dev/null +++ b/anime_dl/sites/supporters/anime_name.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import re +import subprocess + + +def crunchyroll_name(anime_name, episode_number, resolution): + anime_name = str(anime_name).replace("039T", "'") + # rawName = str(animeName).title().strip().replace("Season ", "S") + " - " + \ + # str(episode_number).strip() + " [" + str(resolution) + "]" + + file_name = str(re.sub(r'[^A-Za-z0-9\ \-\' \\]+', '', str(anime_name))).title().strip().replace("Season ", "S") \ + + " - " + str(episode_number.zfill(2)).strip() + " [" + str(resolution) + "].mp4" + + 
try: +        max_path = int(subprocess.check_output(['getconf', 'PATH_MAX', '/'])) +    except Exception: +        max_path = 4096 + +    if len(file_name) > max_path: +        file_name = file_name[:max_path] + +    return file_name diff --git a/anime_dl/sites/supporters/path_works.py b/anime_dl/sites/supporters/path_works.py new file mode 100644 index 0000000..1ec9c1d --- /dev/null +++ b/anime_dl/sites/supporters/path_works.py @@ -0,0 +1,13 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import os + + +def path_creator(anime_name): +    output_directory = os.path.abspath("Output" + os.sep + str(anime_name) + "/") +    if not os.path.exists("Output"): +        os.makedirs("Output") +    if not os.path.exists(output_directory): +        os.makedirs(output_directory) +    return output_directory diff --git a/anime_dl/sites/supporters/sub_fetcher.py b/anime_dl/sites/supporters/sub_fetcher.py new file mode 100644 index 0000000..e8524a0 --- /dev/null +++ b/anime_dl/sites/supporters/sub_fetcher.py @@ -0,0 +1,144 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import anime_dl.common +from anime_dl.external.aes import aes_cbc_decrypt +from anime_dl.external.compat import compat_etree_fromstring +from anime_dl.external.utils import bytes_to_intlist, intlist_to_bytes +import re +import logging +import os +import base64 +import zlib +from hashlib import sha1 +from math import pow, sqrt, floor + + +def crunchyroll_subs(xml, episode_number, file_name): +    headers = { +        'User-Agent': +        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_1) AppleWebKit/601.2.7 (KHTML, like Gecko) Version/9.0.1 Safari/601.2.7', +        'Referer': +        'https://www.crunchyroll.com' +    } +    for sub_id, sub_lang, sub_lang2 in re.findall( +        r'subtitle_script_id\=(.*?)\"\ title\=\"\[(.*?)\]\ (.*?)\"', +        str(xml)): +        xml_return = anime_dl.common.browser_instance.page_downloader(url="http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id={0}".format(sub_id), headers=headers) + +        iv = str(re.search(r'\<iv\>(.*?)\<\/iv\>', str(xml_return)).group(1)).strip() +        data = str(re.search(r'\<data\>(.*?)\<\/data\>', str(xml_return)).group(1)).strip() +        subtitle = _decrypt_subtitles(data, iv, sub_id).decode('utf-8') +        sub_root = compat_etree_fromstring(subtitle) +        sub_data = _convert_subtitles_to_ass(sub_root) +        lang_code = str( +            re.search(r'lang_code\=\"(.*?)\"', str(subtitle)).group( +                1)).strip() +        sub_file_name = str(file_name).replace(".mp4", ".") + str(lang_code) + ".ass" + +        print("Downloading {0} ...".format(sub_file_name)) + +        try: +            with open(str(os.getcwd()) + "/" + str(sub_file_name), "wb") as sub_file: +                sub_file.write(sub_data.encode("utf-8")) +        except Exception as EncodingException: +            print("Couldn't write the subtitle file...skipping.") +            pass +    logging.debug("\n----- Subs Downloaded -----\n") +    return True + + +def _decrypt_subtitles(data, iv, id): +    data = bytes_to_intlist(base64.b64decode(data.encode('utf-8'))) +    iv = bytes_to_intlist(base64.b64decode(iv.encode('utf-8'))) +    id = int(id) + +    def obfuscate_key_aux(count, modulo, start): +        output = list(start) +        for _ in range(count): +            output.append(output[-1] + output[-2]) +        # cut off start values +        output = output[2:] +        output = list(map(lambda x: x % modulo + 33, output)) +        return output + +    def obfuscate_key(key): +        num1 = int(floor(pow(2, 25) * sqrt(6.9))) +        num2 = (num1 ^ key) << 5 +        num3 = key ^ num1 +        num4 = num3 ^ (num3 >> 3) ^ num2 +        prefix = intlist_to_bytes(obfuscate_key_aux(20, 97, (1, 2))) +        shaHash = bytes_to_intlist( +            sha1(prefix + str(num4).encode('ascii')).digest()) +        # Extend 160 Bit hash to 256 Bit + 
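# A SHA-1 digest is only 20 bytes (160 bits); the 12 zero ints appended on the next line pad the key to 32 bytes, the AES-256 key size that aes_cbc_decrypt expects. + 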
return shaHash + [0] * 12 + + key = obfuscate_key(id) + + decrypted_data = intlist_to_bytes(aes_cbc_decrypt(data, key, iv)) + return zlib.decompress(decrypted_data) + + +def _convert_subtitles_to_ass(sub_root): + output = '' + + def ass_bool(strvalue): + assvalue = '0' + if strvalue == '1': + assvalue = '-1' + return assvalue + + output = '[Script Info]\n' + output += 'Title: %s\n' % sub_root.attrib['title'] + output += 'ScriptType: v4.00+\n' + output += 'WrapStyle: %s\n' % sub_root.attrib['wrap_style'] + output += 'PlayResX: %s\n' % sub_root.attrib['play_res_x'] + output += 'PlayResY: %s\n' % sub_root.attrib['play_res_y'] + output += """ +[V4+ Styles] +Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding +""" + for style in sub_root.findall('./styles/style'): + output += 'Style: ' + style.attrib['name'] + output += ',' + style.attrib['font_name'] + output += ',' + style.attrib['font_size'] + output += ',' + style.attrib['primary_colour'] + output += ',' + style.attrib['secondary_colour'] + output += ',' + style.attrib['outline_colour'] + output += ',' + style.attrib['back_colour'] + output += ',' + ass_bool(style.attrib['bold']) + output += ',' + ass_bool(style.attrib['italic']) + output += ',' + ass_bool(style.attrib['underline']) + output += ',' + ass_bool(style.attrib['strikeout']) + output += ',' + style.attrib['scale_x'] + output += ',' + style.attrib['scale_y'] + output += ',' + style.attrib['spacing'] + output += ',' + style.attrib['angle'] + output += ',' + style.attrib['border_style'] + output += ',' + style.attrib['outline'] + output += ',' + style.attrib['shadow'] + output += ',' + style.attrib['alignment'] + output += ',' + style.attrib['margin_l'] + output += ',' + style.attrib['margin_r'] + output += ',' + style.attrib['margin_v'] + output += ',' + style.attrib['encoding'] + output += '\n' + + output += """ +[Events] +Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text +""" + for event in sub_root.findall('./events/event'): + output += 'Dialogue: 0' + output += ',' + event.attrib['start'] + output += ',' + event.attrib['end'] + output += ',' + event.attrib['style'] + output += ',' + event.attrib['name'] + output += ',' + event.attrib['margin_l'] + output += ',' + event.attrib['margin_r'] + output += ',' + event.attrib['margin_v'] + output += ',' + event.attrib['effect'] + output += ',' + event.attrib['text'] + output += '\n' + + return output diff --git a/anime_dl/sites/vrv.py b/anime_dl/sites/vrv.py deleted file mode 100644 index 868e26a..0000000 --- a/anime_dl/sites/vrv.py +++ /dev/null @@ -1,18 +0,0 @@ -from bs4 import BeautifulSoup -import re - - -class Vrv(object): - def __init__(self, url, password, username, resolution): - self.web_page_downloader(url, username, password) - - def web_page_downloader(self, url, username, password): - # Make a function to fetch the webpage. 
- with open("episode.html", "r") as rf: - page_source = rf.read() - clean_html = str(BeautifulSoup(page_source, "html.parser")) - # print(clean_html) - key_pair_id = re.search(r'Key-Pair-Id=(.*?)"', clean_html).group(1) - print(key_pair_id) - policy = re.search(r'Policy=(.*?)&Key-Pair-Id=', clean_html).group(1) - print(policy) \ No newline at end of file diff --git a/anime_dl/subtitles/__init__.py b/anime_dl/subtitles/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/anime_dl/version.py b/anime_dl/version.py index ec05f10..e94785f 100644 --- a/anime_dl/version.py +++ b/anime_dl/version.py @@ -1,2 +1 @@ -# Format : YY/MM/DD -__version__ = "2018.10.11" +__version__ = "2019.05.16" diff --git a/docs/Changelog.md b/docs/Changelog.md index 96105be..f317806 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -29,4 +29,6 @@ - Fix for #39 [2018.01.27] - Fix for #46 [2018.01.29] - Fix for #45 [2018.01.29] -- Temp fix for login #65, #66 [2018.10.11] \ No newline at end of file +- Temp fix for login #65, #66 [2018.10.11] +- Login Issue Fixed [2019.05.16] +- Re-structured the code for better maintainance and re-usability. [2019.05.16] \ No newline at end of file