diff --git a/.flake8 b/.flake8
deleted file mode 100644
index 9270bb728..000000000
--- a/.flake8
+++ /dev/null
@@ -1,2 +0,0 @@
-[flake8]
-max-line-length = 6000
\ No newline at end of file
diff --git a/.github/workflows/.flake8 b/.github/workflows/.flake8
deleted file mode 100644
index 229297b69..000000000
--- a/.github/workflows/.flake8
+++ /dev/null
@@ -1,2 +0,0 @@
-[flake8]
-max-line-length = 6000
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 85d13a19a..9a5cb970d 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -27,7 +27,8 @@ jobs:
     - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
-        pip install flake8
+        pip install ruff
    - name: Run linter
-      run: flake8 .
\ No newline at end of file
+      run: |
+        ruff check .
\ No newline at end of file
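Note on the lint migration above: the two deleted .flake8 files were the only lint configuration in the repository (max-line-length = 6000), and nothing replaces them, so `ruff check .` now runs on ruff's defaults, which select the E4/E7/E9/F rules and do not include the E501 line-length check. If an explicit limit is wanted again (for instance if E501 is later enabled), a minimal sketch of the equivalent setting, assuming a pyproject.toml at the repository root (not part of this patch):

    [tool.ruff]
    # Mirrors the deleted flake8 limit; only takes effect for rules that
    # measure line length, e.g. if "E501" is added to the selected rules.
    line-length = 6000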
await prep.gather_prep(meta=meta, mode="discord") # await ctx.send(file=discord.File(f"{base_dir}/tmp/{folder_id}/Mediainfo.json")) @@ -92,7 +100,7 @@ async def args(self, ctx): parser = Args(config) meta, help, before_args = parser.parse("", dict()) help = help.format_help() - help = help.split('optional')[1] + help = help.split("optional")[1] if len(help) > 2000: await ctx.send(f"```{help[:1990]}```") await ctx.send(f"```{help[1991:]}```") @@ -104,7 +112,7 @@ async def edit(self, ctx, uuid=None, *args): """ Edit uuid w/ args """ - if ctx.channel.id != int(config['DISCORD']['discord_channel_id']): + if ctx.channel.id != int(config["DISCORD"]["discord_channel_id"]): return if uuid is None: await ctx.send("Missing ID, please try again using the ID in the footer") @@ -117,19 +125,24 @@ async def edit(self, ctx, uuid=None, *args): except FileNotFoundError: await ctx.send("ID not found, please try again using the ID in the footer") return - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config) + prep = Prep(screens=meta["screens"], img_host=meta["imghost"], config=config) try: - args = (meta['path'],) + args + args = (meta["path"],) + args meta, help, before_args = parser.parse(args, meta) except argparse.ArgumentError as error: ctx.send(error) - msg = await ctx.fetch_message(meta['embed_msg_id']) + msg = await ctx.fetch_message(meta["embed_msg_id"]) await msg.delete() new_msg = await msg.channel.send(f"Editing {meta['uuid']}") - meta['embed_msg_id'] = new_msg.id - meta['edit'] = True + meta["embed_msg_id"] = new_msg.id + meta["edit"] = True meta = await prep.gather_prep(meta=meta, mode="discord") - meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await prep.get_name(meta) + ( + meta["name_notag"], + meta["name"], + meta["clean_name"], + meta["potential_missing"], + ) = await prep.get_name(meta) await self.send_embed_and_upload(ctx, meta) @commands.group(invoke_without_command=True) @@ -141,16 +154,18 @@ async def search(self, ctx, *, args=None): parser = Args(config) try: input_string = args - dict, parser, before_args = parser.parse(tuple(input_string.split(' ')), {}) + dict, parser, before_args = parser.parse(tuple(input_string.split(" ")), {}) search_terms = " ".join(before_args) - args = args.replace(search_terms, '') + args = args.replace(search_terms, "") while args.startswith(" "): args = args[1:] except SystemExit: - await ctx.send(f"Invalid argument detected, use `{config['DISCORD']['command_prefix']}args` for list of valid args") + await ctx.send( + f"Invalid argument detected, use `{config['DISCORD']['command_prefix']}args` for list of valid args" + ) return - if ctx.channel.id != int(config['DISCORD']['discord_channel_id']): + if ctx.channel.id != int(config["DISCORD"]["discord_channel_id"]): return search = Search(config=config) if search_terms is None: @@ -164,21 +179,37 @@ async def search(self, ctx, *, args=None): if not files_total: embed = discord.Embed(description="No files found") elif len(files_total) >= 2: - embed = discord.Embed(title=f"File search results for: `{search_terms}`", color=0x00ff40, description=f"```• {files}```") - embed.add_field(name="What Now?", value=f"Please be more specific or use `{config['DISCORD']['command_prefix']}search dir` to find a directory") + embed = discord.Embed( + title=f"File search results for: `{search_terms}`", + color=0x00FF40, + description=f"```• {files}```", + ) + embed.add_field( + name="What Now?", + value=f"Please be more specific or use 
`{config['DISCORD']['command_prefix']}search dir` to find a directory", + ) message = await ctx.send(embed=embed) return elif len(files_total) == 1: - embed = discord.Embed(title=f"File search results for: {search_terms}", color=0x00ff40, description=f"```{files}```") - embed.set_footer(text=f"{config['DISCORD']['discord_emojis']['UPLOAD']} to Upload") + embed = discord.Embed( + title=f"File search results for: {search_terms}", + color=0x00FF40, + description=f"```{files}```", + ) + embed.set_footer( + text=f"{config['DISCORD']['discord_emojis']['UPLOAD']} to Upload" + ) message = await ctx.send(embed=embed) - await message.add_reaction(config['DISCORD']['discord_emojis']['UPLOAD']) + await message.add_reaction(config["DISCORD"]["discord_emojis"]["UPLOAD"]) channel = message.channel def check(reaction, user): if reaction.message.id == message.id: - if str(user.id) == config['DISCORD']['admin_id']: - if str(reaction.emoji) == config['DISCORD']['discord_emojis']['UPLOAD']: + if str(user.id) == config["DISCORD"]["admin_id"]: + if ( + str(reaction.emoji) + == config["DISCORD"]["discord_emojis"]["UPLOAD"] + ): return reaction try: @@ -186,7 +217,12 @@ def check(reaction, user): except asyncio.TimeoutError: await channel.send(f"Search: `{search_terms}`timed out") else: - await self.upload(ctx, files_total[0], search_args=tuple(args.split(" ")), message_id=message.id) + await self.upload( + ctx, + files_total[0], + search_args=tuple(args.split(" ")), + message_id=message.id, + ) @search.command() async def dir(self, ctx, *, args=None): @@ -197,16 +233,18 @@ async def dir(self, ctx, *, args=None): parser = Args(config) try: input_string = args - dict, parser, before_args = parser.parse(tuple(input_string.split(' ')), {}) + dict, parser, before_args = parser.parse(tuple(input_string.split(" ")), {}) search_terms = " ".join(before_args) - args = args.replace(search_terms, '') + args = args.replace(search_terms, "") while args.startswith(" "): args = args[1:] except SystemExit: - await ctx.send(f"Invalid argument detected, use `{config['DISCORD']['command_prefix']}args` for list of valid args") + await ctx.send( + f"Invalid argument detected, use `{config['DISCORD']['command_prefix']}args` for list of valid args" + ) return - if ctx.channel.id != int(config['DISCORD']['discord_channel_id']): + if ctx.channel.id != int(config["DISCORD"]["discord_channel_id"]): return search = Search(config=config) if search_terms is None: @@ -220,21 +258,37 @@ async def dir(self, ctx, *, args=None): if not folders_total: embed = discord.Embed(description="No files found") elif len(folders_total) >= 2: - embed = discord.Embed(title=f"Directory search results for: `{search_terms}`", color=0x00ff40, description=f"```• {folders}```") - embed.add_field(name="What Now?", value=f"Please be more specific or use `{config['DISCORD']['command_prefix']}search dir` to find a directory") + embed = discord.Embed( + title=f"Directory search results for: `{search_terms}`", + color=0x00FF40, + description=f"```• {folders}```", + ) + embed.add_field( + name="What Now?", + value=f"Please be more specific or use `{config['DISCORD']['command_prefix']}search dir` to find a directory", + ) await ctx.send(embed=embed) return elif len(folders_total) == 1: - embed = discord.Embed(title=f"Directory search results for: {search_terms}", color=0x00ff40, description=f"```{folders}```") - embed.set_footer(text=f"{config['DISCORD']['discord_emojis']['UPLOAD']} to Upload") + embed = discord.Embed( + title=f"Directory search results for: 
{search_terms}", + color=0x00FF40, + description=f"```{folders}```", + ) + embed.set_footer( + text=f"{config['DISCORD']['discord_emojis']['UPLOAD']} to Upload" + ) message = await ctx.send(embed=embed) - await message.add_reaction(config['DISCORD']['discord_emojis']['UPLOAD']) + await message.add_reaction(config["DISCORD"]["discord_emojis"]["UPLOAD"]) channel = message.channel def check(reaction, user): if reaction.message.id == message.id: - if str(user.id) == config['DISCORD']['admin_id']: - if str(reaction.emoji) == config['DISCORD']['discord_emojis']['UPLOAD']: + if str(user.id) == config["DISCORD"]["admin_id"]: + if ( + str(reaction.emoji) + == config["DISCORD"]["discord_emojis"]["UPLOAD"] + ): return reaction try: @@ -242,147 +296,204 @@ def check(reaction, user): except asyncio.TimeoutError: await channel.send(f"Search: `{search_terms}`timed out") else: - await self.upload(ctx, path=folders_total[0], search_args=tuple(args.split(" ")), message_id=message.id) + await self.upload( + ctx, + path=folders_total[0], + search_args=tuple(args.split(" ")), + message_id=message.id, + ) # await ctx.send(folders_total) return async def send_embed_and_upload(self, ctx, meta): - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config) - meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await prep.get_name(meta) - - if meta.get('uploaded_screens', False) is False: - if meta.get('embed_msg_id', '0') != '0': - message = await ctx.fetch_message(meta['embed_msg_id']) - await message.edit(embed=discord.Embed(title="Uploading Screenshots", color=0xffff00)) + prep = Prep(screens=meta["screens"], img_host=meta["imghost"], config=config) + ( + meta["name_notag"], + meta["name"], + meta["clean_name"], + meta["potential_missing"], + ) = await prep.get_name(meta) + + if meta.get("uploaded_screens", False) is False: + if meta.get("embed_msg_id", "0") != "0": + message = await ctx.fetch_message(meta["embed_msg_id"]) + await message.edit( + embed=discord.Embed(title="Uploading Screenshots", color=0xFFFF00) + ) else: - message = await ctx.send(embed=discord.Embed(title="Uploading Screenshots", color=0xffff00)) - meta['embed_msg_id'] = message.id + message = await ctx.send( + embed=discord.Embed(title="Uploading Screenshots", color=0xFFFF00) + ) + meta["embed_msg_id"] = message.id channel = message.channel.id return_dict = multiprocessing.Manager().dict() - u = multiprocessing.Process(target=prep.upload_screens, args=(meta, meta['screens'], 1, 0, meta['screens'], [], return_dict)) + u = multiprocessing.Process( + target=prep.upload_screens, + args=(meta, meta["screens"], 1, 0, meta["screens"], [], return_dict), + ) u.start() while u.is_alive() is True: await asyncio.sleep(3) - meta['image_list'] = return_dict['image_list'] - if meta['debug']: - print(meta['image_list']) - meta['uploaded_screens'] = True + meta["image_list"] = return_dict["image_list"] + if meta["debug"]: + print(meta["image_list"]) + meta["uploaded_screens"] = True # Create base .torrent if len(glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent")) == 0: - if meta.get('embed_msg_id', '0') != '0': - message = await ctx.fetch_message(int(meta['embed_msg_id'])) - await message.edit(embed=discord.Embed(title="Creating .torrent", color=0xffff00)) + if meta.get("embed_msg_id", "0") != "0": + message = await ctx.fetch_message(int(meta["embed_msg_id"])) + await message.edit( + embed=discord.Embed(title="Creating .torrent", color=0xFFFF00) + ) else: - message = await 
ctx.send(embed=discord.Embed(title="Creating .torrent", color=0xffff00)) - meta['embed_msg_id'] = message.id + message = await ctx.send( + embed=discord.Embed(title="Creating .torrent", color=0xFFFF00) + ) + meta["embed_msg_id"] = message.id channel = message.channel - if meta['nohash'] is False: - if meta.get('torrenthash', None) is not None: - reuse_torrent = await client.find_existing_torrent(meta) # noqa F821 + if meta["nohash"] is False: + if meta.get("torrenthash", None) is not None: + reuse_torrent = await client.find_existing_torrent( + meta + ) # noqa F821 if reuse_torrent is not None: - prep.create_base_from_existing_torrent(reuse_torrent, meta['base_dir'], meta['uuid']) + prep.create_base_from_existing_torrent( + reuse_torrent, meta["base_dir"], meta["uuid"] + ) - p = multiprocessing.Process(target=prep.create_torrent, args=(meta, Path(meta['path']))) + p = multiprocessing.Process( + target=prep.create_torrent, args=(meta, Path(meta["path"])) + ) p.start() while p.is_alive() is True: await asyncio.sleep(5) - if int(meta.get('randomized', 0)) >= 1: - prep.create_random_torrents(meta['base_dir'], meta['uuid'], meta['randomized'], meta['path']) + if int(meta.get("randomized", 0)) >= 1: + prep.create_random_torrents( + meta["base_dir"], meta["uuid"], meta["randomized"], meta["path"] + ) else: - meta['client'] = 'none' + meta["client"] = "none" # Format for embed - if meta['tag'] == "": + if meta["tag"] == "": tag = "" else: tag = f" / {meta['tag'][1:]}" - if meta['imdb_id'] == "0": + if meta["imdb_id"] == "0": imdb = "" else: imdb = f" / [IMDb](https://www.imdb.com/title/tt{meta['imdb_id']})" - if meta['tvdb_id'] == "0": + if meta["tvdb_id"] == "0": tvdb = "" else: - tvdb = f" / [TVDB](https://www.thetvdb.com/?id={meta['tvdb_id']}&tab=series)" - if meta['is_disc'] == "DVD": - res = meta['source'] + tvdb = ( + f" / [TVDB](https://www.thetvdb.com/?id={meta['tvdb_id']}&tab=series)" + ) + if meta["is_disc"] == "DVD": + res = meta["source"] else: - res = meta['resolution'] + res = meta["resolution"] missing = await self.get_missing(meta) embed = discord.Embed( title=f"Upload: {meta['title']}", url=f"https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}", - description=meta['overview'], - color=0x0080ff, - timestamp=datetime.utcnow() + description=meta["overview"], + color=0x0080FF, + timestamp=datetime.utcnow(), + ) + embed.add_field( + name="Links", + value=f"[TMDB](https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}){imdb}{tvdb}", + ) + embed.add_field( + name=f"{res} / {meta['type']}{tag}", + value=f"```{meta['name']}```", + inline=False, ) - embed.add_field(name="Links", value=f"[TMDB](https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}){imdb}{tvdb}") - embed.add_field(name=f"{res} / {meta['type']}{tag}", value=f"```{meta['name']}```", inline=False) if missing != []: - embed.add_field(name="POTENTIALLY MISSING INFORMATION:", value="\n".join(missing), inline=False) + embed.add_field( + name="POTENTIALLY MISSING INFORMATION:", + value="\n".join(missing), + inline=False, + ) embed.set_thumbnail(url=f"https://image.tmdb.org/t/p/original{meta['poster']}") - embed.set_footer(text=meta['uuid']) - embed.set_author(name="L4G's Upload Assistant", url="https://github.com/Audionut/Upload-Assistant", icon_url="https://images2.imgbox.com/6e/da/dXfdgNYs_o.png") + embed.set_footer(text=meta["uuid"]) + embed.set_author( + name="L4G's Upload Assistant", + url="https://github.com/Audionut/Upload-Assistant", + 
icon_url="https://images2.imgbox.com/6e/da/dXfdgNYs_o.png", + ) - message = await ctx.fetch_message(meta['embed_msg_id']) + message = await ctx.fetch_message(meta["embed_msg_id"]) await message.edit(embed=embed) - if meta.get('trackers', None) is not None: - trackers = meta['trackers'] + if meta.get("trackers", None) is not None: + trackers = meta["trackers"] else: - trackers = config['TRACKERS']['default_trackers'] - trackers = trackers.split(',') + trackers = config["TRACKERS"]["default_trackers"] + trackers = trackers.split(",") for each in trackers: - if "BLU" in each.replace(' ', ''): - await message.add_reaction(config['DISCORD']['discord_emojis']['BLU']) + if "BLU" in each.replace(" ", ""): + await message.add_reaction(config["DISCORD"]["discord_emojis"]["BLU"]) await asyncio.sleep(0.3) - if "BHD" in each.replace(' ', ''): - await message.add_reaction(config['DISCORD']['discord_emojis']['BHD']) + if "BHD" in each.replace(" ", ""): + await message.add_reaction(config["DISCORD"]["discord_emojis"]["BHD"]) await asyncio.sleep(0.3) - if "AITHER" in each.replace(' ', ''): - await message.add_reaction(config['DISCORD']['discord_emojis']['AITHER']) + if "AITHER" in each.replace(" ", ""): + await message.add_reaction( + config["DISCORD"]["discord_emojis"]["AITHER"] + ) await asyncio.sleep(0.3) - if "STC" in each.replace(' ', ''): - await message.add_reaction(config['DISCORD']['discord_emojis']['STC']) + if "STC" in each.replace(" ", ""): + await message.add_reaction(config["DISCORD"]["discord_emojis"]["STC"]) await asyncio.sleep(0.3) - if "LCD" in each.replace(' ', ''): - await message.add_reaction(config['DISCORD']['discord_emojis']['LCD']) + if "LCD" in each.replace(" ", ""): + await message.add_reaction(config["DISCORD"]["discord_emojis"]["LCD"]) await asyncio.sleep(0.3) - if "CBR" in each.replace(' ', ''): - await message.add_reaction(config['DISCORD']['discord_emojis']['CBR']) + if "CBR" in each.replace(" ", ""): + await message.add_reaction(config["DISCORD"]["discord_emojis"]["CBR"]) await asyncio.sleep(0.3) - await message.add_reaction(config['DISCORD']['discord_emojis']['MANUAL']) + await message.add_reaction(config["DISCORD"]["discord_emojis"]["MANUAL"]) await asyncio.sleep(0.3) - await message.add_reaction(config['DISCORD']['discord_emojis']['CANCEL']) + await message.add_reaction(config["DISCORD"]["discord_emojis"]["CANCEL"]) await asyncio.sleep(0.3) - await message.add_reaction(config['DISCORD']['discord_emojis']['UPLOAD']) + await message.add_reaction(config["DISCORD"]["discord_emojis"]["UPLOAD"]) # Save meta to json - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", "w") as f: json.dump(meta, f, indent=4) f.close() def check(reaction, user): - if reaction.message.id == meta['embed_msg_id']: - if str(user.id) == config['DISCORD']['admin_id']: - if str(reaction.emoji) == config['DISCORD']['discord_emojis']['UPLOAD']: + if reaction.message.id == meta["embed_msg_id"]: + if str(user.id) == config["DISCORD"]["admin_id"]: + if ( + str(reaction.emoji) + == config["DISCORD"]["discord_emojis"]["UPLOAD"] + ): return reaction - if str(reaction.emoji) == config['DISCORD']['discord_emojis']['CANCEL']: - if meta['embed_msg_id']: + if ( + str(reaction.emoji) + == config["DISCORD"]["discord_emojis"]["CANCEL"] + ): + if meta["embed_msg_id"]: pass raise CancelException # if str(reaction.emoji) == config['DISCORD']['discord_emojis']['MANUAL']: # raise ManualException + try: await 
self.bot.wait_for("reaction_add", timeout=43200, check=check) except asyncio.TimeoutError: try: - msg = await ctx.fetch_message(meta['embed_msg_id']) - timeout_embed = discord.Embed(title=f"{meta['title']} has timed out", color=0xff0000) + msg = await ctx.fetch_message(meta["embed_msg_id"]) + timeout_embed = discord.Embed( + title=f"{meta['title']} has timed out", color=0xFF0000 + ) await msg.clear_reactions() await msg.edit(embed=timeout_embed) return @@ -390,8 +501,10 @@ def check(reaction, user): print("timeout after edit") pass except CancelException: - msg = await ctx.fetch_message(meta['embed_msg_id']) - cancel_embed = discord.Embed(title=f"{meta['title']} has been cancelled", color=0xff0000) + msg = await ctx.fetch_message(meta["embed_msg_id"]) + cancel_embed = discord.Embed( + title=f"{meta['title']} has been cancelled", color=0xFF0000 + ) await msg.clear_reactions() await msg.edit(embed=cancel_embed) return @@ -400,17 +513,25 @@ def check(reaction, user): # Check which are selected and upload to them msg = await ctx.fetch_message(message.id) tracker_list = list() - tracker_emojis = config['DISCORD']['discord_emojis'] + tracker_emojis = config["DISCORD"]["discord_emojis"] while not tracker_list: await asyncio.sleep(1) for each in msg.reactions: if each.count >= 2: - tracker = list(config['DISCORD']['discord_emojis'].keys())[list(config['DISCORD']['discord_emojis'].values()).index(str(each))] + tracker = list(config["DISCORD"]["discord_emojis"].keys())[ + list(config["DISCORD"]["discord_emojis"].values()).index( + str(each) + ) + ] if tracker not in ("UPLOAD"): tracker_list.append(tracker) - upload_embed_description = ' / '.join(tracker_list) - upload_embed = discord.Embed(title=f"Uploading `{meta['name']}` to:", description=upload_embed_description, color=0x00ff40) + upload_embed_description = " / ".join(tracker_list) + upload_embed = discord.Embed( + title=f"Uploading `{meta['name']}` to:", + description=upload_embed_description, + color=0x00FF40, + ) await msg.edit(embed=upload_embed) await msg.clear_reactions() @@ -437,99 +558,157 @@ def check(reaction, user): cbr = CBR(config=config) await cbr.edit_desc(meta) archive_url = await prep.package(meta) - upload_embed_description = upload_embed_description.replace('MANUAL', '~~MANUAL~~') + upload_embed_description = upload_embed_description.replace( + "MANUAL", "~~MANUAL~~" + ) if archive_url is False: - upload_embed = discord.Embed(title=f"Uploaded `{meta['name']}` to:", description=upload_embed_description, color=0xff0000) - upload_embed.add_field(name="Unable to upload prep files", value=f"The files can be found at `tmp/{meta['title']}.tar`") + upload_embed = discord.Embed( + title=f"Uploaded `{meta['name']}` to:", + description=upload_embed_description, + color=0xFF0000, + ) + upload_embed.add_field( + name="Unable to upload prep files", + value=f"The files can be found at `tmp/{meta['title']}.tar`", + ) await msg.edit(embed=upload_embed) else: - upload_embed = discord.Embed(title=f"Uploaded `{meta['name']}` to:", description=upload_embed_description, color=0x00ff40) - upload_embed.add_field(name="Files can be found at:", value=f"{archive_url} or `tmp/{meta['uuid']}`") + upload_embed = discord.Embed( + title=f"Uploaded `{meta['name']}` to:", + description=upload_embed_description, + color=0x00FF40, + ) + upload_embed.add_field( + name="Files can be found at:", + value=f"{archive_url} or `tmp/{meta['uuid']}`", + ) await msg.edit(embed=upload_embed) if "BLU" in tracker_list: blu = BLU(config=config) dupes = await 
blu.search_existing(meta) meta = await self.dupe_embed(dupes, meta, tracker_emojis, channel) - if meta['upload'] is True: + if meta["upload"] is True: await blu.upload(meta) await client.add_to_client(meta, "BLU") - upload_embed_description = upload_embed_description.replace('BLU', '~~BLU~~') - upload_embed = discord.Embed(title=f"Uploaded `{meta['name']}` to:", description=upload_embed_description, color=0x00ff40) + upload_embed_description = upload_embed_description.replace( + "BLU", "~~BLU~~" + ) + upload_embed = discord.Embed( + title=f"Uploaded `{meta['name']}` to:", + description=upload_embed_description, + color=0x00FF40, + ) await msg.edit(embed=upload_embed) if "BHD" in tracker_list: bhd = BHD(config=config) dupes = await bhd.search_existing(meta) meta = await self.dupe_embed(dupes, meta, tracker_emojis, channel) - if meta['upload'] is True: + if meta["upload"] is True: await bhd.upload(meta) await client.add_to_client(meta, "BHD") - upload_embed_description = upload_embed_description.replace('BHD', '~~BHD~~') - upload_embed = discord.Embed(title=f"Uploaded `{meta['name']}` to:", description=upload_embed_description, color=0x00ff40) + upload_embed_description = upload_embed_description.replace( + "BHD", "~~BHD~~" + ) + upload_embed = discord.Embed( + title=f"Uploaded `{meta['name']}` to:", + description=upload_embed_description, + color=0x00FF40, + ) await msg.edit(embed=upload_embed) if "AITHER" in tracker_list: aither = AITHER(config=config) dupes = await aither.search_existing(meta) meta = await self.dupe_embed(dupes, meta, tracker_emojis, channel) - if meta['upload'] is True: + if meta["upload"] is True: await aither.upload(meta) await client.add_to_client(meta, "AITHER") - upload_embed_description = upload_embed_description.replace('AITHER', '~~AITHER~~') - upload_embed = discord.Embed(title=f"Uploaded `{meta['name']}` to:", description=upload_embed_description, color=0x00ff40) + upload_embed_description = upload_embed_description.replace( + "AITHER", "~~AITHER~~" + ) + upload_embed = discord.Embed( + title=f"Uploaded `{meta['name']}` to:", + description=upload_embed_description, + color=0x00FF40, + ) await msg.edit(embed=upload_embed) if "STC" in tracker_list: stc = STC(config=config) dupes = await stc.search_existing(meta) meta = await self.dupe_embed(dupes, meta, tracker_emojis, channel) - if meta['upload'] is True: + if meta["upload"] is True: await stc.upload(meta) await client.add_to_client(meta, "STC") - upload_embed_description = upload_embed_description.replace('STC', '~~STC~~') - upload_embed = discord.Embed(title=f"Uploaded `{meta['name']}` to:", description=upload_embed_description, color=0x00ff40) + upload_embed_description = upload_embed_description.replace( + "STC", "~~STC~~" + ) + upload_embed = discord.Embed( + title=f"Uploaded `{meta['name']}` to:", + description=upload_embed_description, + color=0x00FF40, + ) await msg.edit(embed=upload_embed) if "LCD" in tracker_list: lcd = LCD(config=config) dupes = await lcd.search_existing(meta) meta = await self.dupe_embed(dupes, meta, tracker_emojis, channel) - if meta['upload'] is True: + if meta["upload"] is True: await lcd.upload(meta) await client.add_to_client(meta, "LCD") - upload_embed_description = upload_embed_description.replace('LCD', '~~LCD~~') - upload_embed = discord.Embed(title=f"Uploaded `{meta['name']}` to:", description=upload_embed_description, color=0x00ff40) + upload_embed_description = upload_embed_description.replace( + "LCD", "~~LCD~~" + ) + upload_embed = discord.Embed( + title=f"Uploaded 
`{meta['name']}` to:", + description=upload_embed_description, + color=0x00FF40, + ) await msg.edit(embed=upload_embed) if "CBR" in tracker_list: cbr = CBR(config=config) dupes = await cbr.search_existing(meta) meta = await self.dupe_embed(dupes, meta, tracker_emojis, channel) - if meta['upload'] is True: + if meta["upload"] is True: await cbr.upload(meta) await client.add_to_client(meta, "CBR") - upload_embed_description = upload_embed_description.replace('CBR', '~~CBR~~') - upload_embed = discord.Embed(title=f"Uploaded `{meta['name']}` to:", description=upload_embed_description, color=0x00ff40) + upload_embed_description = upload_embed_description.replace( + "CBR", "~~CBR~~" + ) + upload_embed = discord.Embed( + title=f"Uploaded `{meta['name']}` to:", + description=upload_embed_description, + color=0x00FF40, + ) await msg.edit(embed=upload_embed) return None async def dupe_embed(self, dupes, meta, emojis, channel): if not dupes: print("No dupes found") - meta['upload'] = True + meta["upload"] = True return meta else: dupe_text = "\n\n•".join(dupes) dupe_text = f"```•{dupe_text}```" - embed = discord.Embed(title="Check if these are actually dupes!", description=dupe_text, color=0xff0000) - embed.set_footer(text=f"{emojis['CANCEL']} to abort upload | {emojis['UPLOAD']} to upload anyways") + embed = discord.Embed( + title="Check if these are actually dupes!", + description=dupe_text, + color=0xFF0000, + ) + embed.set_footer( + text=f"{emojis['CANCEL']} to abort upload | {emojis['UPLOAD']} to upload anyways" + ) message = await channel.send(embed=embed) - await message.add_reaction(emojis['CANCEL']) + await message.add_reaction(emojis["CANCEL"]) await asyncio.sleep(0.3) - await message.add_reaction(emojis['UPLOAD']) + await message.add_reaction(emojis["UPLOAD"]) def check(reaction, user): if reaction.message.id == message.id: - if str(user.id) == config['DISCORD']['admin_id']: - if str(reaction.emoji) == emojis['UPLOAD']: + if str(user.id) == config["DISCORD"]["admin_id"]: + if str(reaction.emoji) == emojis["UPLOAD"]: return reaction - if str(reaction.emoji) == emojis['CANCEL']: - if meta['embed_msg_id']: + if str(reaction.emoji) == emojis["CANCEL"]: + if meta["embed_msg_id"]: pass raise CancelException @@ -538,28 +717,31 @@ def check(reaction, user): except asyncio.TimeoutError: try: await channel.send(f"{meta['uuid']} timed out") - meta['upload'] = False + meta["upload"] = False except Exception: return except CancelException: await channel.send(f"{meta['title']} cancelled") - meta['upload'] = False + meta["upload"] = False else: - meta['upload'] = True + meta["upload"] = True for each in dupes: - if each == meta['name']: - meta['name'] = f"{meta['name']} DUPE?" + if each == meta["name"]: + meta["name"] = f"{meta['name']} DUPE?" 
finally: await message.delete() return meta async def get_missing(self, meta): missing = [] - if meta.get('imdb_id', '0') == '0': - missing.append('--imdb') - if isinstance(meta['potential_missing'], list) and len(meta['potential_missing']) > 0: - for each in meta['potential_missing']: - if meta.get(each, '').replace(' ', '') == "": + if meta.get("imdb_id", "0") == "0": + missing.append("--imdb") + if ( + isinstance(meta["potential_missing"], list) + and len(meta["potential_missing"]) > 0 + ): + for each in meta["potential_missing"]: + if meta.get(each, "").replace(" ", "") == "": missing.append(f"--{each}") return missing diff --git a/data/example-config.py b/data/example-config.py index ca13194b5..4ffe75a95 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -1,17 +1,14 @@ config = { "DEFAULT": { - # ------ READ THIS ------ # Any lines starting with the # symbol are commented and will not be used. # If you change any of these options, remove the # # ----------------------- - "tmdb_api": "tmdb_api key", "imgbb_api": "imgbb api key", "ptpimg_api": "ptpimg api key", "lensdump_api": "lensdump api key", "ptscreens_api": "ptscreens api key", - # Order of image hosts, and backup image hosts "img_host_1": "imgbb", "img_host_2": "ptpimg", @@ -19,27 +16,19 @@ "img_host_4": "pixhost", "img_host_5": "lensdump", "img_host_6": "ptscreens", - - "screens": "6", # Enable lossless PNG Compression (True/False) "optimize_images": True, - - # The name of your default torrent client, set in the torrent client sections below "default_torrent_client": "Client1", - # Play the bell sound effect when asking for confirmation "sfx_on_prompt": True, - }, - "TRACKERS": { # Which trackers do you want to upload to? # Available tracker: BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL, HDB, TIK, PSS, ULCX # Remove the trackers from the default_trackers list that are not used, to save being asked everytime "default_trackers": "BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL, HDB, TIK, PSS, ULCX", - "BLU": { "useAPI": False, # Set to True if using BLU "api_key": "BLU api key", @@ -64,10 +53,10 @@ "useAPI": False, # Set to True if using PTP "add_web_source_to_desc": True, "ApiUser": "ptp api user", - "ApiKey": 'ptp api key', + "ApiKey": "ptp api key", "username": "", "password": "", - "announce_url": "" + "announce_url": "", }, "AITHER": { "useAPI": False, # Set to True if using Aither @@ -87,11 +76,11 @@ # "anon" : False }, "MTV": { - 'api_key': 'get from security page', - 'username': '', - 'password': '', - 'announce_url': "get from https://www.morethantv.me/upload.php", - 'anon': False, + "api_key": "get from security page", + "username": "", + "password": "", + "announce_url": "get from https://www.morethantv.me/upload.php", + "anon": False, # 'otp_uri' : 'OTP URI, read the following for more information https://github.com/google/google-authenticator/wiki/Key-Uri-Format' }, "STC": { @@ -117,7 +106,6 @@ "api_key": "ACM api key", "announce_url": "https://asiancinema.me/announce/customannounceurl", # "anon" : False, - # FOR INTERNAL USE ONLY: # "internal" : True, # "internal_groups" : ["What", "Internal", "Groups", "Are", "You", "In"], @@ -165,7 +153,7 @@ # "anon" : False }, "PTER": { - "passkey": 'passkey', + "passkey": "passkey", "img_rehost": False, "username": "", "password": "", @@ 
-196,10 +184,10 @@ "RTF": { "username": "username", "password": "password", - "api_key": 'get_it_by_running_/api/ login command from https://retroflix.club/api/doc', + "api_key": "get_it_by_running_/api/ login command from https://retroflix.club/api/doc", "announce_url": "get from upload page", # "tag": "RetroFlix, nd", - "anon": True + "anon": True, }, "RF": { "api_key": "RF api key", @@ -261,7 +249,6 @@ # "filebrowser" : "https://domain.tld/filebrowser/files/Upload-Assistant/" }, }, - # enable_search to True will automatically try and find a suitable hash to save having to rehash when creating torrents # Should use the qbit API, but will also use the torrent_storage_dir to find suitable hashes # If you find issue, use the "--debug" argument to print out some related details @@ -277,7 +264,6 @@ "qbit_user": "username", "qbit_pass": "password", # "torrent_storage_dir": "path/to/BT_backup folder" ## use double-backslash on windows eg: "C:\\client\\backup" - # Remote path mapping (docker/etc.) CASE SENSITIVE # "local_path": "/LocalPath", # "remote_path": "/RemotePath" @@ -292,10 +278,8 @@ # "torrent_storage_dir": "path/to/BT_backup folder" # "qbit_tag": "tag", # "qbit_cat": "category" - # Content Layout for adding .torrents: "Original"(recommended)/"Subfolder"/"NoSubfolder" - "content_layout": "Original" - + "content_layout": "Original", # Enable automatic torrent management if listed path(s) are present in the path # If using remote path mapping, use remote path # For using multiple paths, use a list ["path1", "path2"] @@ -303,21 +287,17 @@ # Remote path mapping (docker/etc.) CASE SENSITIVE # "local_path" : "E:\\downloads\\tv", # "remote_path" : "/remote/downloads/tv" - # Set to False to skip verify certificate for HTTPS connections; for instance, if the connection is using a self-signed certificate. # "VERIFY_WEBUI_CERTIFICATE" : True }, - "rtorrent_sample": { "torrent_client": "rtorrent", "rtorrent_url": "https://user:password@server.host.tld:443/username/rutorrent/plugins/httprpc/action.php", # "torrent_storage_dir" : "path/to/session folder", # "rtorrent_label" : "Add this label to all uploads" - # Remote path mapping (docker/etc.) CASE SENSITIVE # "local_path" : "/LocalPath", # "remote_path" : "/RemotePath" - }, "deluge_sample": { "torrent_client": "deluge", @@ -326,25 +306,21 @@ "deluge_user": "username", "deluge_pass": "password", # "torrent_storage_dir" : "path/to/session folder", - # Remote path mapping (docker/etc.) 
CASE SENSITIVE # "local_path" : "/LocalPath", # "remote_path" : "/RemotePath" }, "watch_sample": { "torrent_client": "watch", - "watch_folder": "/Path/To/Watch/Folder" + "watch_folder": "/Path/To/Watch/Folder", }, - }, - "DISCORD": { "discord_bot_token": "discord bot token", "discord_bot_description": "L4G's Upload Assistant", "command_prefix": "!", "discord_channel_id": "discord channel id for use", "admin_id": "your discord user id", - "search_dir": "Path/to/downloads/folder/ this is used for search", # Alternatively, search multiple folders: # "search_dir" : [ @@ -359,7 +335,7 @@ "ACM": "🍙", "MANUAL": "📩", "UPLOAD": "✅", - "CANCEL": "🚫" - } - } + "CANCEL": "🚫", + }, + }, } diff --git a/src/args.py b/src/args.py index 66286fd85..872143547 100644 --- a/src/args.py +++ b/src/args.py @@ -7,10 +7,11 @@ from src.console import console -class Args(): +class Args: """ Parse Args """ + def __init__(self, config): self.config = config pass @@ -19,195 +20,653 @@ def parse(self, args, meta): input = args parser = argparse.ArgumentParser() - parser.add_argument('path', nargs='*', help="Path to file/directory") - parser.add_argument('-s', '--screens', nargs='*', required=False, help="Number of screenshots", default=int(self.config['DEFAULT']['screens'])) - parser.add_argument('-c', '--category', nargs='*', required=False, help="Category [MOVIE, TV, FANRES]", choices=['movie', 'tv', 'fanres']) - parser.add_argument('-t', '--type', nargs='*', required=False, help="Type [DISC, REMUX, ENCODE, WEBDL, WEBRIP, HDTV]", choices=['disc', 'remux', 'encode', 'webdl', 'web-dl', 'webrip', 'hdtv']) - parser.add_argument('--source', nargs='*', required=False, help="Source [Blu-ray, BluRay, DVD, HDDVD, WEB, HDTV, UHDTV]", choices=['Blu-ray', 'BluRay', 'DVD', 'HDDVD', 'WEB', 'HDTV', 'UHDTV'], dest="manual_source") - parser.add_argument('-res', '--resolution', nargs='*', required=False, help="Resolution [2160p, 1080p, 1080i, 720p, 576p, 576i, 480p, 480i, 8640p, 4320p, OTHER]", choices=['2160p', '1080p', '1080i', '720p', '576p', '576i', '480p', '480i', '8640p', '4320p', 'other']) - parser.add_argument('-tmdb', '--tmdb', nargs='*', required=False, help="TMDb ID", type=str, dest='tmdb_manual') - parser.add_argument('-imdb', '--imdb', nargs='*', required=False, help="IMDb ID", type=str) - parser.add_argument('-mal', '--mal', nargs='*', required=False, help="MAL ID", type=str) - parser.add_argument('-g', '--tag', nargs='*', required=False, help="Group Tag", type=str) - parser.add_argument('-serv', '--service', nargs='*', required=False, help="Streaming Service", type=str) - parser.add_argument('-dist', '--distributor', nargs='*', required=False, help="Disc Distributor e.g.(Criterion, BFI, etc.)", type=str) - parser.add_argument('-edition', '--edition', '--repack', nargs='*', required=False, help="Edition/Repack String e.g.(Director's Cut, Uncut, Hybrid, REPACK, REPACK3)", type=str, dest='manual_edition', default="") - parser.add_argument('-season', '--season', nargs='*', required=False, help="Season (number)", type=str) - parser.add_argument('-episode', '--episode', nargs='*', required=False, help="Episode (number)", type=str) - parser.add_argument('-daily', '--daily', nargs=1, required=False, help="Air date of this episode (YYYY-MM-DD)", type=datetime.date.fromisoformat, dest="manual_date") - parser.add_argument('--no-season', dest='no_season', action='store_true', required=False, help="Remove Season from title") - parser.add_argument('--no-year', dest='no_year', action='store_true', required=False, help="Remove Year from 
title") - parser.add_argument('--no-aka', dest='no_aka', action='store_true', required=False, help="Remove AKA from title") - parser.add_argument('--no-dub', dest='no_dub', action='store_true', required=False, help="Remove Dubbed from title") - parser.add_argument('--no-tag', dest='no_tag', action='store_true', required=False, help="Remove Group Tag from title") - parser.add_argument('-ns', '--no-seed', action='store_true', required=False, help="Do not add torrent to the client") - parser.add_argument('-year', '--year', dest='manual_year', nargs='?', required=False, help="Year", type=int, default=0) - parser.add_argument('-ptp', '--ptp', nargs='*', required=False, help="PTP torrent id/permalink", type=str) - parser.add_argument('-blu', '--blu', nargs='*', required=False, help="BLU torrent id/link", type=str) - parser.add_argument('-aither', '--aither', nargs='*', required=False, help="Aither torrent id/link", type=str) - parser.add_argument('-lst', '--lst', nargs='*', required=False, help="LST torrent id/link", type=str) - parser.add_argument('-oe', '--oe', nargs='*', required=False, help="OE torrent id/link", type=str) - parser.add_argument('-tik', '--tik', nargs='*', required=False, help="TIK torrent id/link", type=str) - parser.add_argument('-hdb', '--hdb', nargs='*', required=False, help="HDB torrent id/link", type=str) - parser.add_argument('--foreign', dest='foreign', action='store_true', required=False, help="Set for TIK Foreign category") - parser.add_argument('--opera', dest='opera', action='store_true', required=False, help="Set for TIK Opera & Musical category") - parser.add_argument('--asian', dest='asian', action='store_true', required=False, help="Set for TIK Asian category") - parser.add_argument('-disctype', '--disctype', nargs='*', required=False, help="Type of disc for TIK (BD100, BD66, BD50, BD25, NTSC DVD9, NTSC DVD5, PAL DVD9, PAL DVD5, Custom, 3D)", type=str) - parser.add_argument('--untouched', dest='untouched', action='store_true', required=False, help="Set when a completely untouched disc at TIK") - parser.add_argument('-d', '--desc', nargs='*', required=False, help="Custom Description (string)") - parser.add_argument('-manual_dvds', '--manual_dvds', nargs='*', required=False, help="Override the default number of DVD's (eg: use 2xDVD9+DVD5 instead)", type=str, dest='manual_dvds', default="") - parser.add_argument('-pb', '--desclink', nargs='*', required=False, help="Custom Description (link to hastebin/pastebin)") - parser.add_argument('-df', '--descfile', nargs='*', required=False, help="Custom Description (path to file)") - parser.add_argument('-ih', '--imghost', nargs='*', required=False, help="Image Host", choices=['imgbb', 'ptpimg', 'imgbox', 'pixhost', 'lensdump', 'ptscreens']) - parser.add_argument('-siu', '--skip-imagehost-upload', dest='skip_imghost_upload', action='store_true', required=False, help="Skip Uploading to an image host") - parser.add_argument('-th', '--torrenthash', nargs='*', required=False, help="Torrent Hash to re-use from your client's session directory") - parser.add_argument('-nfo', '--nfo', action='store_true', required=False, help="Use .nfo in directory for description") - parser.add_argument('-k', '--keywords', nargs='*', required=False, help="Add comma seperated keywords e.g. 'keyword, keyword2, etc'") - parser.add_argument('-kf', '--keep-folder', action='store_true', required=False, help="Keep the folder containing the single file. Works only when supplying a directory as input. 
For uploads with poor filenames, like some scene.") - parser.add_argument('-reg', '--region', nargs='*', required=False, help="Region for discs") - parser.add_argument('-a', '--anon', action='store_true', required=False, help="Upload anonymously") - parser.add_argument('-st', '--stream', action='store_true', required=False, help="Stream Optimized Upload") - parser.add_argument('-webdv', '--webdv', action='store_true', required=False, help="Contains a Dolby Vision layer converted using dovi_tool") - parser.add_argument('-hc', '--hardcoded-subs', action='store_true', required=False, help="Contains hardcoded subs", dest="hardcoded-subs") - parser.add_argument('-pr', '--personalrelease', action='store_true', required=False, help="Personal Release") - parser.add_argument('-sdc', '--skip-dupe-check', action='store_true', required=False, help="Pass if you know this is a dupe (Skips dupe check)", dest="dupe") - parser.add_argument('-debug', '--debug', action='store_true', required=False, help="Debug Mode, will run through all the motions providing extra info, but will not upload to trackers.") - parser.add_argument('-ffdebug', '--ffdebug', action='store_true', required=False, help="Will show info from ffmpeg while taking screenshots.") - parser.add_argument('-m', '--manual', action='store_true', required=False, help="Manual Mode. Returns link to ddl screens/base.torrent") - parser.add_argument('-mps', '--max-piece-size', nargs='*', required=False, help="Set max piece size allowed in MiB for default torrent creation (default 64 MiB)", choices=['2', '4', '8', '16', '32', '64']) - parser.add_argument('-nh', '--nohash', action='store_true', required=False, help="Don't hash .torrent") - parser.add_argument('-rh', '--rehash', action='store_true', required=False, help="DO hash .torrent") - parser.add_argument('-dr', '--draft', action='store_true', required=False, help="Send to drafts (BHD, LST)") - parser.add_argument('-mq', '--modq', action='store_true', required=False, help="Send to modQ") - parser.add_argument('-client', '--client', nargs='*', required=False, help="Use this torrent client instead of default") - parser.add_argument('-qbt', '--qbit-tag', dest='qbit_tag', nargs='*', required=False, help="Add to qbit with this tag") - parser.add_argument('-qbc', '--qbit-cat', dest='qbit_cat', nargs='*', required=False, help="Add to qbit with this category") - parser.add_argument('-rtl', '--rtorrent-label', dest='rtorrent_label', nargs='*', required=False, help="Add to rtorrent with this label") - parser.add_argument('-tk', '--trackers', nargs='*', required=False, help="Upload to these trackers, space seperated (--trackers blu bhd)") - parser.add_argument('-rt', '--randomized', nargs='*', required=False, help="Number of extra, torrents with random infohash", default=0) - parser.add_argument('-ua', '--unattended', action='store_true', required=False, help=argparse.SUPPRESS) - parser.add_argument('-vs', '--vapoursynth', action='store_true', required=False, help="Use vapoursynth for screens (requires vs install)") - parser.add_argument('-cleanup', '--cleanup', action='store_true', required=False, help="Clean up tmp directory") - parser.add_argument('-fl', '--freeleech', nargs='*', required=False, help="Freeleech Percentage", default=0, dest="freeleech") + parser.add_argument("path", nargs="*", help="Path to file/directory") + parser.add_argument( + "-s", + "--screens", + nargs="*", + required=False, + help="Number of screenshots", + default=int(self.config["DEFAULT"]["screens"]), + ) + parser.add_argument( + 
"-c", + "--category", + nargs="*", + required=False, + help="Category [MOVIE, TV, FANRES]", + choices=["movie", "tv", "fanres"], + ) + parser.add_argument( + "-t", + "--type", + nargs="*", + required=False, + help="Type [DISC, REMUX, ENCODE, WEBDL, WEBRIP, HDTV]", + choices=["disc", "remux", "encode", "webdl", "web-dl", "webrip", "hdtv"], + ) + parser.add_argument( + "--source", + nargs="*", + required=False, + help="Source [Blu-ray, BluRay, DVD, HDDVD, WEB, HDTV, UHDTV]", + choices=["Blu-ray", "BluRay", "DVD", "HDDVD", "WEB", "HDTV", "UHDTV"], + dest="manual_source", + ) + parser.add_argument( + "-res", + "--resolution", + nargs="*", + required=False, + help="Resolution [2160p, 1080p, 1080i, 720p, 576p, 576i, 480p, 480i, 8640p, 4320p, OTHER]", + choices=[ + "2160p", + "1080p", + "1080i", + "720p", + "576p", + "576i", + "480p", + "480i", + "8640p", + "4320p", + "other", + ], + ) + parser.add_argument( + "-tmdb", + "--tmdb", + nargs="*", + required=False, + help="TMDb ID", + type=str, + dest="tmdb_manual", + ) + parser.add_argument( + "-imdb", "--imdb", nargs="*", required=False, help="IMDb ID", type=str + ) + parser.add_argument( + "-mal", "--mal", nargs="*", required=False, help="MAL ID", type=str + ) + parser.add_argument( + "-g", "--tag", nargs="*", required=False, help="Group Tag", type=str + ) + parser.add_argument( + "-serv", + "--service", + nargs="*", + required=False, + help="Streaming Service", + type=str, + ) + parser.add_argument( + "-dist", + "--distributor", + nargs="*", + required=False, + help="Disc Distributor e.g.(Criterion, BFI, etc.)", + type=str, + ) + parser.add_argument( + "-edition", + "--edition", + "--repack", + nargs="*", + required=False, + help="Edition/Repack String e.g.(Director's Cut, Uncut, Hybrid, REPACK, REPACK3)", + type=str, + dest="manual_edition", + default="", + ) + parser.add_argument( + "-season", + "--season", + nargs="*", + required=False, + help="Season (number)", + type=str, + ) + parser.add_argument( + "-episode", + "--episode", + nargs="*", + required=False, + help="Episode (number)", + type=str, + ) + parser.add_argument( + "-daily", + "--daily", + nargs=1, + required=False, + help="Air date of this episode (YYYY-MM-DD)", + type=datetime.date.fromisoformat, + dest="manual_date", + ) + parser.add_argument( + "--no-season", + dest="no_season", + action="store_true", + required=False, + help="Remove Season from title", + ) + parser.add_argument( + "--no-year", + dest="no_year", + action="store_true", + required=False, + help="Remove Year from title", + ) + parser.add_argument( + "--no-aka", + dest="no_aka", + action="store_true", + required=False, + help="Remove AKA from title", + ) + parser.add_argument( + "--no-dub", + dest="no_dub", + action="store_true", + required=False, + help="Remove Dubbed from title", + ) + parser.add_argument( + "--no-tag", + dest="no_tag", + action="store_true", + required=False, + help="Remove Group Tag from title", + ) + parser.add_argument( + "-ns", + "--no-seed", + action="store_true", + required=False, + help="Do not add torrent to the client", + ) + parser.add_argument( + "-year", + "--year", + dest="manual_year", + nargs="?", + required=False, + help="Year", + type=int, + default=0, + ) + parser.add_argument( + "-ptp", + "--ptp", + nargs="*", + required=False, + help="PTP torrent id/permalink", + type=str, + ) + parser.add_argument( + "-blu", + "--blu", + nargs="*", + required=False, + help="BLU torrent id/link", + type=str, + ) + parser.add_argument( + "-aither", + "--aither", + nargs="*", + required=False, + 
help="Aither torrent id/link", + type=str, + ) + parser.add_argument( + "-lst", + "--lst", + nargs="*", + required=False, + help="LST torrent id/link", + type=str, + ) + parser.add_argument( + "-oe", + "--oe", + nargs="*", + required=False, + help="OE torrent id/link", + type=str, + ) + parser.add_argument( + "-tik", + "--tik", + nargs="*", + required=False, + help="TIK torrent id/link", + type=str, + ) + parser.add_argument( + "-hdb", + "--hdb", + nargs="*", + required=False, + help="HDB torrent id/link", + type=str, + ) + parser.add_argument( + "--foreign", + dest="foreign", + action="store_true", + required=False, + help="Set for TIK Foreign category", + ) + parser.add_argument( + "--opera", + dest="opera", + action="store_true", + required=False, + help="Set for TIK Opera & Musical category", + ) + parser.add_argument( + "--asian", + dest="asian", + action="store_true", + required=False, + help="Set for TIK Asian category", + ) + parser.add_argument( + "-disctype", + "--disctype", + nargs="*", + required=False, + help="Type of disc for TIK (BD100, BD66, BD50, BD25, NTSC DVD9, NTSC DVD5, PAL DVD9, PAL DVD5, Custom, 3D)", + type=str, + ) + parser.add_argument( + "--untouched", + dest="untouched", + action="store_true", + required=False, + help="Set when a completely untouched disc at TIK", + ) + parser.add_argument( + "-d", + "--desc", + nargs="*", + required=False, + help="Custom Description (string)", + ) + parser.add_argument( + "-manual_dvds", + "--manual_dvds", + nargs="*", + required=False, + help="Override the default number of DVD's (eg: use 2xDVD9+DVD5 instead)", + type=str, + dest="manual_dvds", + default="", + ) + parser.add_argument( + "-pb", + "--desclink", + nargs="*", + required=False, + help="Custom Description (link to hastebin/pastebin)", + ) + parser.add_argument( + "-df", + "--descfile", + nargs="*", + required=False, + help="Custom Description (path to file)", + ) + parser.add_argument( + "-ih", + "--imghost", + nargs="*", + required=False, + help="Image Host", + choices=["imgbb", "ptpimg", "imgbox", "pixhost", "lensdump", "ptscreens"], + ) + parser.add_argument( + "-siu", + "--skip-imagehost-upload", + dest="skip_imghost_upload", + action="store_true", + required=False, + help="Skip Uploading to an image host", + ) + parser.add_argument( + "-th", + "--torrenthash", + nargs="*", + required=False, + help="Torrent Hash to re-use from your client's session directory", + ) + parser.add_argument( + "-nfo", + "--nfo", + action="store_true", + required=False, + help="Use .nfo in directory for description", + ) + parser.add_argument( + "-k", + "--keywords", + nargs="*", + required=False, + help="Add comma seperated keywords e.g. 'keyword, keyword2, etc'", + ) + parser.add_argument( + "-kf", + "--keep-folder", + action="store_true", + required=False, + help="Keep the folder containing the single file. Works only when supplying a directory as input. 
For uploads with poor filenames, like some scene.", + ) + parser.add_argument( + "-reg", "--region", nargs="*", required=False, help="Region for discs" + ) + parser.add_argument( + "-a", + "--anon", + action="store_true", + required=False, + help="Upload anonymously", + ) + parser.add_argument( + "-st", + "--stream", + action="store_true", + required=False, + help="Stream Optimized Upload", + ) + parser.add_argument( + "-webdv", + "--webdv", + action="store_true", + required=False, + help="Contains a Dolby Vision layer converted using dovi_tool", + ) + parser.add_argument( + "-hc", + "--hardcoded-subs", + action="store_true", + required=False, + help="Contains hardcoded subs", + dest="hardcoded-subs", + ) + parser.add_argument( + "-pr", + "--personalrelease", + action="store_true", + required=False, + help="Personal Release", + ) + parser.add_argument( + "-sdc", + "--skip-dupe-check", + action="store_true", + required=False, + help="Pass if you know this is a dupe (Skips dupe check)", + dest="dupe", + ) + parser.add_argument( + "-debug", + "--debug", + action="store_true", + required=False, + help="Debug Mode, will run through all the motions providing extra info, but will not upload to trackers.", + ) + parser.add_argument( + "-ffdebug", + "--ffdebug", + action="store_true", + required=False, + help="Will show info from ffmpeg while taking screenshots.", + ) + parser.add_argument( + "-m", + "--manual", + action="store_true", + required=False, + help="Manual Mode. Returns link to ddl screens/base.torrent", + ) + parser.add_argument( + "-mps", + "--max-piece-size", + nargs="*", + required=False, + help="Set max piece size allowed in MiB for default torrent creation (default 64 MiB)", + choices=["2", "4", "8", "16", "32", "64"], + ) + parser.add_argument( + "-nh", + "--nohash", + action="store_true", + required=False, + help="Don't hash .torrent", + ) + parser.add_argument( + "-rh", + "--rehash", + action="store_true", + required=False, + help="DO hash .torrent", + ) + parser.add_argument( + "-dr", + "--draft", + action="store_true", + required=False, + help="Send to drafts (BHD, LST)", + ) + parser.add_argument( + "-mq", "--modq", action="store_true", required=False, help="Send to modQ" + ) + parser.add_argument( + "-client", + "--client", + nargs="*", + required=False, + help="Use this torrent client instead of default", + ) + parser.add_argument( + "-qbt", + "--qbit-tag", + dest="qbit_tag", + nargs="*", + required=False, + help="Add to qbit with this tag", + ) + parser.add_argument( + "-qbc", + "--qbit-cat", + dest="qbit_cat", + nargs="*", + required=False, + help="Add to qbit with this category", + ) + parser.add_argument( + "-rtl", + "--rtorrent-label", + dest="rtorrent_label", + nargs="*", + required=False, + help="Add to rtorrent with this label", + ) + parser.add_argument( + "-tk", + "--trackers", + nargs="*", + required=False, + help="Upload to these trackers, space seperated (--trackers blu bhd)", + ) + parser.add_argument( + "-rt", + "--randomized", + nargs="*", + required=False, + help="Number of extra, torrents with random infohash", + default=0, + ) + parser.add_argument( + "-ua", + "--unattended", + action="store_true", + required=False, + help=argparse.SUPPRESS, + ) + parser.add_argument( + "-vs", + "--vapoursynth", + action="store_true", + required=False, + help="Use vapoursynth for screens (requires vs install)", + ) + parser.add_argument( + "-cleanup", + "--cleanup", + action="store_true", + required=False, + help="Clean up tmp directory", + ) + parser.add_argument( + 
"-fl", + "--freeleech", + nargs="*", + required=False, + help="Freeleech Percentage", + default=0, + dest="freeleech", + ) args, before_args = parser.parse_known_args(input) args = vars(args) # console.print(args) - if len(before_args) >= 1 and not os.path.exists(' '.join(args['path'])): + if len(before_args) >= 1 and not os.path.exists(" ".join(args["path"])): for each in before_args: - args['path'].append(each) - if os.path.exists(' '.join(args['path'])): + args["path"].append(each) + if os.path.exists(" ".join(args["path"])): if any(".mkv" in x for x in before_args): - if ".mkv" in ' '.join(args['path']): + if ".mkv" in " ".join(args["path"]): break else: break - if meta.get('tmdb_manual') is not None or meta.get('imdb') is not None: - meta['tmdb_manual'] = meta['imdb'] = None + if meta.get("tmdb_manual") is not None or meta.get("imdb") is not None: + meta["tmdb_manual"] = meta["imdb"] = None for key in args: value = args.get(key) if value not in (None, []): if isinstance(value, list): value2 = self.list_to_string(value) - if key == 'type': - meta[key] = value2.upper().replace('-', '') - elif key == 'tag': + if key == "type": + meta[key] = value2.upper().replace("-", "") + elif key == "tag": meta[key] = f"-{value2}" - elif key == 'screens': + elif key == "screens": meta[key] = int(value2) - elif key == 'season': - meta['manual_season'] = value2 - elif key == 'episode': - meta['manual_episode'] = value2 - elif key == 'manual_date': - meta['manual_date'] = value2 - elif key == 'tmdb_manual': - meta['category'], meta['tmdb_manual'] = self.parse_tmdb_id(value2, meta.get('category')) - elif key == 'ptp': - if value2.startswith('http'): + elif key == "season": + meta["manual_season"] = value2 + elif key == "episode": + meta["manual_episode"] = value2 + elif key == "manual_date": + meta["manual_date"] = value2 + elif key == "tmdb_manual": + meta["category"], meta["tmdb_manual"] = self.parse_tmdb_id( + value2, meta.get("category") + ) + elif key == "ptp": + if value2.startswith("http"): parsed = urllib.parse.urlparse(value2) try: - meta['ptp'] = urllib.parse.parse_qs(parsed.query)['torrentid'][0] + meta["ptp"] = urllib.parse.parse_qs(parsed.query)[ + "torrentid" + ][0] except Exception: - console.print('[red]Your terminal ate part of the url, please surround in quotes next time, or pass only the torrentid') - console.print('[red]Continuing without -ptp') + console.print( + "[red]Your terminal ate part of the url, please surround in quotes next time, or pass only the torrentid" + ) + console.print("[red]Continuing without -ptp") else: - meta['ptp'] = value2 - elif key == 'blu': - if value2.startswith('http'): + meta["ptp"] = value2 + elif key == "blu": + if value2.startswith("http"): parsed = urllib.parse.urlparse(value2) try: blupath = parsed.path - if blupath.endswith('/'): + if blupath.endswith("/"): blupath = blupath[:-1] - meta['blu'] = blupath.split('/')[-1] + meta["blu"] = blupath.split("/")[-1] except Exception: - console.print('[red]Unable to parse id from url') - console.print('[red]Continuing without --blu') + console.print("[red]Unable to parse id from url") + console.print("[red]Continuing without --blu") else: - meta['blu'] = value2 - elif key == 'aither': - if value2.startswith('http'): + meta["blu"] = value2 + elif key == "aither": + if value2.startswith("http"): parsed = urllib.parse.urlparse(value2) try: aitherpath = parsed.path - if aitherpath.endswith('/'): + if aitherpath.endswith("/"): aitherpath = aitherpath[:-1] - meta['aither'] = aitherpath.split('/')[-1] + 
meta["aither"] = aitherpath.split("/")[-1] except Exception: - console.print('[red]Unable to parse id from url') - console.print('[red]Continuing without --aither') + console.print("[red]Unable to parse id from url") + console.print("[red]Continuing without --aither") else: - meta['aither'] = value2 - elif key == 'lst': - if value2.startswith('http'): + meta["aither"] = value2 + elif key == "lst": + if value2.startswith("http"): parsed = urllib.parse.urlparse(value2) try: lstpath = parsed.path - if lstpath.endswith('/'): + if lstpath.endswith("/"): lstpath = lstpath[:-1] - meta['lst'] = lstpath.split('/')[-1] + meta["lst"] = lstpath.split("/")[-1] except Exception: - console.print('[red]Unable to parse id from url') - console.print('[red]Continuing without --lst') + console.print("[red]Unable to parse id from url") + console.print("[red]Continuing without --lst") else: - meta['lst'] = value2 - elif key == 'oe': - if value2.startswith('http'): + meta["lst"] = value2 + elif key == "oe": + if value2.startswith("http"): parsed = urllib.parse.urlparse(value2) try: oepath = parsed.path - if oepath.endswith('/'): + if oepath.endswith("/"): oepath = oepath[:-1] - meta['oe'] = oepath.split('/')[-1] + meta["oe"] = oepath.split("/")[-1] except Exception: - console.print('[red]Unable to parse id from url') - console.print('[red]Continuing without --oe') + console.print("[red]Unable to parse id from url") + console.print("[red]Continuing without --oe") else: - meta['oe'] = value2 - elif key == 'tik': - if value2.startswith('http'): + meta["oe"] = value2 + elif key == "tik": + if value2.startswith("http"): parsed = urllib.parse.urlparse(value2) try: tikpath = parsed.path - if tikpath.endswith('/'): + if tikpath.endswith("/"): tikpath = tikpath[:-1] - meta['tik'] = tikpath.split('/')[-1] + meta["tik"] = tikpath.split("/")[-1] except Exception: - console.print('[red]Unable to parse id from url') - console.print('[red]Continuing without --tik') + console.print("[red]Unable to parse id from url") + console.print("[red]Continuing without --tik") else: - meta['tik'] = value2 - elif key == 'hdb': - if value2.startswith('http'): + meta["tik"] = value2 + elif key == "hdb": + if value2.startswith("http"): parsed = urllib.parse.urlparse(value2) try: - meta['hdb'] = urllib.parse.parse_qs(parsed.query)['id'][0] + meta["hdb"] = urllib.parse.parse_qs(parsed.query)["id"][ + 0 + ] except Exception: - console.print('[red]Your terminal ate part of the url, please surround in quotes next time, or pass only the torrentid') - console.print('[red]Continuing without -hdb') + console.print( + "[red]Your terminal ate part of the url, please surround in quotes next time, or pass only the torrentid" + ) + console.print("[red]Continuing without -hdb") else: - meta['hdb'] = value2 + meta["hdb"] = value2 else: meta[key] = value2 @@ -223,10 +682,10 @@ def parse(self, args, meta): meta[key] = "" else: meta[key] = meta.get(key, None) - if key in ('trackers'): + if key in ("trackers"): meta[key] = value # if key == 'help' and value == True: - # parser.print_help() + # parser.print_help() return meta, parser, before_args def list_to_string(self, list): @@ -240,12 +699,12 @@ def list_to_string(self, list): def parse_tmdb_id(self, id, category): id = id.lower().lstrip() - if id.startswith('tv'): - id = id.split('/')[1] - category = 'TV' - elif id.startswith('movie'): - id = id.split('/')[1] - category = 'MOVIE' + if id.startswith("tv"): + id = id.split("/")[1] + category = "TV" + elif id.startswith("movie"): + id = id.split("/")[1] + 
category = "MOVIE" else: id = id return category, id diff --git a/src/bbcode.py b/src/bbcode.py index 983469c4e..24efa5bb3 100644 --- a/src/bbcode.py +++ b/src/bbcode.py @@ -44,33 +44,80 @@ def clean_ptp_description(self, desc, is_disc): # Unescape html desc = html.unescape(desc) - desc = desc.replace('\r\n', '\n') + desc = desc.replace("\r\n", "\n") # Remove url tags with PTP/HDB links - url_tags = re.findall(r"(\[url[\=\]]https?:\/\/passthepopcorn\.m[^\]]+)([^\[]+)(\[\/url\])?", desc, flags=re.IGNORECASE) - url_tags += re.findall(r"(\[url[\=\]]https?:\/\/hdbits\.o[^\]]+)([^\[]+)(\[\/url\])?", desc, flags=re.IGNORECASE) + url_tags = re.findall( + r"(\[url[\=\]]https?:\/\/passthepopcorn\.m[^\]]+)([^\[]+)(\[\/url\])?", + desc, + flags=re.IGNORECASE, + ) + url_tags += re.findall( + r"(\[url[\=\]]https?:\/\/hdbits\.o[^\]]+)([^\[]+)(\[\/url\])?", + desc, + flags=re.IGNORECASE, + ) if url_tags: for url_tag in url_tags: - url_tag = ''.join(url_tag) - url_tag_removed = re.sub(r"(\[url[\=\]]https?:\/\/passthepopcorn\.m[^\]]+])", "", url_tag, flags=re.IGNORECASE) - url_tag_removed = re.sub(r"(\[url[\=\]]https?:\/\/hdbits\.o[^\]]+])", "", url_tag_removed, flags=re.IGNORECASE) + url_tag = "".join(url_tag) + url_tag_removed = re.sub( + r"(\[url[\=\]]https?:\/\/passthepopcorn\.m[^\]]+])", + "", + url_tag, + flags=re.IGNORECASE, + ) + url_tag_removed = re.sub( + r"(\[url[\=\]]https?:\/\/hdbits\.o[^\]]+])", + "", + url_tag_removed, + flags=re.IGNORECASE, + ) url_tag_removed = url_tag_removed.replace("[/url]", "") desc = desc.replace(url_tag, url_tag_removed) # Remove links to PTP/HDB - desc = desc.replace('http://passthepopcorn.me', 'PTP').replace('https://passthepopcorn.me', 'PTP') - desc = desc.replace('http://hdbits.org', 'HDB').replace('https://hdbits.org', 'HDB') + desc = desc.replace("http://passthepopcorn.me", "PTP").replace( + "https://passthepopcorn.me", "PTP" + ) + desc = desc.replace("http://hdbits.org", "HDB").replace( + "https://hdbits.org", "HDB" + ) # Remove Mediainfo Tags / Attempt to regex out mediainfo mediainfo_tags = re.findall(r"\[mediainfo\][\s\S]*?\[\/mediainfo\]", desc) if mediainfo_tags: desc = re.sub(r"\[mediainfo\][\s\S]*?\[\/mediainfo\]", "", desc) elif is_disc != "BDMV": - desc = re.sub(r"(^general\nunique)(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) - desc = re.sub(r"(^general\ncomplete)(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) - desc = re.sub(r"(^(Format[\s]{2,}:))(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) - desc = re.sub(r"(^(video|audio|text)( #\d+)?\nid)(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) - desc = re.sub(r"(^(menu)( #\d+)?\n)(.*?)^$", "", f"{desc}\n\n", flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) + desc = re.sub( + r"(^general\nunique)(.*?)^$", + "", + desc, + flags=re.MULTILINE | re.IGNORECASE | re.DOTALL, + ) + desc = re.sub( + r"(^general\ncomplete)(.*?)^$", + "", + desc, + flags=re.MULTILINE | re.IGNORECASE | re.DOTALL, + ) + desc = re.sub( + r"(^(Format[\s]{2,}:))(.*?)^$", + "", + desc, + flags=re.MULTILINE | re.IGNORECASE | re.DOTALL, + ) + desc = re.sub( + r"(^(video|audio|text)( #\d+)?\nid)(.*?)^$", + "", + desc, + flags=re.MULTILINE | re.IGNORECASE | re.DOTALL, + ) + desc = re.sub( + r"(^(menu)( #\d+)?\n)(.*?)^$", + "", + f"{desc}\n\n", + flags=re.MULTILINE | re.IGNORECASE | re.DOTALL, + ) elif any(x in is_disc for x in ["BDMV", "DVD"]): return "", [] @@ -94,15 +141,20 @@ def clean_ptp_description(self, desc, is_disc): # Remove 
Movie/Person/User/hr/Indent remove_list = [ - '[movie]', '[/movie]', - '[artist]', '[/artist]', - '[user]', '[/user]', - '[indent]', '[/indent]', - '[size]', '[/size]', - '[hr]' + "[movie]", + "[/movie]", + "[artist]", + "[/artist]", + "[user]", + "[/user]", + "[indent]", + "[/indent]", + "[size]", + "[/size]", + "[hr]", ] for each in remove_list: - desc = desc.replace(each, '') + desc = desc.replace(each, "") # Catch Stray Images and Prepare Image List imagelist = [] @@ -114,7 +166,7 @@ def clean_ptp_description(self, desc, is_disc): # Replace comparison/hide tags with placeholder because sometimes uploaders use comp images as loose images for i, comp in enumerate(comps): - nocomp = nocomp.replace(comp, '') + nocomp = nocomp.replace(comp, "") desc = desc.replace(comp, f"COMPARISON_PLACEHOLDER-{i} ") comp_placeholders.append(comp) @@ -123,16 +175,18 @@ def clean_ptp_description(self, desc, is_disc): desc = re.sub(r"\[img=[\s\S]*?\]", "", desc, flags=re.IGNORECASE) # Extract loose images and add to imagelist as dictionaries - loose_images = re.findall(r"(https?:\/\/[^\s\[\]]+\.(?:png|jpg))", nocomp, flags=re.IGNORECASE) + loose_images = re.findall( + r"(https?:\/\/[^\s\[\]]+\.(?:png|jpg))", nocomp, flags=re.IGNORECASE + ) if loose_images: for img_url in loose_images: image_dict = { - 'img_url': img_url, - 'raw_url': img_url, - 'web_url': img_url # Since there is no distinction here, use the same URL for all + "img_url": img_url, + "raw_url": img_url, + "web_url": img_url, # Since there is no distinction here, use the same URL for all } imagelist.append(image_dict) - desc = desc.replace(img_url, '') + desc = desc.replace(img_url, "") # Re-place comparisons for i, comp in enumerate(comp_placeholders): @@ -143,13 +197,13 @@ def clean_ptp_description(self, desc, is_disc): desc = self.convert_collapse_to_comparison(desc, "hide", hides) # Strip blank lines: - desc = desc.strip('\n') + desc = desc.strip("\n") desc = re.sub("\n\n+", "\n\n", desc) - while desc.startswith('\n'): - desc = desc.replace('\n', '', 1) - desc = desc.strip('\n') + while desc.startswith("\n"): + desc = desc.replace("\n", "", 1) + desc = desc.strip("\n") - if desc.replace('\n', '').strip() == '': + if desc.replace("\n", "").strip() == "": console.print("[yellow]Description is empty after cleaning.") return "", imagelist @@ -159,28 +213,30 @@ def clean_unit3d_description(self, desc, site): # Unescape HTML desc = html.unescape(desc) # Replace carriage returns with newlines - desc = desc.replace('\r\n', '\n') + desc = desc.replace("\r\n", "\n") # Remove links to site site_netloc = urllib.parse.urlparse(site).netloc - site_regex = rf"(\[url[\=\]]https?:\/\/{site_netloc}/[^\]]+])([^\[]+)(\[\/url\])?" + site_regex = ( + rf"(\[url[\=\]]https?:\/\/{site_netloc}/[^\]]+])([^\[]+)(\[\/url\])?" 
+ ) site_url_tags = re.findall(site_regex, desc) if site_url_tags: for site_url_tag in site_url_tags: - site_url_tag = ''.join(site_url_tag) + site_url_tag = "".join(site_url_tag) url_tag_regex = rf"(\[url[\=\]]https?:\/\/{site_netloc}[^\]]+])" url_tag_removed = re.sub(url_tag_regex, "", site_url_tag) url_tag_removed = url_tag_removed.replace("[/url]", "") desc = desc.replace(site_url_tag, url_tag_removed) - desc = desc.replace(site_netloc, site_netloc.split('.')[0]) + desc = desc.replace(site_netloc, site_netloc.split(".")[0]) # Temporarily hide spoiler tags spoilers = re.findall(r"\[spoiler[\s\S]*?\[\/spoiler\]", desc) nospoil = desc spoiler_placeholders = [] for i in range(len(spoilers)): - nospoil = nospoil.replace(spoilers[i], '') + nospoil = nospoil.replace(spoilers[i], "") desc = desc.replace(spoilers[i], f"SPOILER_PLACEHOLDER-{i} ") spoiler_placeholders.append(spoilers[i]) @@ -190,19 +246,29 @@ def clean_unit3d_description(self, desc, site): if img_tags: for img_url in img_tags: image_dict = { - 'img_url': img_url.strip(), - 'raw_url': img_url.strip(), - 'web_url': img_url.strip(), + "img_url": img_url.strip(), + "raw_url": img_url.strip(), + "web_url": img_url.strip(), } imagelist.append(image_dict) # Remove the [img] tag and its contents from the description - desc = re.sub(rf"\[img[^\]]*\]{re.escape(img_url)}\[/img\]", '', desc, flags=re.IGNORECASE) + desc = re.sub( + rf"\[img[^\]]*\]{re.escape(img_url)}\[/img\]", + "", + desc, + flags=re.IGNORECASE, + ) # Now, remove matching URLs from [URL] tags for img in imagelist: - img_url = re.escape(img['img_url']) - desc = re.sub(rf"\[URL={img_url}\]\[/URL\]", '', desc, flags=re.IGNORECASE) - desc = re.sub(rf"\[URL={img_url}\]\[img[^\]]*\]{img_url}\[/img\]\[/URL\]", '', desc, flags=re.IGNORECASE) + img_url = re.escape(img["img_url"]) + desc = re.sub(rf"\[URL={img_url}\]\[/URL\]", "", desc, flags=re.IGNORECASE) + desc = re.sub( + rf"\[URL={img_url}\]\[img[^\]]*\]{img_url}\[/img\]\[/URL\]", + "", + desc, + flags=re.IGNORECASE, + ) # Filter out bot images from imagelist bot_image_urls = [ @@ -210,7 +276,7 @@ def clean_unit3d_description(self, desc, site): "https://i.ibb.co/2NVWb0c/uploadrr.webp", # Add any other known bot image URLs here ] - imagelist = [img for img in imagelist if img['img_url'] not in bot_image_urls] + imagelist = [img for img in imagelist if img["img_url"] not in bot_image_urls] # Restore spoiler tags if spoiler_placeholders: @@ -222,11 +288,11 @@ def clean_unit3d_description(self, desc, site): if centers: for center in centers: # If [center] contains only whitespace or empty tags, remove the entire tag - cleaned_center = re.sub(r'\[center\]\s*\[\/center\]', '', center) - cleaned_center = re.sub(r'\[center\]\s+', '[center]', cleaned_center) - cleaned_center = re.sub(r'\s*\[\/center\]', '[/center]', cleaned_center) - if cleaned_center == '[center][/center]': - desc = desc.replace(center, '') + cleaned_center = re.sub(r"\[center\]\s*\[\/center\]", "", center) + cleaned_center = re.sub(r"\[center\]\s+", "[center]", cleaned_center) + cleaned_center = re.sub(r"\s*\[\/center\]", "[/center]", cleaned_center) + if cleaned_center == "[center][/center]": + desc = desc.replace(center, "") else: desc = desc.replace(center, cleaned_center.strip()) @@ -240,7 +306,12 @@ def clean_unit3d_description(self, desc, site): \[center\]\[url=https:\/\/github\.com\/z-ink\/uploadrr\]\[img=\d+\]https:\/\/i\.ibb\.co\/2NVWb0c\/uploadrr\.webp\[\/img\]\[\/url\]\[\/center\] """ desc = re.sub(bot_signature_regex, "", desc, flags=re.IGNORECASE | 
re.VERBOSE) - desc = re.sub(r"\[center\].*Created by L4G's Upload Assistant.*\[\/center\]", "", desc, flags=re.IGNORECASE) + desc = re.sub( + r"\[center\].*Created by L4G's Upload Assistant.*\[\/center\]", + "", + desc, + flags=re.IGNORECASE, + ) # Remove leftover [img] or [URL] tags in the description desc = re.sub(r"\[img\][\s\S]*?\[\/img\]", "", desc, flags=re.IGNORECASE) @@ -250,23 +321,23 @@ def clean_unit3d_description(self, desc, site): # Strip trailing whitespace and newlines: desc = desc.rstrip() - if desc.replace('\n', '') == '': + if desc.replace("\n", "") == "": return "", imagelist return desc, imagelist def convert_pre_to_code(self, desc): - desc = desc.replace('[pre]', '[code]') - desc = desc.replace('[/pre]', '[/code]') + desc = desc.replace("[pre]", "[code]") + desc = desc.replace("[/pre]", "[/code]") return desc def convert_hide_to_spoiler(self, desc): - desc = desc.replace('[hide', '[spoiler') - desc = desc.replace('[/hide]', '[/spoiler]') + desc = desc.replace("[hide", "[spoiler") + desc = desc.replace("[/hide]", "[/spoiler]") return desc def convert_spoiler_to_hide(self, desc): - desc = desc.replace('[spoiler', '[hide') - desc = desc.replace('[/spoiler]', '[/hide]') + desc = desc.replace("[spoiler", "[hide") + desc = desc.replace("[/spoiler]", "[/hide]") return desc def remove_spoiler(self, desc): @@ -274,13 +345,13 @@ def remove_spoiler(self, desc): return desc def convert_spoiler_to_code(self, desc): - desc = desc.replace('[spoiler', '[code') - desc = desc.replace('[/spoiler]', '[/code]') + desc = desc.replace("[spoiler", "[code") + desc = desc.replace("[/spoiler]", "[/code]") return desc def convert_code_to_quote(self, desc): - desc = desc.replace('[code', '[quote') - desc = desc.replace('[/code]', '[/quote]') + desc = desc.replace("[code", "[quote") + desc = desc.replace("[/code]", "[/quote]") return desc def convert_comparison_to_collapse(self, desc, max_width): @@ -288,9 +359,21 @@ def convert_comparison_to_collapse(self, desc, max_width): for comp in comparisons: line = [] output = [] - comp_sources = comp.split(']', 1)[0].replace('[comparison=', '').replace(' ', '').split(',') - comp_images = comp.split(']', 1)[1].replace('[/comparison]', '').replace(',', '\n').replace(' ', '\n') - comp_images = re.findall(r"(https?:\/\/.*\.(?:png|jpg))", comp_images, flags=re.IGNORECASE) + comp_sources = ( + comp.split("]", 1)[0] + .replace("[comparison=", "") + .replace(" ", "") + .split(",") + ) + comp_images = ( + comp.split("]", 1)[1] + .replace("[/comparison]", "") + .replace(",", "\n") + .replace(" ", "\n") + ) + comp_images = re.findall( + r"(https?:\/\/.*\.(?:png|jpg))", comp_images, flags=re.IGNORECASE + ) screens_per_line = len(comp_sources) img_size = int(max_width / screens_per_line) if img_size > 350: @@ -301,9 +384,9 @@ def convert_comparison_to_collapse(self, desc, max_width): bb = f"[url={img}][img={img_size}]{img}[/img][/url]" line.append(bb) if len(line) == screens_per_line: - output.append(''.join(line)) + output.append("".join(line)) line = [] - output = '\n'.join(output) + output = "\n".join(output) new_bbcode = f"[spoiler={' vs '.join(comp_sources)}][center]{' | '.join(comp_sources)}[/center]\n{output}[/spoiler]" desc = desc.replace(comp, new_bbcode) return desc @@ -313,9 +396,21 @@ def convert_comparison_to_centered(self, desc, max_width): for comp in comparisons: line = [] output = [] - comp_sources = comp.split(']', 1)[0].replace('[comparison=', '').replace(' ', '').split(',') - comp_images = comp.split(']', 1)[1].replace('[/comparison]', 
'').replace(',', '\n').replace(' ', '\n') - comp_images = re.findall(r"(https?:\/\/.*\.(?:png|jpg))", comp_images, flags=re.IGNORECASE) + comp_sources = ( + comp.split("]", 1)[0] + .replace("[comparison=", "") + .replace(" ", "") + .split(",") + ) + comp_images = ( + comp.split("]", 1)[1] + .replace("[/comparison]", "") + .replace(",", "\n") + .replace(" ", "\n") + ) + comp_images = re.findall( + r"(https?:\/\/.*\.(?:png|jpg))", comp_images, flags=re.IGNORECASE + ) screens_per_line = len(comp_sources) img_size = int(max_width / screens_per_line) if img_size > 350: @@ -326,9 +421,9 @@ def convert_comparison_to_centered(self, desc, max_width): bb = f"[url={img}][img={img_size}]{img}[/img][/url]" line.append(bb) if len(line) == screens_per_line: - output.append(''.join(line)) + output.append("".join(line)) line = [] - output = '\n'.join(output) + output = "\n".join(output) new_bbcode = f"[center]{' | '.join(comp_sources)}\n{output}[/center]" desc = desc.replace(comp, new_bbcode) return desc @@ -343,21 +438,32 @@ def convert_collapse_to_comparison(self, desc, spoiler_hide, collapses): comp_images = [] final_sources = [] for image in images: - image_url = re.sub(r"\[img[\s\S]*\]", "", image.replace('[/img]', ''), flags=re.IGNORECASE) + image_url = re.sub( + r"\[img[\s\S]*\]", + "", + image.replace("[/img]", ""), + flags=re.IGNORECASE, + ) comp_images.append(image_url) if spoiler_hide == "spoiler": - sources = re.match(r"\[spoiler[\s\S]*?\]", tag)[0].replace('[spoiler=', '')[:-1] + sources = re.match(r"\[spoiler[\s\S]*?\]", tag)[0].replace( + "[spoiler=", "" + )[:-1] elif spoiler_hide == "hide": - sources = re.match(r"\[hide[\s\S]*?\]", tag)[0].replace('[hide=', '')[:-1] + sources = re.match(r"\[hide[\s\S]*?\]", tag)[0].replace( + "[hide=", "" + )[:-1] sources = re.sub("comparison", "", sources, flags=re.IGNORECASE) - for each in ['vs', ',', '|']: + for each in ["vs", ",", "|"]: sources = sources.split(each) sources = "$".join(sources) sources = sources.split("$") for source in sources: final_sources.append(source.strip()) - comp_images = '\n'.join(comp_images) - final_sources = ', '.join(final_sources) - spoil2comp = f"[comparison={final_sources}]{comp_images}[/comparison]" + comp_images = "\n".join(comp_images) + final_sources = ", ".join(final_sources) + spoil2comp = ( + f"[comparison={final_sources}]{comp_images}[/comparison]" + ) desc = desc.replace(tag, spoil2comp) return desc diff --git a/src/clients.py b/src/clients.py index 1b97da004..548bb6686 100644 --- a/src/clients.py +++ b/src/clients.py @@ -15,102 +15,154 @@ from src.console import console -class Clients(): +class Clients: """ Add to torrent client """ + def __init__(self, config): self.config = config pass async def add_to_client(self, meta, tracker): torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]{meta['clean_name']}.torrent" - if meta.get('no_seed', False) is True: - console.print("[bold red]--no-seed was passed, so the torrent will not be added to the client") + if meta.get("no_seed", False) is True: + console.print( + "[bold red]--no-seed was passed, so the torrent will not be added to the client" + ) console.print("[bold yellow]Add torrent manually to the client") return if os.path.exists(torrent_path): torrent = Torrent.read(torrent_path) else: return - if meta.get('client', None) is None: - default_torrent_client = self.config['DEFAULT']['default_torrent_client'] + if meta.get("client", None) is None: + default_torrent_client = self.config["DEFAULT"]["default_torrent_client"] else: - 
default_torrent_client = meta['client'] - if meta.get('client', None) == 'none': + default_torrent_client = meta["client"] + if meta.get("client", None) == "none": return if default_torrent_client == "none": return - client = self.config['TORRENT_CLIENTS'][default_torrent_client] - torrent_client = client['torrent_client'] + client = self.config["TORRENT_CLIENTS"][default_torrent_client] + torrent_client = client["torrent_client"] local_path, remote_path = await self.remote_path_map(meta) console.print(f"[bold green]Adding to {torrent_client}") if torrent_client.lower() == "rtorrent": - self.rtorrent(meta['path'], torrent_path, torrent, meta, local_path, remote_path, client) + self.rtorrent( + meta["path"], + torrent_path, + torrent, + meta, + local_path, + remote_path, + client, + ) elif torrent_client == "qbit": - await self.qbittorrent(meta['path'], torrent, local_path, remote_path, client, meta['is_disc'], meta['filelist'], meta) + await self.qbittorrent( + meta["path"], + torrent, + local_path, + remote_path, + client, + meta["is_disc"], + meta["filelist"], + meta, + ) elif torrent_client.lower() == "deluge": - if meta['type'] == "DISC": - path = os.path.dirname(meta['path']) # noqa F841 - self.deluge(meta['path'], torrent_path, torrent, local_path, remote_path, client, meta) + if meta["type"] == "DISC": + path = os.path.dirname(meta["path"]) # noqa F841 + self.deluge( + meta["path"], + torrent_path, + torrent, + local_path, + remote_path, + client, + meta, + ) elif torrent_client.lower() == "watch": - shutil.copy(torrent_path, client['watch_folder']) + shutil.copy(torrent_path, client["watch_folder"]) return async def find_existing_torrent(self, meta): - if meta.get('client', None) is None: - default_torrent_client = self.config['DEFAULT']['default_torrent_client'] + if meta.get("client", None) is None: + default_torrent_client = self.config["DEFAULT"]["default_torrent_client"] else: - default_torrent_client = meta['client'] - if meta.get('client', None) == 'none' or default_torrent_client == 'none': + default_torrent_client = meta["client"] + if meta.get("client", None) == "none" or default_torrent_client == "none": return None - client = self.config['TORRENT_CLIENTS'][default_torrent_client] - torrent_storage_dir = client.get('torrent_storage_dir', None) - torrent_client = client.get('torrent_client', None).lower() + client = self.config["TORRENT_CLIENTS"][default_torrent_client] + torrent_storage_dir = client.get("torrent_storage_dir", None) + torrent_client = client.get("torrent_client", None).lower() if torrent_storage_dir is None and torrent_client != "watch": - console.print(f'[bold red]Missing torrent_storage_dir for {default_torrent_client}') + console.print( + f"[bold red]Missing torrent_storage_dir for {default_torrent_client}" + ) return None elif not os.path.exists(str(torrent_storage_dir)) and torrent_client != "watch": - console.print(f"[bold red]Invalid torrent_storage_dir path: [bold yellow]{torrent_storage_dir}") + console.print( + f"[bold red]Invalid torrent_storage_dir path: [bold yellow]{torrent_storage_dir}" + ) torrenthash = None if torrent_storage_dir is not None and os.path.exists(torrent_storage_dir): - if meta.get('torrenthash', None) is not None: - valid, torrent_path = await self.is_valid_torrent(meta, f"{torrent_storage_dir}/{meta['torrenthash']}.torrent", meta['torrenthash'], torrent_client, print_err=True) + if meta.get("torrenthash", None) is not None: + valid, torrent_path = await self.is_valid_torrent( + meta, + 
f"{torrent_storage_dir}/{meta['torrenthash']}.torrent", + meta["torrenthash"], + torrent_client, + print_err=True, + ) if valid: - torrenthash = meta['torrenthash'] - elif meta.get('ext_torrenthash', None) is not None: - valid, torrent_path = await self.is_valid_torrent(meta, f"{torrent_storage_dir}/{meta['ext_torrenthash']}.torrent", meta['ext_torrenthash'], torrent_client, print_err=True) + torrenthash = meta["torrenthash"] + elif meta.get("ext_torrenthash", None) is not None: + valid, torrent_path = await self.is_valid_torrent( + meta, + f"{torrent_storage_dir}/{meta['ext_torrenthash']}.torrent", + meta["ext_torrenthash"], + torrent_client, + print_err=True, + ) if valid: - torrenthash = meta['ext_torrenthash'] - if torrent_client == 'qbit' and torrenthash is None and client.get('enable_search') is True: + torrenthash = meta["ext_torrenthash"] + if ( + torrent_client == "qbit" + and torrenthash is None + and client.get("enable_search") is True + ): torrenthash = await self.search_qbit_for_torrent(meta, client) if not torrenthash: console.print("[bold yellow]No Valid .torrent found") if not torrenthash: return None torrent_path = f"{torrent_storage_dir}/{torrenthash}.torrent" - valid2, torrent_path = await self.is_valid_torrent(meta, torrent_path, torrenthash, torrent_client, print_err=False) + valid2, torrent_path = await self.is_valid_torrent( + meta, torrent_path, torrenthash, torrent_client, print_err=False + ) if valid2: return torrent_path return None - async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client, print_err=False): + async def is_valid_torrent( + self, meta, torrent_path, torrenthash, torrent_client, print_err=False + ): valid = False wrong_file = False err_print = "" # Normalize the torrent hash based on the client - if torrent_client in ('qbit', 'deluge'): + if torrent_client in ("qbit", "deluge"): torrenthash = torrenthash.lower().strip() torrent_path = torrent_path.replace(torrenthash.upper(), torrenthash) - elif torrent_client == 'rtorrent': + elif torrent_client == "rtorrent": torrenthash = torrenthash.upper().strip() torrent_path = torrent_path.replace(torrenthash.upper(), torrenthash) - if meta['debug']: + if meta["debug"]: console.log(f"Torrent path after normalization: {torrent_path}") # Check if torrent file exists @@ -118,58 +170,79 @@ async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client try: torrent = Torrent.read(torrent_path) except Exception as e: - console.print(f'[bold red]Error reading torrent file: {e}') + console.print(f"[bold red]Error reading torrent file: {e}") return valid, torrent_path # Reuse if disc and basename matches or --keep-folder was specified - if meta.get('is_disc', None) is not None or (meta['keep_folder'] and meta['isdir']): + if meta.get("is_disc", None) is not None or ( + meta["keep_folder"] and meta["isdir"] + ): torrent_filepath = os.path.commonpath(torrent.files) - if os.path.basename(meta['path']) in torrent_filepath: + if os.path.basename(meta["path"]) in torrent_filepath: valid = True - if meta['debug']: - console.log(f"Torrent is valid based on disc/basename or keep-folder: {valid}") + if meta["debug"]: + console.log( + f"Torrent is valid based on disc/basename or keep-folder: {valid}" + ) # If one file, check for folder - if len(torrent.files) == len(meta['filelist']) == 1: - if os.path.basename(torrent.files[0]) == os.path.basename(meta['filelist'][0]): + if len(torrent.files) == len(meta["filelist"]) == 1: + if os.path.basename(torrent.files[0]) == os.path.basename( + 
meta["filelist"][0] + ): if str(torrent.files[0]) == os.path.basename(torrent.files[0]): valid = True else: wrong_file = True - if meta['debug']: - console.log(f"Single file match status: valid={valid}, wrong_file={wrong_file}") + if meta["debug"]: + console.log( + f"Single file match status: valid={valid}, wrong_file={wrong_file}" + ) # Check if number of files matches number of videos - elif len(torrent.files) == len(meta['filelist']): + elif len(torrent.files) == len(meta["filelist"]): torrent_filepath = os.path.commonpath(torrent.files) - actual_filepath = os.path.commonpath(meta['filelist']) + actual_filepath = os.path.commonpath(meta["filelist"]) local_path, remote_path = await self.remote_path_map(meta) - if local_path.lower() in meta['path'].lower() and local_path.lower() != remote_path.lower(): - actual_filepath = actual_filepath.replace(local_path, remote_path).replace(os.sep, '/') + if ( + local_path.lower() in meta["path"].lower() + and local_path.lower() != remote_path.lower() + ): + actual_filepath = actual_filepath.replace( + local_path, remote_path + ).replace(os.sep, "/") - if meta['debug']: + if meta["debug"]: console.log(f"Torrent_filepath: {torrent_filepath}") console.log(f"Actual_filepath: {actual_filepath}") if torrent_filepath in actual_filepath: valid = True - if meta['debug']: + if meta["debug"]: console.log(f"Multiple file match status: valid={valid}") else: - console.print(f'[bold yellow]{torrent_path} was not found') + console.print(f"[bold yellow]{torrent_path} was not found") # Additional checks if the torrent is valid so far if valid: if os.path.exists(torrent_path): try: reuse_torrent = Torrent.read(torrent_path) - if meta['debug']: - console.log(f"Checking piece size and count: pieces={reuse_torrent.pieces}, piece_size={reuse_torrent.piece_size}") + if meta["debug"]: + console.log( + f"Checking piece size and count: pieces={reuse_torrent.pieces}, piece_size={reuse_torrent.piece_size}" + ) # Piece size and count validations - if (reuse_torrent.pieces >= 7000 and reuse_torrent.piece_size < 8388608) or (reuse_torrent.pieces >= 4000 and reuse_torrent.piece_size < 4194304): + if ( + reuse_torrent.pieces >= 7000 + and reuse_torrent.piece_size < 8388608 + ) or ( + reuse_torrent.pieces >= 4000 + and reuse_torrent.piece_size < 4194304 + ): err_print = "[bold yellow]Too many pieces exist in current hash. 
REHASHING" valid = False elif reuse_torrent.piece_size < 32768: @@ -179,15 +252,15 @@ async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client err_print = "[bold red] Provided .torrent has files that were not expected" valid = False else: - err_print = f'[bold green]REUSING .torrent with infohash: [bold yellow]{torrenthash}' + err_print = f"[bold green]REUSING .torrent with infohash: [bold yellow]{torrenthash}" except Exception as e: - console.print(f'[bold red]Error checking reuse torrent: {e}') + console.print(f"[bold red]Error checking reuse torrent: {e}") valid = False - if meta['debug']: + if meta["debug"]: console.log(f"Final validity after piece checks: valid={valid}") else: - err_print = '[bold yellow]Unwanted Files/Folders Identified' + err_print = "[bold yellow]Unwanted Files/Folders Identified" # Print the error message if needed if print_err: @@ -197,18 +270,29 @@ async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client async def search_qbit_for_torrent(self, meta, client): console.print("[green]Searching qbittorrent for an existing .torrent") - torrent_storage_dir = client.get('torrent_storage_dir', None) - if meta['debug']: + torrent_storage_dir = client.get("torrent_storage_dir", None) + if meta["debug"]: if torrent_storage_dir: console.print(f"Torrent storage directory found: {torrent_storage_dir}") else: console.print("No torrent storage directory found.") - if torrent_storage_dir is None and client.get("torrent_client", None) != "watch": - console.print(f"[bold red]Missing torrent_storage_dir for {self.config['DEFAULT']['default_torrent_client']}") + if ( + torrent_storage_dir is None + and client.get("torrent_client", None) != "watch" + ): + console.print( + f"[bold red]Missing torrent_storage_dir for {self.config['DEFAULT']['default_torrent_client']}" + ) return None try: - qbt_client = qbittorrentapi.Client(host=client['qbit_url'], port=client['qbit_port'], username=client['qbit_user'], password=client['qbit_pass'], VERIFY_WEBUI_CERTIFICATE=client.get('VERIFY_WEBUI_CERTIFICATE', True)) + qbt_client = qbittorrentapi.Client( + host=client["qbit_url"], + port=client["qbit_port"], + username=client["qbit_user"], + password=client["qbit_pass"], + VERIFY_WEBUI_CERTIFICATE=client.get("VERIFY_WEBUI_CERTIFICATE", True), + ) qbt_client.auth_log_in() except qbittorrentapi.LoginFailed: console.print("[bold red]INCORRECT QBIT LOGIN CREDENTIALS") @@ -220,9 +304,12 @@ async def search_qbit_for_torrent(self, meta, client): # Remote path map if needed remote_path_map = False local_path, remote_path = await self.remote_path_map(meta) - if local_path.lower() in meta['path'].lower() and local_path.lower() != remote_path.lower(): + if ( + local_path.lower() in meta["path"].lower() + and local_path.lower() != remote_path.lower() + ): remote_path_map = True - if meta['debug']: + if meta["debug"]: console.print("Remote path mapping found!") console.print(f"Local path: {local_path}") console.print(f"Remote path: {remote_path}") @@ -230,31 +317,55 @@ async def search_qbit_for_torrent(self, meta, client): torrents = qbt_client.torrents.info() for torrent in torrents: try: - torrent_path = torrent.get('content_path', f"{torrent.save_path}{torrent.name}") + torrent_path = torrent.get( + "content_path", f"{torrent.save_path}{torrent.name}" + ) except AttributeError: - if meta['debug']: + if meta["debug"]: console.print(torrent) console.print_exception() continue if remote_path_map: torrent_path = torrent_path.replace(remote_path, local_path) - 
torrent_path = torrent_path.replace(os.sep, '/').replace('/', os.sep) - - if meta['is_disc'] in ("", None) and len(meta['filelist']) == 1: - if torrent_path == meta['filelist'][0] and len(torrent.files) == len(meta['filelist']): - valid, torrent_path = await self.is_valid_torrent(meta, f"{torrent_storage_dir}/{torrent.hash}.torrent", torrent.hash, 'qbit', print_err=False) + torrent_path = torrent_path.replace(os.sep, "/").replace("/", os.sep) + + if meta["is_disc"] in ("", None) and len(meta["filelist"]) == 1: + if torrent_path == meta["filelist"][0] and len(torrent.files) == len( + meta["filelist"] + ): + valid, torrent_path = await self.is_valid_torrent( + meta, + f"{torrent_storage_dir}/{torrent.hash}.torrent", + torrent.hash, + "qbit", + print_err=False, + ) if valid: - console.print(f"[green]Found a matching .torrent with hash: [bold yellow]{torrent.hash}") + console.print( + f"[green]Found a matching .torrent with hash: [bold yellow]{torrent.hash}" + ) return torrent.hash - elif meta['path'] == torrent_path: - valid, torrent_path = await self.is_valid_torrent(meta, f"{torrent_storage_dir}/{torrent.hash}.torrent", torrent.hash, 'qbit', print_err=False) + elif meta["path"] == torrent_path: + valid, torrent_path = await self.is_valid_torrent( + meta, + f"{torrent_storage_dir}/{torrent.hash}.torrent", + torrent.hash, + "qbit", + print_err=False, + ) if valid: - console.print(f"[green]Found a matching .torrent with hash: [bold yellow]{torrent.hash}") + console.print( + f"[green]Found a matching .torrent with hash: [bold yellow]{torrent.hash}" + ) return torrent.hash return None - def rtorrent(self, path, torrent_path, torrent, meta, local_path, remote_path, client): - rtorrent = xmlrpc.client.Server(client['rtorrent_url'], context=ssl._create_stdlib_context()) + def rtorrent( + self, path, torrent_path, torrent, meta, local_path, remote_path, client + ): + rtorrent = xmlrpc.client.Server( + client["rtorrent_url"], context=ssl._create_stdlib_context() + ) metainfo = bencode.bread(torrent_path) try: fast_resume = self.add_fast_resume(metainfo, path, torrent) @@ -264,7 +375,7 @@ def rtorrent(self, path, torrent_path, torrent, meta, local_path, remote_path, c new_meta = bencode.bencode(fast_resume) if new_meta != metainfo: - fr_file = torrent_path.replace('.torrent', '-resume.torrent') + fr_file = torrent_path.replace(".torrent", "-resume.torrent") console.print("Creating fast resume") bencode.bwrite(fast_resume, fr_file) @@ -273,10 +384,13 @@ def rtorrent(self, path, torrent_path, torrent, meta, local_path, remote_path, c # path = os.path.dirname(path) # Remote path mount modified_fr = False - if local_path.lower() in path.lower() and local_path.lower() != remote_path.lower(): + if ( + local_path.lower() in path.lower() + and local_path.lower() != remote_path.lower() + ): path_dir = os.path.dirname(path) path = path.replace(local_path, remote_path) - path = path.replace(os.sep, '/') + path = path.replace(os.sep, "/") shutil.copy(fr_file, f"{path_dir}/fr.torrent") fr_file = f"{os.path.dirname(path)}/fr.torrent" modified_fr = True @@ -284,22 +398,24 @@ def rtorrent(self, path, torrent_path, torrent, meta, local_path, remote_path, c path = os.path.dirname(path) console.print("[bold yellow]Adding and starting torrent") - rtorrent.load.start_verbose('', fr_file, f"d.directory_base.set={path}") + rtorrent.load.start_verbose("", fr_file, f"d.directory_base.set={path}") time.sleep(1) # Add labels - if client.get('rtorrent_label', None) is not None: - rtorrent.d.custom1.set(torrent.infohash, 
client['rtorrent_label']) - if meta.get('rtorrent_label') is not None: - rtorrent.d.custom1.set(torrent.infohash, meta['rtorrent_label']) + if client.get("rtorrent_label", None) is not None: + rtorrent.d.custom1.set(torrent.infohash, client["rtorrent_label"]) + if meta.get("rtorrent_label") is not None: + rtorrent.d.custom1.set(torrent.infohash, meta["rtorrent_label"]) # Delete modified fr_file location if modified_fr: os.remove(f"{path_dir}/fr.torrent") - if meta['debug']: + if meta["debug"]: console.print(f"[cyan]Path: {path}") return - async def qbittorrent(self, path, torrent, local_path, remote_path, client, is_disc, filelist, meta): + async def qbittorrent( + self, path, torrent, local_path, remote_path, client, is_disc, filelist, meta + ): # infohash = torrent.infohash # Remote path mount isdir = os.path.isdir(path) @@ -307,12 +423,21 @@ async def qbittorrent(self, path, torrent, local_path, remote_path, client, is_d path = os.path.dirname(path) if len(filelist) != 1: path = os.path.dirname(path) - if local_path.lower() in path.lower() and local_path.lower() != remote_path.lower(): + if ( + local_path.lower() in path.lower() + and local_path.lower() != remote_path.lower() + ): path = path.replace(local_path, remote_path) - path = path.replace(os.sep, '/') + path = path.replace(os.sep, "/") if not path.endswith(os.sep): path = f"{path}/" - qbt_client = qbittorrentapi.Client(host=client['qbit_url'], port=client['qbit_port'], username=client['qbit_user'], password=client['qbit_pass'], VERIFY_WEBUI_CERTIFICATE=client.get('VERIFY_WEBUI_CERTIFICATE', True)) + qbt_client = qbittorrentapi.Client( + host=client["qbit_url"], + port=client["qbit_port"], + username=client["qbit_user"], + password=client["qbit_pass"], + VERIFY_WEBUI_CERTIFICATE=client.get("VERIFY_WEBUI_CERTIFICATE", True), + ) console.print("[bold yellow]Adding and rechecking torrent") try: qbt_client.auth_log_in() @@ -320,19 +445,31 @@ async def qbittorrent(self, path, torrent, local_path, remote_path, client, is_d console.print("[bold red]INCORRECT QBIT LOGIN CREDENTIALS") return auto_management = False - am_config = client.get('automatic_management_paths', '') + am_config = client.get("automatic_management_paths", "") if isinstance(am_config, list): for each in am_config: if os.path.normpath(each).lower() in os.path.normpath(path).lower(): auto_management = True else: - if os.path.normpath(am_config).lower() in os.path.normpath(path).lower() and am_config.strip() != "": + if ( + os.path.normpath(am_config).lower() in os.path.normpath(path).lower() + and am_config.strip() != "" + ): auto_management = True - qbt_category = client.get("qbit_cat") if not meta.get("qbit_cat") else meta.get('qbit_cat') - - content_layout = client.get('content_layout', 'Original') - - qbt_client.torrents_add(torrent_files=torrent.dump(), save_path=path, use_auto_torrent_management=auto_management, is_skip_checking=True, content_layout=content_layout, category=qbt_category) + qbt_category = ( + client.get("qbit_cat") if not meta.get("qbit_cat") else meta.get("qbit_cat") + ) + + content_layout = client.get("content_layout", "Original") + + qbt_client.torrents_add( + torrent_files=torrent.dump(), + save_path=path, + use_auto_torrent_management=auto_management, + is_skip_checking=True, + content_layout=content_layout, + category=qbt_category, + ) # Wait for up to 30 seconds for qbit to actually return the download # there's an async race condition within qbt that it will return ok before the torrent is actually added for _ in range(0, 30): @@ -340,45 
+477,65 @@ async def qbittorrent(self, path, torrent, local_path, remote_path, client, is_d break await asyncio.sleep(1) qbt_client.torrents_resume(torrent.infohash) - if client.get('qbit_tag', None) is not None: - qbt_client.torrents_add_tags(tags=client.get('qbit_tag'), torrent_hashes=torrent.infohash) - if meta.get('qbit_tag') is not None: - qbt_client.torrents_add_tags(tags=meta.get('qbit_tag'), torrent_hashes=torrent.infohash) + if client.get("qbit_tag", None) is not None: + qbt_client.torrents_add_tags( + tags=client.get("qbit_tag"), torrent_hashes=torrent.infohash + ) + if meta.get("qbit_tag") is not None: + qbt_client.torrents_add_tags( + tags=meta.get("qbit_tag"), torrent_hashes=torrent.infohash + ) console.print(f"Added to: {path}") - def deluge(self, path, torrent_path, torrent, local_path, remote_path, client, meta): - client = DelugeRPCClient(client['deluge_url'], int(client['deluge_port']), client['deluge_user'], client['deluge_pass']) + def deluge( + self, path, torrent_path, torrent, local_path, remote_path, client, meta + ): + client = DelugeRPCClient( + client["deluge_url"], + int(client["deluge_port"]), + client["deluge_user"], + client["deluge_pass"], + ) # client = LocalDelugeRPCClient() client.connect() if client.connected is True: console.print("Connected to Deluge") isdir = os.path.isdir(path) # noqa F841 # Remote path mount - if local_path.lower() in path.lower() and local_path.lower() != remote_path.lower(): + if ( + local_path.lower() in path.lower() + and local_path.lower() != remote_path.lower() + ): path = path.replace(local_path, remote_path) - path = path.replace(os.sep, '/') + path = path.replace(os.sep, "/") path = os.path.dirname(path) - client.call('core.add_torrent_file', torrent_path, base64.b64encode(torrent.dump()), {'download_location': path, 'seed_mode': True}) - if meta['debug']: + client.call( + "core.add_torrent_file", + torrent_path, + base64.b64encode(torrent.dump()), + {"download_location": path, "seed_mode": True}, + ) + if meta["debug"]: console.print(f"[cyan]Path: {path}") else: console.print("[bold red]Unable to connect to deluge") def add_fast_resume(self, metainfo, datapath, torrent): - """ Add fast resume data to a metafile dict. 
- """ + """Add fast resume data to a metafile dict.""" # Get list of files files = metainfo["info"].get("files", None) single = files is None if single: if os.path.isdir(datapath): datapath = os.path.join(datapath, metainfo["info"]["name"]) - files = [Bunch( - path=[os.path.abspath(datapath)], - length=metainfo["info"]["length"], - )] + files = [ + Bunch( + path=[os.path.abspath(datapath)], + length=metainfo["info"]["length"], + ) + ] # Prepare resume data resume = metainfo.setdefault("libtorrent_resume", {}) @@ -395,33 +552,45 @@ def add_fast_resume(self, metainfo, datapath, torrent): # Check file size if os.path.getsize(filepath) != fileinfo["length"]: - raise OSError(errno.EINVAL, "File size mismatch for %r [is %d, expected %d]" % ( - filepath, os.path.getsize(filepath), fileinfo["length"], - )) + raise OSError( + errno.EINVAL, + "File size mismatch for %r [is %d, expected %d]" + % ( + filepath, + os.path.getsize(filepath), + fileinfo["length"], + ), + ) # Add resume data for this file - resume["files"].append(dict( - priority=1, - mtime=int(os.path.getmtime(filepath)), - completed=( - (offset + fileinfo["length"] + piece_length - 1) // piece_length - - offset // piece_length - ), - )) + resume["files"].append( + dict( + priority=1, + mtime=int(os.path.getmtime(filepath)), + completed=( + (offset + fileinfo["length"] + piece_length - 1) // piece_length + - offset // piece_length + ), + ) + ) offset += fileinfo["length"] return metainfo async def remote_path_map(self, meta): - if meta.get('client', None) is None: - torrent_client = self.config['DEFAULT']['default_torrent_client'] + if meta.get("client", None) is None: + torrent_client = self.config["DEFAULT"]["default_torrent_client"] else: - torrent_client = meta['client'] - local_path = list_local_path = self.config['TORRENT_CLIENTS'][torrent_client].get('local_path', '/LocalPath') - remote_path = list_remote_path = self.config['TORRENT_CLIENTS'][torrent_client].get('remote_path', '/RemotePath') + torrent_client = meta["client"] + local_path = list_local_path = self.config["TORRENT_CLIENTS"][ + torrent_client + ].get("local_path", "/LocalPath") + remote_path = list_remote_path = self.config["TORRENT_CLIENTS"][ + torrent_client + ].get("remote_path", "/RemotePath") if isinstance(local_path, list): for i in range(len(local_path)): - if os.path.normpath(local_path[i]).lower() in meta['path'].lower(): + if os.path.normpath(local_path[i]).lower() in meta["path"].lower(): list_local_path = local_path[i] list_remote_path = remote_path[i] diff --git a/src/console.py b/src/console.py index 223c51181..a9463afd5 100644 --- a/src/console.py +++ b/src/console.py @@ -1,2 +1,3 @@ from rich.console import Console + console = Console() diff --git a/src/discparse.py b/src/discparse.py index b4e6b246e..ed6090c07 100644 --- a/src/discparse.py +++ b/src/discparse.py @@ -11,60 +11,82 @@ from src.console import console -class DiscParse(): +class DiscParse: def __init__(self): pass """ Get and parse bdinfo """ + async def get_bdinfo(self, discs, folder_id, base_dir, meta_discs): save_dir = f"{base_dir}/tmp/{folder_id}" if not os.path.exists(save_dir): os.mkdir(save_dir) for i in range(len(discs)): bdinfo_text = None - path = os.path.abspath(discs[i]['path']) + path = os.path.abspath(discs[i]["path"]) for file in os.listdir(save_dir): if file == f"BD_SUMMARY_{str(i).zfill(2)}.txt": bdinfo_text = save_dir + "/" + file if bdinfo_text is None or meta_discs == []: if os.path.exists(f"{save_dir}/BD_FULL_{str(i).zfill(2)}.txt"): - bdinfo_text = 
os.path.abspath(f"{save_dir}/BD_FULL_{str(i).zfill(2)}.txt") + bdinfo_text = os.path.abspath( + f"{save_dir}/BD_FULL_{str(i).zfill(2)}.txt" + ) else: bdinfo_text = "" - if sys.platform.startswith('linux') or sys.platform.startswith('darwin'): + if sys.platform.startswith("linux") or sys.platform.startswith( + "darwin" + ): try: # await asyncio.subprocess.Process(['mono', "bin/BDInfo/BDInfo.exe", "-w", path, save_dir]) console.print(f"[bold green]Scanning {path}") - proc = await asyncio.create_subprocess_exec('mono', f"{base_dir}/bin/BDInfo/BDInfo.exe", '-w', path, save_dir) + proc = await asyncio.create_subprocess_exec( + "mono", + f"{base_dir}/bin/BDInfo/BDInfo.exe", + "-w", + path, + save_dir, + ) await proc.wait() except Exception: - console.print('[bold red]mono not found, please install mono') + console.print( + "[bold red]mono not found, please install mono" + ) - elif sys.platform.startswith('win32'): + elif sys.platform.startswith("win32"): # await asyncio.subprocess.Process(["bin/BDInfo/BDInfo.exe", "-w", path, save_dir]) console.print(f"[bold green]Scanning {path}") - proc = await asyncio.create_subprocess_exec(f"{base_dir}/bin/BDInfo/BDInfo.exe", "-w", path, save_dir) + proc = await asyncio.create_subprocess_exec( + f"{base_dir}/bin/BDInfo/BDInfo.exe", "-w", path, save_dir + ) await proc.wait() await asyncio.sleep(1) else: - console.print("[red]Not sure how to run bdinfo on your platform, get support please thanks.") + console.print( + "[red]Not sure how to run bdinfo on your platform, get support please thanks." + ) while True: try: if bdinfo_text == "": for file in os.listdir(save_dir): if file.startswith("BDINFO"): bdinfo_text = save_dir + "/" + file - with open(bdinfo_text, 'r') as f: + with open(bdinfo_text, "r") as f: text = f.read() result = text.split("QUICK SUMMARY:", 2) - files = result[0].split("FILES:", 2)[1].split("CHAPTERS:", 2)[0].split("-------------") + files = ( + result[0] + .split("FILES:", 2)[1] + .split("CHAPTERS:", 2)[0] + .split("-------------") + ) result2 = result[1].rstrip(" \n") result = result2.split("********************", 1) bd_summary = result[0].rstrip(" \n") f.close() - with open(bdinfo_text, 'r') as f: # parse extended BDInfo + with open(bdinfo_text, "r") as f: # parse extended BDInfo text = f.read() result = text.split("[code]", 3) result2 = result[2].rstrip(" \n") @@ -72,7 +94,9 @@ async def get_bdinfo(self, discs, folder_id, base_dir, meta_discs): ext_bd_summary = result[0].rstrip(" \n") f.close() try: - shutil.copyfile(bdinfo_text, f"{save_dir}/BD_FULL_{str(i).zfill(2)}.txt") + shutil.copyfile( + bdinfo_text, f"{save_dir}/BD_FULL_{str(i).zfill(2)}.txt" + ) os.remove(bdinfo_text) except shutil.SameFileError: pass @@ -81,48 +105,50 @@ async def get_bdinfo(self, discs, folder_id, base_dir, meta_discs): await asyncio.sleep(5) continue break - with open(f"{save_dir}/BD_SUMMARY_{str(i).zfill(2)}.txt", 'w') as f: + with open(f"{save_dir}/BD_SUMMARY_{str(i).zfill(2)}.txt", "w") as f: f.write(bd_summary.strip()) f.close() - with open(f"{save_dir}/BD_SUMMARY_EXT.txt", 'w') as f: # write extended BDInfo file + with open( + f"{save_dir}/BD_SUMMARY_EXT.txt", "w" + ) as f: # write extended BDInfo file f.write(ext_bd_summary.strip()) f.close() bdinfo = self.parse_bdinfo(bd_summary, files[1], path) - discs[i]['summary'] = bd_summary.strip() - discs[i]['bdinfo'] = bdinfo + discs[i]["summary"] = bd_summary.strip() + discs[i]["bdinfo"] = bdinfo # shutil.rmtree(f"{base_dir}/tmp") else: discs = meta_discs - return discs, discs[0]['bdinfo'] + return discs, 
discs[0]["bdinfo"] def parse_bdinfo(self, bdinfo_input, files, path): bdinfo = dict() - bdinfo['video'] = list() - bdinfo['audio'] = list() - bdinfo['subtitles'] = list() - bdinfo['path'] = path + bdinfo["video"] = list() + bdinfo["audio"] = list() + bdinfo["subtitles"] = list() + bdinfo["path"] = path lines = bdinfo_input.splitlines() for l in lines: # noqa E741 line = l.strip().lower() if line.startswith("*"): line = l.replace("*", "").strip().lower() if line.startswith("playlist:"): - playlist = l.split(':', 1)[1] - bdinfo['playlist'] = playlist.split('.', 1)[0].strip() + playlist = l.split(":", 1)[1] + bdinfo["playlist"] = playlist.split(".", 1)[0].strip() if line.startswith("disc size:"): - size = l.split(':', 1)[1] - size = size.split('bytes', 1)[0].replace(',', '') + size = l.split(":", 1)[1] + size = size.split("bytes", 1)[0].replace(",", "") size = float(size) / float(1 << 30) - bdinfo['size'] = size + bdinfo["size"] = size if line.startswith("length:"): - length = l.split(':', 1)[1] - bdinfo['length'] = length.split('.', 1)[0].strip() + length = l.split(":", 1)[1] + bdinfo["length"] = length.split(".", 1)[0].strip() if line.startswith("video:"): - split1 = l.split(':', 1)[1] - split2 = split1.split('/', 12) + split1 = l.split(":", 1)[1] + split2 = split1.split("/", 12) while len(split2) != 9: split2.append("") n = 0 @@ -139,24 +165,26 @@ def parse_bdinfo(self, bdinfo_input, files, path): bit_depth = "" hdr_dv = "" color = "" - bdinfo['video'].append({ - 'codec': split2[0].strip(), - 'bitrate': split2[1].strip(), - 'res': split2[n + 2].strip(), - 'fps': split2[n + 3].strip(), - 'aspect_ratio': split2[n + 4].strip(), - 'profile': split2[n + 5].strip(), - 'bit_depth': bit_depth, - 'hdr_dv': hdr_dv, - 'color': color, - '3d': three_dim, - }) + bdinfo["video"].append( + { + "codec": split2[0].strip(), + "bitrate": split2[1].strip(), + "res": split2[n + 2].strip(), + "fps": split2[n + 3].strip(), + "aspect_ratio": split2[n + 4].strip(), + "profile": split2[n + 5].strip(), + "bit_depth": bit_depth, + "hdr_dv": hdr_dv, + "color": color, + "3d": three_dim, + } + ) elif line.startswith("audio:"): if "(" in l: l = l.split("(")[0] # noqa E741 l = l.strip() # noqa E741 - split1 = l.split(':', 1)[1] - split2 = split1.split('/') + split1 = l.split(":", 1)[1] + split2 = split1.split("/") n = 0 if "Atmos" in split2[2].strip(): n = 1 @@ -167,27 +195,29 @@ def parse_bdinfo(self, bdinfo_input, files, path): bit_depth = split2[n + 5].strip() except Exception: bit_depth = "" - bdinfo['audio'].append({ - 'language': split2[0].strip(), - 'codec': split2[1].strip(), - 'channels': split2[n + 2].strip(), - 'sample_rate': split2[n + 3].strip(), - 'bitrate': split2[n + 4].strip(), - 'bit_depth': bit_depth, # Also DialNorm, but is not in use anywhere yet - 'atmos_why_you_be_like_this': fuckatmos, - }) + bdinfo["audio"].append( + { + "language": split2[0].strip(), + "codec": split2[1].strip(), + "channels": split2[n + 2].strip(), + "sample_rate": split2[n + 3].strip(), + "bitrate": split2[n + 4].strip(), + "bit_depth": bit_depth, # Also DialNorm, but is not in use anywhere yet + "atmos_why_you_be_like_this": fuckatmos, + } + ) elif line.startswith("disc title:"): - title = l.split(':', 1)[1] - bdinfo['title'] = title + title = l.split(":", 1)[1] + bdinfo["title"] = title elif line.startswith("disc label:"): - label = l.split(':', 1)[1] - bdinfo['label'] = label - elif line.startswith('subtitle:'): - split1 = l.split(':', 1)[1] - split2 = split1.split('/') - bdinfo['subtitles'].append(split2[0].strip()) + label 
= l.split(":", 1)[1] + bdinfo["label"] = label + elif line.startswith("subtitle:"): + split1 = l.split(":", 1)[1] + split2 = split1.split("/") + bdinfo["subtitles"].append(split2[0].strip()) files = files.splitlines() - bdinfo['files'] = [] + bdinfo["files"] = [] for line in files: try: stripped = line.split() @@ -197,9 +227,9 @@ def parse_bdinfo(self, bdinfo_input, files, path): bd_length = stripped[2] bd_size = stripped[3] # noqa F841 bd_bitrate = stripped[4] # noqa F841 - m2ts['file'] = bd_file - m2ts['length'] = bd_length - bdinfo['files'].append(m2ts) + m2ts["file"] = bd_file + m2ts["length"] = bd_length + bdinfo["files"].append(m2ts) except Exception: pass return bdinfo @@ -207,9 +237,10 @@ def parse_bdinfo(self, bdinfo_input, files, path): """ Parse VIDEO_TS and get mediainfos """ + async def get_dvdinfo(self, discs): for each in discs: - path = each.get('path') + path = each.get("path") os.chdir(path) files = glob("VTS_*.VOB") files.sort() @@ -225,35 +256,61 @@ async def get_dvdinfo(self, discs): main_set_duration = 0 for vob_set in filesdict.values(): # Parse media info for this VOB set - vob_set_mi = MediaInfo.parse(f"VTS_{vob_set[0][:2]}_0.IFO", output='JSON') + vob_set_mi = MediaInfo.parse( + f"VTS_{vob_set[0][:2]}_0.IFO", output="JSON" + ) vob_set_mi = json.loads(vob_set_mi) - vob_set_duration = vob_set_mi['media']['track'][1]['Duration'] + vob_set_duration = vob_set_mi["media"]["track"][1]["Duration"] # If the duration of the new vob set > main set by more than 10% then it's our new main set # This should make it so TV shows pick the first episode - if (float(vob_set_duration) * 1.00) > (float(main_set_duration) * 1.10) or len(main_set) < 1: + if (float(vob_set_duration) * 1.00) > ( + float(main_set_duration) * 1.10 + ) or len(main_set) < 1: main_set = vob_set main_set_duration = vob_set_duration - each['main_set'] = main_set + each["main_set"] = main_set set = main_set[0][:2] - each['vob'] = vob = f"{path}/VTS_{set}_1.VOB" - each['ifo'] = ifo = f"{path}/VTS_{set}_0.IFO" - each['vob_mi'] = MediaInfo.parse(os.path.basename(vob), output='STRING', full=False, mediainfo_options={'inform_version': '1'}).replace('\r\n', '\n') - each['ifo_mi'] = MediaInfo.parse(os.path.basename(ifo), output='STRING', full=False, mediainfo_options={'inform_version': '1'}).replace('\r\n', '\n') - each['vob_mi_full'] = MediaInfo.parse(vob, output='STRING', full=False, mediainfo_options={'inform_version': '1'}).replace('\r\n', '\n') - each['ifo_mi_full'] = MediaInfo.parse(ifo, output='STRING', full=False, mediainfo_options={'inform_version': '1'}).replace('\r\n', '\n') + each["vob"] = vob = f"{path}/VTS_{set}_1.VOB" + each["ifo"] = ifo = f"{path}/VTS_{set}_0.IFO" + each["vob_mi"] = MediaInfo.parse( + os.path.basename(vob), + output="STRING", + full=False, + mediainfo_options={"inform_version": "1"}, + ).replace("\r\n", "\n") + each["ifo_mi"] = MediaInfo.parse( + os.path.basename(ifo), + output="STRING", + full=False, + mediainfo_options={"inform_version": "1"}, + ).replace("\r\n", "\n") + each["vob_mi_full"] = MediaInfo.parse( + vob, + output="STRING", + full=False, + mediainfo_options={"inform_version": "1"}, + ).replace("\r\n", "\n") + each["ifo_mi_full"] = MediaInfo.parse( + ifo, + output="STRING", + full=False, + mediainfo_options={"inform_version": "1"}, + ).replace("\r\n", "\n") - size = sum(os.path.getsize(f) for f in os.listdir('.') if os.path.isfile(f)) / float(1 << 30) + size = sum( + os.path.getsize(f) for f in os.listdir(".") if os.path.isfile(f) + ) / float(1 << 30) if size <= 7.95: 
dvd_size = "DVD9" if size <= 4.37: dvd_size = "DVD5" - each['size'] = dvd_size + each["size"] = dvd_size return discs async def get_hddvd_info(self, discs): for each in discs: - path = each.get('path') + path = each.get("path") os.chdir(path) files = glob("*.EVO") size = 0 @@ -264,6 +321,11 @@ async def get_hddvd_info(self, discs): if file_size > size: largest = file size = file_size - each['evo_mi'] = MediaInfo.parse(os.path.basename(largest), output='STRING', full=False, mediainfo_options={'inform_version': '1'}) - each['largest_evo'] = os.path.abspath(f"{path}/{largest}") + each["evo_mi"] = MediaInfo.parse( + os.path.basename(largest), + output="STRING", + full=False, + mediainfo_options={"inform_version": "1"}, + ) + each["largest_evo"] = os.path.abspath(f"{path}/{largest}") return discs diff --git a/src/exceptions.py b/src/exceptions.py index e5de6f944..818360a11 100644 --- a/src/exceptions.py +++ b/src/exceptions.py @@ -1,6 +1,6 @@ class LoginException(Exception): def __init__(self, *args, **kwargs): - default_message = 'An error occured while logging in' + default_message = "An error occured while logging in" # if any arguments are passed... # If you inherit from the exception that takes message as a keyword # maybe you will need to check kwargs here @@ -14,7 +14,7 @@ def __init__(self, *args, **kwargs): class UploadException(Exception): def __init__(self, *args, **kwargs): - default_message = 'An error occured while uploading' + default_message = "An error occured while uploading" # if any arguments are passed... # If you inherit from the exception that takes message as a keyword # maybe you will need to check kwargs here diff --git a/src/prep.py b/src/prep.py index 2df9bc8a1..511804f6e 100644 --- a/src/prep.py +++ b/src/prep.py @@ -52,14 +52,16 @@ import sys except ModuleNotFoundError: console.print(traceback.print_exc()) - console.print('[bold red]Missing Module Found. Please reinstall required dependancies.') - console.print('[yellow]pip3 install --user -U -r requirements.txt') + console.print( + "[bold red]Missing Module Found. Please reinstall required dependancies." 
+ ) + console.print("[yellow]pip3 install --user -U -r requirements.txt") exit() except KeyboardInterrupt: exit() -class Prep(): +class Prep: """ Prepare for upload: Mediainfo/BDInfo @@ -67,11 +69,12 @@ class Prep(): Database Identifiers (TMDB/IMDB/MAL/etc) Create Name """ + def __init__(self, screens, img_host, config): self.screens = screens self.config = config self.img_host = img_host.lower() - tmdb.API_KEY = config['DEFAULT']['tmdb_api'] + tmdb.API_KEY = config["DEFAULT"]["tmdb_api"] async def prompt_user_for_confirmation(self, message: str) -> bool: try: @@ -84,11 +87,13 @@ async def prompt_user_for_confirmation(self, message: str) -> bool: async def check_images_concurrently(self, imagelist): async def check_and_collect(image_dict): - img_url = image_dict.get('img_url') or image_dict.get('raw_url') + img_url = image_dict.get("img_url") or image_dict.get("raw_url") if img_url and await self.check_image_link(img_url): return image_dict else: - console.print(f"[yellow]Image link failed verification and will be skipped: {img_url}[/yellow]") + console.print( + f"[yellow]Image link failed verification and will be skipped: {img_url}[/yellow]" + ) return None tasks = [check_and_collect(image_dict) for image_dict in imagelist] @@ -100,86 +105,127 @@ async def check_image_link(self, url): try: async with session.get(url) as response: if response.status == 200: - content_type = response.headers.get('Content-Type', '').lower() - if 'image' in content_type: + content_type = response.headers.get("Content-Type", "").lower() + if "image" in content_type: # Attempt to load the image image_data = await response.read() try: image = Image.open(io.BytesIO(image_data)) image.verify() # This will check if the image is broken - console.print(f"[green]Image verified successfully: {url}[/green]") + console.print( + f"[green]Image verified successfully: {url}[/green]" + ) return True except (IOError, SyntaxError) as e: # noqa #F841 - console.print(f"[red]Image verification failed (corrupt image): {url}[/red]") + console.print( + f"[red]Image verification failed (corrupt image): {url}[/red]" + ) return False else: - console.print(f"[red]Content type is not an image: {url}[/red]") + console.print( + f"[red]Content type is not an image: {url}[/red]" + ) return False else: - console.print(f"[red]Failed to retrieve image: {url} (status code: {response.status})[/red]") + console.print( + f"[red]Failed to retrieve image: {url} (status code: {response.status})[/red]" + ) return False except Exception as e: - console.print(f"[red]Exception occurred while checking image: {url} - {str(e)}[/red]") + console.print( + f"[red]Exception occurred while checking image: {url} - {str(e)}[/red]" + ) return False async def update_meta_with_unit3d_data(self, meta, tracker_data, tracker_name): # Unpack the expected 9 elements, ignoring any additional ones - tmdb, imdb, tvdb, mal, desc, category, infohash, imagelist, filename, *rest = tracker_data - - if tmdb not in [None, '0']: - meta['tmdb_manual'] = tmdb - if imdb not in [None, '0']: - meta['imdb'] = str(imdb).zfill(7) - if tvdb not in [None, '0']: - meta['tvdb_id'] = tvdb - if mal not in [None, '0']: - meta['mal'] = mal - if desc not in [None, '0', '']: - meta[f'{tracker_name.lower()}_desc'] = desc - if category.upper() in ['MOVIE', 'TV SHOW', 'FANRES']: - meta['category'] = 'TV' if category.upper() == 'TV SHOW' else category.upper() - - if not meta.get('image_list'): # Only handle images if image_list is not already populated + tmdb, imdb, tvdb, mal, desc, category, infohash, 
imagelist, filename, *rest = ( + tracker_data + ) + + if tmdb not in [None, "0"]: + meta["tmdb_manual"] = tmdb + if imdb not in [None, "0"]: + meta["imdb"] = str(imdb).zfill(7) + if tvdb not in [None, "0"]: + meta["tvdb_id"] = tvdb + if mal not in [None, "0"]: + meta["mal"] = mal + if desc not in [None, "0", ""]: + meta[f"{tracker_name.lower()}_desc"] = desc + if category.upper() in ["MOVIE", "TV SHOW", "FANRES"]: + meta["category"] = ( + "TV" if category.upper() == "TV SHOW" else category.upper() + ) + + if not meta.get( + "image_list" + ): # Only handle images if image_list is not already populated if imagelist: # Ensure imagelist is not empty before setting valid_images = await self.check_images_concurrently(imagelist) if valid_images: - meta['image_list'] = valid_images - if meta.get('image_list'): # Double-check if image_list is set before handling it - if not (meta.get('blu') or meta.get('aither') or meta.get('lst') or meta.get('oe') or meta.get('tik')) or meta['unattended']: + meta["image_list"] = valid_images + if meta.get( + "image_list" + ): # Double-check if image_list is set before handling it + if ( + not ( + meta.get("blu") + or meta.get("aither") + or meta.get("lst") + or meta.get("oe") + or meta.get("tik") + ) + or meta["unattended"] + ): await self.handle_image_list(meta, tracker_name) if filename: - meta[f'{tracker_name.lower()}_filename'] = filename + meta[f"{tracker_name.lower()}_filename"] = filename - console.print(f"[green]{tracker_name} data successfully updated in meta[/green]") + console.print( + f"[green]{tracker_name} data successfully updated in meta[/green]" + ) - async def update_metadata_from_tracker(self, tracker_name, tracker_instance, meta, search_term, search_file_folder): + async def update_metadata_from_tracker( + self, tracker_name, tracker_instance, meta, search_term, search_file_folder + ): tracker_key = tracker_name.lower() manual_key = f"{tracker_key}_manual" found_match = False if tracker_name in ["BLU", "AITHER", "LST", "OE", "TIK"]: if meta.get(tracker_key) is not None: - console.print(f"[cyan]{tracker_name} ID found in meta, reusing existing ID: {meta[tracker_key]}[/cyan]") + console.print( + f"[cyan]{tracker_name} ID found in meta, reusing existing ID: {meta[tracker_key]}[/cyan]" + ) tracker_data = await COMMON(self.config).unit3d_torrent_info( tracker_name, tracker_instance.torrent_url, tracker_instance.search_url, meta, - id=meta[tracker_key] + id=meta[tracker_key], ) else: - console.print(f"[yellow]No ID found in meta for {tracker_name}, searching by file name[/yellow]") + console.print( + f"[yellow]No ID found in meta for {tracker_name}, searching by file name[/yellow]" + ) tracker_data = await COMMON(self.config).unit3d_torrent_info( tracker_name, tracker_instance.torrent_url, tracker_instance.search_url, - file_name=search_term + file_name=search_term, ) - if any(item not in [None, '0'] for item in tracker_data[:3]): # Check for valid tmdb, imdb, or tvdb - console.print(f"[green]Valid data found on {tracker_name}, setting meta values[/green]") - await self.update_meta_with_unit3d_data(meta, tracker_data, tracker_name) + if any( + item not in [None, "0"] for item in tracker_data[:3] + ): # Check for valid tmdb, imdb, or tvdb + console.print( + f"[green]Valid data found on {tracker_name}, setting meta values[/green]" + ) + await self.update_meta_with_unit3d_data( + meta, tracker_data, tracker_name + ) found_match = True else: console.print(f"[yellow]No valid data found on {tracker_name}[/yellow]") @@ -188,110 +234,166 @@ async def 
update_metadata_from_tracker(self, tracker_name, tracker_instance, met elif tracker_name == "PTP": imdb_id = None # Ensure imdb_id is defined # Check if the PTP ID is already in meta - if meta.get('ptp') is None: + if meta.get("ptp") is None: # No PTP ID in meta, search by search term - imdb_id, ptp_torrent_id, ptp_torrent_hash = await tracker_instance.get_ptp_id_imdb(search_term, search_file_folder, meta) + imdb_id, ptp_torrent_id, ptp_torrent_hash = ( + await tracker_instance.get_ptp_id_imdb( + search_term, search_file_folder, meta + ) + ) if ptp_torrent_id: - meta['ptp'] = ptp_torrent_id - meta['imdb'] = str(imdb_id).zfill(7) if imdb_id else None - - console.print(f"[green]{tracker_name} IMDb ID found: tt{meta['imdb']}[/green]") - if not meta['unattended']: - if await self.prompt_user_for_confirmation("Do you want to use this ID data from PTP?"): - meta['skip_gen_desc'] = True + meta["ptp"] = ptp_torrent_id + meta["imdb"] = str(imdb_id).zfill(7) if imdb_id else None + + console.print( + f"[green]{tracker_name} IMDb ID found: tt{meta['imdb']}[/green]" + ) + if not meta["unattended"]: + if await self.prompt_user_for_confirmation( + "Do you want to use this ID data from PTP?" + ): + meta["skip_gen_desc"] = True found_match = True # Retrieve PTP description and image list - ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(ptp_torrent_id, meta, meta.get('is_disc', False)) - meta['description'] = ptp_desc + ptp_desc, ptp_imagelist = ( + await tracker_instance.get_ptp_description( + ptp_torrent_id, meta, meta.get("is_disc", False) + ) + ) + meta["description"] = ptp_desc - if not meta.get('image_list'): # Only handle images if image_list is not already populated - valid_images = await self.check_images_concurrently(ptp_imagelist) + if not meta.get( + "image_list" + ): # Only handle images if image_list is not already populated + valid_images = await self.check_images_concurrently( + ptp_imagelist + ) if valid_images: - meta['image_list'] = valid_images + meta["image_list"] = valid_images await self.handle_image_list(meta, tracker_name) - meta['skip_gen_desc'] = True - console.print("[green]PTP images added to metadata.[/green]") + meta["skip_gen_desc"] = True + console.print( + "[green]PTP images added to metadata.[/green]" + ) else: found_match = False - meta['skip_gen_desc'] = True - meta['description'] = None + meta["skip_gen_desc"] = True + meta["description"] = None else: found_match = True - ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(ptp_torrent_id, meta, meta.get('is_disc', False)) - meta['description'] = ptp_desc - - if not meta.get('image_list'): # Only handle images if image_list is not already populated - valid_images = await self.check_images_concurrently(ptp_imagelist) + ptp_desc, ptp_imagelist = ( + await tracker_instance.get_ptp_description( + ptp_torrent_id, meta, meta.get("is_disc", False) + ) + ) + meta["description"] = ptp_desc + + if not meta.get( + "image_list" + ): # Only handle images if image_list is not already populated + valid_images = await self.check_images_concurrently( + ptp_imagelist + ) if valid_images: - meta['image_list'] = valid_images + meta["image_list"] = valid_images else: console.print("[yellow]Skipping PTP as no match found[/yellow]") found_match = False - meta['skip_gen_desc'] = True - meta['description'] = None + meta["skip_gen_desc"] = True + meta["description"] = None else: - ptp_torrent_id = meta['ptp'] - console.print(f"[cyan]PTP ID found in meta: {ptp_torrent_id}, using it to get IMDb ID[/cyan]") - 
imdb_id, _, meta['ext_torrenthash'] = await tracker_instance.get_imdb_from_torrent_id(ptp_torrent_id) + ptp_torrent_id = meta["ptp"] + console.print( + f"[cyan]PTP ID found in meta: {ptp_torrent_id}, using it to get IMDb ID[/cyan]" + ) + imdb_id, _, meta["ext_torrenthash"] = ( + await tracker_instance.get_imdb_from_torrent_id(ptp_torrent_id) + ) if imdb_id: - meta['imdb'] = str(imdb_id).zfill(7) + meta["imdb"] = str(imdb_id).zfill(7) console.print(f"[green]IMDb ID found: tt{meta['imdb']}[/green]") found_match = True else: - console.print(f"[yellow]Could not find IMDb ID using PTP ID: {ptp_torrent_id}[/yellow]") + console.print( + f"[yellow]Could not find IMDb ID using PTP ID: {ptp_torrent_id}[/yellow]" + ) found_match = False # Retrieve PTP description and image list - ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(meta['ptp'], meta, meta.get('is_disc', False)) - meta['description'] = ptp_desc + ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description( + meta["ptp"], meta, meta.get("is_disc", False) + ) + meta["description"] = ptp_desc - if not meta.get('image_list'): # Only handle images if image_list is not already populated + if not meta.get( + "image_list" + ): # Only handle images if image_list is not already populated valid_images = await self.check_images_concurrently(ptp_imagelist) if valid_images: - meta['image_list'] = valid_images + meta["image_list"] = valid_images - meta['skip_gen_desc'] = True + meta["skip_gen_desc"] = True console.print("[green]PTP images added to metadata.[/green]") elif tracker_name == "HDB": - if meta.get('hdb') is not None: + if meta.get("hdb") is not None: meta[manual_key] = meta[tracker_key] - console.print(f"[cyan]{tracker_name} ID found in meta, reusing existing ID: {meta[tracker_key]}[/cyan]") + console.print( + f"[cyan]{tracker_name} ID found in meta, reusing existing ID: {meta[tracker_key]}[/cyan]" + ) # Use get_info_from_torrent_id function if ID is found in meta - imdb, tvdb_id, hdb_name, meta['ext_torrenthash'] = await tracker_instance.get_info_from_torrent_id(meta[tracker_key]) + imdb, tvdb_id, hdb_name, meta["ext_torrenthash"] = ( + await tracker_instance.get_info_from_torrent_id(meta[tracker_key]) + ) - meta['tvdb_id'] = str(tvdb_id) if tvdb_id else meta.get('tvdb_id') - meta['hdb_name'] = hdb_name + meta["tvdb_id"] = str(tvdb_id) if tvdb_id else meta.get("tvdb_id") + meta["hdb_name"] = hdb_name found_match = True # Skip user confirmation if searching by ID - console.print(f"[green]{tracker_name} data found: IMDb ID: {imdb}, TVDb ID: {meta['tvdb_id']}, HDB Name: {meta['hdb_name']}[/green]") + console.print( + f"[green]{tracker_name} data found: IMDb ID: {imdb}, TVDb ID: {meta['tvdb_id']}, HDB Name: {meta['hdb_name']}[/green]" + ) else: - console.print("[yellow]No ID found in meta for HDB, searching by file name[/yellow]") + console.print( + "[yellow]No ID found in meta for HDB, searching by file name[/yellow]" + ) # Use search_filename function if ID is not found in meta - imdb, tvdb_id, hdb_name, meta['ext_torrenthash'], tracker_id = await tracker_instance.search_filename(search_term, search_file_folder, meta) + imdb, tvdb_id, hdb_name, meta["ext_torrenthash"], tracker_id = ( + await tracker_instance.search_filename( + search_term, search_file_folder, meta + ) + ) - meta['tvdb_id'] = str(tvdb_id) if tvdb_id else meta.get('tvdb_id') - meta['hdb_name'] = hdb_name + meta["tvdb_id"] = str(tvdb_id) if tvdb_id else meta.get("tvdb_id") + meta["hdb_name"] = hdb_name if tracker_id: meta[tracker_key] = tracker_id 
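# A minimal sketch (hypothetical helpers, not part of the patch) of the two ID
# conventions this function relies on, as seen in the code above: IMDb IDs are
# stored zero-padded to seven digits, and tracker results are only trusted when
# at least one of the first three fields (tmdb, imdb, tvdb) holds a real value.
def normalize_imdb(imdb_id):
    """Return a 7-digit IMDb ID string, e.g. 68646 -> "0068646", or None when missing."""
    return str(imdb_id).zfill(7) if imdb_id else None

def has_usable_ids(tracker_data):
    """True when any of the first three fields (tmdb, imdb, tvdb) is a usable ID."""
    return any(item not in (None, "0") for item in tracker_data[:3])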
found_match = True if found_match: if imdb or tvdb_id or hdb_name: - console.print(f"[green]{tracker_name} data found: IMDb ID: {imdb}, TVDb ID: {meta['tvdb_id']}, HDB Name: {meta['hdb_name']}[/green]") - if await self.prompt_user_for_confirmation(f"Do you want to use the ID's found on {tracker_name}?"): - console.print(f"[green]{tracker_name} data retained.[/green]") + console.print( + f"[green]{tracker_name} data found: IMDb ID: {imdb}, TVDb ID: {meta['tvdb_id']}, HDB Name: {meta['hdb_name']}[/green]" + ) + if await self.prompt_user_for_confirmation( + f"Do you want to use the IDs found on {tracker_name}?" + ): + console.print( + f"[green]{tracker_name} data retained.[/green]" + ) else: - console.print(f"[yellow]{tracker_name} data discarded.[/yellow]") + console.print( + f"[yellow]{tracker_name} data discarded.[/yellow]" + ) meta[tracker_key] = None - meta['tvdb_id'] = None - meta['hdb_name'] = None + meta["tvdb_id"] = None + meta["hdb_name"] = None found_match = False else: found_match = False @@ -299,244 +401,396 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met return meta, found_match async def handle_image_list(self, meta, tracker_name): - if meta.get('image_list'): + if meta.get("image_list"): console.print(f"[cyan]Found the following images from {tracker_name}:") - for img in meta['image_list']: + for img in meta["image_list"]: console.print(f"[blue]{img}[/blue]") - approved_image_hosts = ['ptpimg', 'imgbox'] + approved_image_hosts = ["ptpimg", "imgbox"] # Check if the images are already hosted on an approved image host - if all(any(host in image['raw_url'] for host in approved_image_hosts) for image in meta['image_list']): - image_list = meta['image_list'] # noqa #F841 + if all( + any(host in image["raw_url"] for host in approved_image_hosts) + for image in meta["image_list"] + ): + image_list = meta["image_list"] # noqa #F841 else: - default_trackers = self.config['TRACKERS'].get('default_trackers', '') - trackers_list = [tracker.strip() for tracker in default_trackers.split(',')] - if 'MTV' in trackers_list or 'MTV' in meta.get('trackers', ''): - console.print("[red]Warning: Some images are not hosted on an MTV approved image host. MTV will fail if you keep these images.") - - if meta['unattended']: + default_trackers = self.config["TRACKERS"].get("default_trackers", "") + trackers_list = [ + tracker.strip() for tracker in default_trackers.split(",") + ] + if "MTV" in trackers_list or "MTV" in meta.get("trackers", ""): + console.print( + "[red]Warning: Some images are not hosted on an MTV approved image host. MTV will fail if you keep these images." + ) + + if meta["unattended"]: keep_images = True else: - keep_images = await self.prompt_user_for_confirmation(f"Do you want to keep the images found on {tracker_name}?") + keep_images = await self.prompt_user_for_confirmation( + f"Do you want to keep the images found on {tracker_name}?"
+ ) if not keep_images: - meta['image_list'] = [] + meta["image_list"] = [] console.print(f"[yellow]Images discarded from {tracker_name}.") else: console.print(f"[green]Images retained from {tracker_name}.") async def gather_prep(self, meta, mode): - meta['mode'] = mode + meta["mode"] = mode base_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) - meta['isdir'] = os.path.isdir(meta['path']) - base_dir = meta['base_dir'] + meta["isdir"] = os.path.isdir(meta["path"]) + base_dir = meta["base_dir"] - if meta.get('uuid', None) is None: - folder_id = os.path.basename(meta['path']) - meta['uuid'] = folder_id + if meta.get("uuid", None) is None: + folder_id = os.path.basename(meta["path"]) + meta["uuid"] = folder_id if not os.path.exists(f"{base_dir}/tmp/{meta['uuid']}"): Path(f"{base_dir}/tmp/{meta['uuid']}").mkdir(parents=True, exist_ok=True) - if meta['debug']: + if meta["debug"]: console.print(f"[cyan]ID: {meta['uuid']}") - meta['is_disc'], videoloc, bdinfo, meta['discs'] = await self.get_disc(meta) + meta["is_disc"], videoloc, bdinfo, meta["discs"] = await self.get_disc(meta) # Debugging information # console.print(f"Debug: meta['filelist'] before population: {meta.get('filelist', 'Not Set')}") - if meta['is_disc'] == "BDMV": - video, meta['scene'], meta['imdb'] = self.is_scene(meta['path'], meta.get('imdb', None)) - meta['filelist'] = [] # No filelist for discs, use path - search_term = os.path.basename(meta['path']) - search_file_folder = 'folder' + if meta["is_disc"] == "BDMV": + video, meta["scene"], meta["imdb"] = self.is_scene( + meta["path"], meta.get("imdb", None) + ) + meta["filelist"] = [] # No filelist for discs, use path + search_term = os.path.basename(meta["path"]) + search_file_folder = "folder" try: - guess_name = bdinfo['title'].replace('-', ' ') - filename = guessit(re.sub(r"[^0-9a-zA-Z\[\\]]+", " ", guess_name), {"excludes": ["country", "language"]})['title'] - untouched_filename = bdinfo['title'] + guess_name = bdinfo["title"].replace("-", " ") + filename = guessit( + re.sub(r"[^0-9a-zA-Z\[\\]]+", " ", guess_name), + {"excludes": ["country", "language"]}, + )["title"] + untouched_filename = bdinfo["title"] try: - meta['search_year'] = guessit(bdinfo['title'])['year'] + meta["search_year"] = guessit(bdinfo["title"])["year"] except Exception: - meta['search_year'] = "" + meta["search_year"] = "" except Exception: - guess_name = bdinfo['label'].replace('-', ' ') - filename = guessit(re.sub(r"[^0-9a-zA-Z\[\\]]+", " ", guess_name), {"excludes": ["country", "language"]})['title'] - untouched_filename = bdinfo['label'] + guess_name = bdinfo["label"].replace("-", " ") + filename = guessit( + re.sub(r"[^0-9a-zA-Z\[\\]]+", " ", guess_name), + {"excludes": ["country", "language"]}, + )["title"] + untouched_filename = bdinfo["label"] try: - meta['search_year'] = guessit(bdinfo['label'])['year'] + meta["search_year"] = guessit(bdinfo["label"])["year"] except Exception: - meta['search_year'] = "" - - if meta.get('resolution', None) is None: - meta['resolution'] = self.mi_resolution(bdinfo['video'][0]['res'], guessit(video), width="OTHER", scan="p", height="OTHER", actual_height=0) - meta['sd'] = self.is_sd(meta['resolution']) + meta["search_year"] = "" + + if meta.get("resolution", None) is None: + meta["resolution"] = self.mi_resolution( + bdinfo["video"][0]["res"], + guessit(video), + width="OTHER", + scan="p", + height="OTHER", + actual_height=0, + ) + meta["sd"] = self.is_sd(meta["resolution"]) mi = None - elif meta['is_disc'] == "DVD": - video, meta['scene'], 
meta['imdb'] = self.is_scene(meta['path'], meta.get('imdb', None)) - meta['filelist'] = [] - search_term = os.path.basename(meta['path']) - search_file_folder = 'folder' - guess_name = meta['discs'][0]['path'].replace('-', ' ') - filename = guessit(guess_name, {"excludes": ["country", "language"]})['title'] - untouched_filename = os.path.basename(os.path.dirname(meta['discs'][0]['path'])) + elif meta["is_disc"] == "DVD": + video, meta["scene"], meta["imdb"] = self.is_scene( + meta["path"], meta.get("imdb", None) + ) + meta["filelist"] = [] + search_term = os.path.basename(meta["path"]) + search_file_folder = "folder" + guess_name = meta["discs"][0]["path"].replace("-", " ") + filename = guessit(guess_name, {"excludes": ["country", "language"]})[ + "title" + ] + untouched_filename = os.path.basename( + os.path.dirname(meta["discs"][0]["path"]) + ) try: - meta['search_year'] = guessit(meta['discs'][0]['path'])['year'] + meta["search_year"] = guessit(meta["discs"][0]["path"])["year"] except Exception: - meta['search_year'] = "" - if not meta.get('edit', False): - mi = self.exportInfo(f"{meta['discs'][0]['path']}/VTS_{meta['discs'][0]['main_set'][0][:2]}_1.VOB", False, meta['uuid'], meta['base_dir'], export_text=False) - meta['mediainfo'] = mi + meta["search_year"] = "" + if not meta.get("edit", False): + mi = self.exportInfo( + f"{meta['discs'][0]['path']}/VTS_{meta['discs'][0]['main_set'][0][:2]}_1.VOB", + False, + meta["uuid"], + meta["base_dir"], + export_text=False, + ) + meta["mediainfo"] = mi else: - mi = meta['mediainfo'] - - meta['dvd_size'] = await self.get_dvd_size(meta['discs'], meta.get('manual_dvds')) - meta['resolution'] = self.get_resolution(guessit(video), meta['uuid'], base_dir) - meta['sd'] = self.is_sd(meta['resolution']) - - elif meta['is_disc'] == "HDDVD": - video, meta['scene'], meta['imdb'] = self.is_scene(meta['path'], meta.get('imdb', None)) - meta['filelist'] = [] - search_term = os.path.basename(meta['path']) - search_file_folder = 'folder' - guess_name = meta['discs'][0]['path'].replace('-', '') - filename = guessit(guess_name, {"excludes": ["country", "language"]})['title'] - untouched_filename = os.path.basename(meta['discs'][0]['path']) - videopath = meta['discs'][0]['largest_evo'] + mi = meta["mediainfo"] + + meta["dvd_size"] = await self.get_dvd_size( + meta["discs"], meta.get("manual_dvds") + ) + meta["resolution"] = self.get_resolution( + guessit(video), meta["uuid"], base_dir + ) + meta["sd"] = self.is_sd(meta["resolution"]) + + elif meta["is_disc"] == "HDDVD": + video, meta["scene"], meta["imdb"] = self.is_scene( + meta["path"], meta.get("imdb", None) + ) + meta["filelist"] = [] + search_term = os.path.basename(meta["path"]) + search_file_folder = "folder" + guess_name = meta["discs"][0]["path"].replace("-", "") + filename = guessit(guess_name, {"excludes": ["country", "language"]})[ + "title" + ] + untouched_filename = os.path.basename(meta["discs"][0]["path"]) + videopath = meta["discs"][0]["largest_evo"] try: - meta['search_year'] = guessit(meta['discs'][0]['path'])['year'] + meta["search_year"] = guessit(meta["discs"][0]["path"])["year"] except Exception: - meta['search_year'] = "" - if not meta.get('edit', False): - mi = self.exportInfo(meta['discs'][0]['largest_evo'], False, meta['uuid'], meta['base_dir'], export_text=False) - meta['mediainfo'] = mi + meta["search_year"] = "" + if not meta.get("edit", False): + mi = self.exportInfo( + meta["discs"][0]["largest_evo"], + False, + meta["uuid"], + meta["base_dir"], + export_text=False, + ) + 
meta["mediainfo"] = mi else: - mi = meta['mediainfo'] - meta['resolution'] = self.get_resolution(guessit(video), meta['uuid'], base_dir) - meta['sd'] = self.is_sd(meta['resolution']) + mi = meta["mediainfo"] + meta["resolution"] = self.get_resolution( + guessit(video), meta["uuid"], base_dir + ) + meta["sd"] = self.is_sd(meta["resolution"]) else: - videopath, meta['filelist'] = self.get_video(videoloc, meta.get('mode', 'discord')) - search_term = os.path.basename(meta['filelist'][0]) if meta['filelist'] else None - search_file_folder = 'file' - video, meta['scene'], meta['imdb'] = self.is_scene(videopath, meta.get('imdb', None)) - guess_name = ntpath.basename(video).replace('-', ' ') - filename = guessit(re.sub(r"[^0-9a-zA-Z\[\\]]+", " ", guess_name), {"excludes": ["country", "language"]}).get("title", guessit(re.sub("[^0-9a-zA-Z]+", " ", guess_name), {"excludes": ["country", "language"]})["title"]) + videopath, meta["filelist"] = self.get_video( + videoloc, meta.get("mode", "discord") + ) + search_term = ( + os.path.basename(meta["filelist"][0]) if meta["filelist"] else None + ) + search_file_folder = "file" + video, meta["scene"], meta["imdb"] = self.is_scene( + videopath, meta.get("imdb", None) + ) + guess_name = ntpath.basename(video).replace("-", " ") + filename = guessit( + re.sub(r"[^0-9a-zA-Z\[\\]]+", " ", guess_name), + {"excludes": ["country", "language"]}, + ).get( + "title", + guessit( + re.sub("[^0-9a-zA-Z]+", " ", guess_name), + {"excludes": ["country", "language"]}, + )["title"], + ) untouched_filename = os.path.basename(video) try: - meta['search_year'] = guessit(video)['year'] + meta["search_year"] = guessit(video)["year"] except Exception: - meta['search_year'] = "" + meta["search_year"] = "" - if not meta.get('edit', False): - mi = self.exportInfo(videopath, meta['isdir'], meta['uuid'], base_dir, export_text=True) - meta['mediainfo'] = mi + if not meta.get("edit", False): + mi = self.exportInfo( + videopath, meta["isdir"], meta["uuid"], base_dir, export_text=True + ) + meta["mediainfo"] = mi else: - mi = meta['mediainfo'] + mi = meta["mediainfo"] - if meta.get('resolution', None) is None: - meta['resolution'] = self.get_resolution(guessit(video), meta['uuid'], base_dir) - meta['sd'] = self.is_sd(meta['resolution']) + if meta.get("resolution", None) is None: + meta["resolution"] = self.get_resolution( + guessit(video), meta["uuid"], base_dir + ) + meta["sd"] = self.is_sd(meta["resolution"]) - if " AKA " in filename.replace('.', ' '): - filename = filename.split('AKA')[0] - meta['filename'] = filename + if " AKA " in filename.replace(".", " "): + filename = filename.split("AKA")[0] + meta["filename"] = filename - meta['bdinfo'] = bdinfo + meta["bdinfo"] = bdinfo # Debugging information after population # console.print(f"Debug: meta['filelist'] after population: {meta.get('filelist', 'Not Set')}") - if not meta.get('image_list'): + if not meta.get("image_list"): # Reuse information from trackers with fallback found_match = False if search_term: # Check if specific trackers are already set in meta specific_tracker = None - if meta.get('ptp'): - specific_tracker = 'PTP' - elif meta.get('hdb'): - specific_tracker = 'HDB' - elif meta.get('blu'): - specific_tracker = 'BLU' - elif meta.get('aither'): - specific_tracker = 'AITHER' - elif meta.get('lst'): - specific_tracker = 'LST' - elif meta.get('oe'): - specific_tracker = 'OE' - elif meta.get('tik'): - specific_tracker = 'TIK' + if meta.get("ptp"): + specific_tracker = "PTP" + elif meta.get("hdb"): + specific_tracker = "HDB" + 
elif meta.get("blu"): + specific_tracker = "BLU" + elif meta.get("aither"): + specific_tracker = "AITHER" + elif meta.get("lst"): + specific_tracker = "LST" + elif meta.get("oe"): + specific_tracker = "OE" + elif meta.get("tik"): + specific_tracker = "TIK" # If a specific tracker is found, only process that one if specific_tracker: - console.print(f"[blue]Processing only the {specific_tracker} tracker based on meta.[/blue]") - - if specific_tracker == 'PTP' and str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true": + console.print( + f"[blue]Processing only the {specific_tracker} tracker based on meta.[/blue]" + ) + + if ( + specific_tracker == "PTP" + and str( + self.config["TRACKERS"].get("PTP", {}).get("useAPI") + ).lower() + == "true" + ): ptp = PTP(config=self.config) - meta, match = await self.update_metadata_from_tracker('PTP', ptp, meta, search_term, search_file_folder) + meta, match = await self.update_metadata_from_tracker( + "PTP", ptp, meta, search_term, search_file_folder + ) if match: found_match = True - elif specific_tracker == 'BLU' and str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true": + elif ( + specific_tracker == "BLU" + and str( + self.config["TRACKERS"].get("BLU", {}).get("useAPI") + ).lower() + == "true" + ): blu = BLU(config=self.config) - meta, match = await self.update_metadata_from_tracker('BLU', blu, meta, search_term, search_file_folder) + meta, match = await self.update_metadata_from_tracker( + "BLU", blu, meta, search_term, search_file_folder + ) if match: found_match = True - elif specific_tracker == 'AITHER' and str(self.config['TRACKERS'].get('AITHER', {}).get('useAPI')).lower() == "true": + elif ( + specific_tracker == "AITHER" + and str( + self.config["TRACKERS"].get("AITHER", {}).get("useAPI") + ).lower() + == "true" + ): aither = AITHER(config=self.config) - meta, match = await self.update_metadata_from_tracker('AITHER', aither, meta, search_term, search_file_folder) + meta, match = await self.update_metadata_from_tracker( + "AITHER", aither, meta, search_term, search_file_folder + ) if match: found_match = True - elif specific_tracker == 'LST' and str(self.config['TRACKERS'].get('LST', {}).get('useAPI')).lower() == "true": + elif ( + specific_tracker == "LST" + and str( + self.config["TRACKERS"].get("LST", {}).get("useAPI") + ).lower() + == "true" + ): lst = LST(config=self.config) - meta, match = await self.update_metadata_from_tracker('LST', lst, meta, search_term, search_file_folder) + meta, match = await self.update_metadata_from_tracker( + "LST", lst, meta, search_term, search_file_folder + ) if match: found_match = True - elif specific_tracker == 'OE' and str(self.config['TRACKERS'].get('OE', {}).get('useAPI')).lower() == "true": + elif ( + specific_tracker == "OE" + and str( + self.config["TRACKERS"].get("OE", {}).get("useAPI") + ).lower() + == "true" + ): oe = OE(config=self.config) - meta, match = await self.update_metadata_from_tracker('OE', oe, meta, search_term, search_file_folder) + meta, match = await self.update_metadata_from_tracker( + "OE", oe, meta, search_term, search_file_folder + ) if match: found_match = True - elif specific_tracker == 'TIK' and str(self.config['TRACKERS'].get('TIK', {}).get('useAPI')).lower() == "true": + elif ( + specific_tracker == "TIK" + and str( + self.config["TRACKERS"].get("TIK", {}).get("useAPI") + ).lower() + == "true" + ): tik = TIK(config=self.config) - meta, match = await self.update_metadata_from_tracker('TIK', tik, meta, search_term, 
search_file_folder) + meta, match = await self.update_metadata_from_tracker( + "TIK", tik, meta, search_term, search_file_folder + ) if match: found_match = True - elif specific_tracker == 'HDB' and str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": + elif ( + specific_tracker == "HDB" + and str( + self.config["TRACKERS"].get("HDB", {}).get("useAPI") + ).lower() + == "true" + ): hdb = HDB(config=self.config) - meta, match = await self.update_metadata_from_tracker('HDB', hdb, meta, search_term, search_file_folder) + meta, match = await self.update_metadata_from_tracker( + "HDB", hdb, meta, search_term, search_file_folder + ) if match: found_match = True else: # Process all trackers if no specific tracker is set in meta - default_trackers = self.config['TRACKERS'].get('default_trackers', "").split(", ") + default_trackers = ( + self.config["TRACKERS"].get("default_trackers", "").split(", ") + ) if "PTP" in default_trackers and not found_match: - if str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true": + if ( + str( + self.config["TRACKERS"].get("PTP", {}).get("useAPI") + ).lower() + == "true" + ): ptp = PTP(config=self.config) - meta, match = await self.update_metadata_from_tracker('PTP', ptp, meta, search_term, search_file_folder) + meta, match = await self.update_metadata_from_tracker( + "PTP", ptp, meta, search_term, search_file_folder + ) if match: found_match = True if "BLU" in default_trackers and not found_match: - if str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true": + if ( + str( + self.config["TRACKERS"].get("BLU", {}).get("useAPI") + ).lower() + == "true" + ): blu = BLU(config=self.config) - meta, match = await self.update_metadata_from_tracker('BLU', blu, meta, search_term, search_file_folder) + meta, match = await self.update_metadata_from_tracker( + "BLU", blu, meta, search_term, search_file_folder + ) if match: found_match = True if "HDB" in default_trackers and not found_match: - if str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": + if ( + str( + self.config["TRACKERS"].get("HDB", {}).get("useAPI") + ).lower() + == "true" + ): hdb = HDB(config=self.config) - meta, match = await self.update_metadata_from_tracker('HDB', hdb, meta, search_term, search_file_folder) + meta, match = await self.update_metadata_from_tracker( + "HDB", hdb, meta, search_term, search_file_folder + ) if match: found_match = True @@ -545,197 +799,279 @@ async def gather_prep(self, meta, mode): else: console.print(f"[green]Match found: {found_match}[/green]") else: - console.print("[yellow]Warning: No valid search term available, skipping tracker updates.[/yellow]") + console.print( + "[yellow]Warning: No valid search term available, skipping tracker updates.[/yellow]" + ) else: console.print("Skipping existing search as meta already populated") # Take Screenshots - if meta['is_disc'] == "BDMV": - if meta.get('edit', False) is False: - if meta.get('vapoursynth', False) is True: + if meta["is_disc"] == "BDMV": + if meta.get("edit", False) is False: + if meta.get("vapoursynth", False) is True: use_vs = True else: use_vs = False try: - ds = multiprocessing.Process(target=self.disc_screenshots, args=(filename, bdinfo, meta['uuid'], base_dir, use_vs, meta.get('image_list', []), meta.get('ffdebug', False), None)) + ds = multiprocessing.Process( + target=self.disc_screenshots, + args=( + filename, + bdinfo, + meta["uuid"], + base_dir, + use_vs, + meta.get("image_list", []), + meta.get("ffdebug", False), + None, 
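# The screenshot workers here run in child processes; the parent polls is_alive() with await asyncio.sleep(...) so the event loop stays responsive, and a KeyboardInterrupt terminates the child cleanly.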
+ ), + ) ds.start() while ds.is_alive() is True: await asyncio.sleep(1) except KeyboardInterrupt: ds.terminate() - elif meta['is_disc'] == "DVD": - if meta.get('edit', False) is False: + elif meta["is_disc"] == "DVD": + if meta.get("edit", False) is False: try: - ds = multiprocessing.Process(target=self.dvd_screenshots, args=(meta, 0, None)) + ds = multiprocessing.Process( + target=self.dvd_screenshots, args=(meta, 0, None) + ) ds.start() while ds.is_alive() is True: await asyncio.sleep(1) except KeyboardInterrupt: ds.terminate() else: - if meta.get('edit', False) is False: + if meta.get("edit", False) is False: try: - s = multiprocessing.Process(target=self.screenshots, args=(videopath, filename, meta['uuid'], base_dir, meta)) + s = multiprocessing.Process( + target=self.screenshots, + args=(videopath, filename, meta["uuid"], base_dir, meta), + ) s.start() while s.is_alive() is True: await asyncio.sleep(3) except KeyboardInterrupt: s.terminate() - meta['tmdb'] = meta.get('tmdb_manual', None) - if meta.get('type', None) is None: - meta['type'] = self.get_type(video, meta['scene'], meta['is_disc']) - if meta.get('category', None) is None: - meta['category'] = self.get_cat(video) + meta["tmdb"] = meta.get("tmdb_manual", None) + if meta.get("type", None) is None: + meta["type"] = self.get_type(video, meta["scene"], meta["is_disc"]) + if meta.get("category", None) is None: + meta["category"] = self.get_cat(video) else: - meta['category'] = meta['category'].upper() - if meta.get('tmdb', None) is None and meta.get('imdb', None) is None: - meta['category'], meta['tmdb'], meta['imdb'] = self.get_tmdb_imdb_from_mediainfo(mi, meta['category'], meta['is_disc'], meta['tmdb'], meta['imdb']) - if meta.get('tmdb', None) is None and meta.get('imdb', None) is None: - meta = await self.get_tmdb_id(filename, meta['search_year'], meta, meta['category'], untouched_filename) - elif meta.get('imdb', None) is not None and meta.get('tmdb_manual', None) is None: - meta['imdb_id'] = str(meta['imdb']).replace('tt', '') + meta["category"] = meta["category"].upper() + if meta.get("tmdb", None) is None and meta.get("imdb", None) is None: + meta["category"], meta["tmdb"], meta["imdb"] = ( + self.get_tmdb_imdb_from_mediainfo( + mi, meta["category"], meta["is_disc"], meta["tmdb"], meta["imdb"] + ) + ) + if meta.get("tmdb", None) is None and meta.get("imdb", None) is None: + meta = await self.get_tmdb_id( + filename, + meta["search_year"], + meta, + meta["category"], + untouched_filename, + ) + elif ( + meta.get("imdb", None) is not None and meta.get("tmdb_manual", None) is None + ): + meta["imdb_id"] = str(meta["imdb"]).replace("tt", "") meta = await self.get_tmdb_from_imdb(meta, filename) else: - meta['tmdb_manual'] = meta.get('tmdb', None) + meta["tmdb_manual"] = meta.get("tmdb", None) # If no tmdb, use imdb for meta - if int(meta['tmdb']) == 0: + if int(meta["tmdb"]) == 0: meta = await self.imdb_other_meta(meta) else: meta = await self.tmdb_other_meta(meta) # Search tvmaze - meta['tvmaze_id'], meta['imdb_id'], meta['tvdb_id'] = await self.search_tvmaze(filename, meta['search_year'], meta.get('imdb_id', '0'), meta.get('tvdb_id', 0)) + meta["tvmaze_id"], meta["imdb_id"], meta["tvdb_id"] = await self.search_tvmaze( + filename, + meta["search_year"], + meta.get("imdb_id", "0"), + meta.get("tvdb_id", 0), + ) # If no imdb, search for it - if meta.get('imdb_id', None) is None: - meta['imdb_id'] = await self.search_imdb(filename, meta['search_year']) - if meta.get('imdb_info', None) is None and int(meta['imdb_id']) != 0: - 
meta['imdb_info'] = await self.get_imdb_info(meta['imdb_id'], meta) - if meta.get('tag', None) is None: - meta['tag'] = self.get_tag(video, meta) + if meta.get("imdb_id", None) is None: + meta["imdb_id"] = await self.search_imdb(filename, meta["search_year"]) + if meta.get("imdb_info", None) is None and int(meta["imdb_id"]) != 0: + meta["imdb_info"] = await self.get_imdb_info(meta["imdb_id"], meta) + if meta.get("tag", None) is None: + meta["tag"] = self.get_tag(video, meta) else: - if not meta['tag'].startswith('-') and meta['tag'] != "": - meta['tag'] = f"-{meta['tag']}" + if not meta["tag"].startswith("-") and meta["tag"] != "": + meta["tag"] = f"-{meta['tag']}" meta = await self.get_season_episode(video, meta) meta = await self.tag_override(meta) - meta['video'] = video - meta['audio'], meta['channels'], meta['has_commentary'] = self.get_audio_v2(mi, meta, bdinfo) - if meta['tag'][1:].startswith(meta['channels']): - meta['tag'] = meta['tag'].replace(f"-{meta['channels']}", '') - if meta.get('no_tag', False): - meta['tag'] = "" - meta['3D'] = self.is_3d(mi, bdinfo) - meta['source'], meta['type'] = self.get_source(meta['type'], video, meta['path'], meta['is_disc'], meta) - if meta.get('service', None) in (None, ''): - meta['service'], meta['service_longname'] = self.get_service(video, meta.get('tag', ''), meta['audio'], meta['filename']) - elif meta.get('service'): + meta["video"] = video + meta["audio"], meta["channels"], meta["has_commentary"] = self.get_audio_v2( + mi, meta, bdinfo + ) + if meta["tag"][1:].startswith(meta["channels"]): + meta["tag"] = meta["tag"].replace(f"-{meta['channels']}", "") + if meta.get("no_tag", False): + meta["tag"] = "" + meta["3D"] = self.is_3d(mi, bdinfo) + meta["source"], meta["type"] = self.get_source( + meta["type"], video, meta["path"], meta["is_disc"], meta + ) + if meta.get("service", None) in (None, ""): + meta["service"], meta["service_longname"] = self.get_service( + video, meta.get("tag", ""), meta["audio"], meta["filename"] + ) + elif meta.get("service"): services = self.get_service(get_services_only=True) - meta['service_longname'] = max((k for k, v in services.items() if v == meta['service']), key=len, default=meta['service']) - meta['uhd'] = self.get_uhd(meta['type'], guessit(meta['path']), meta['resolution'], meta['path']) - meta['hdr'] = self.get_hdr(mi, bdinfo) - meta['distributor'] = self.get_distributor(meta['distributor']) - if meta.get('is_disc', None) == "BDMV": # Blu-ray Specific - meta['region'] = self.get_region(bdinfo, meta.get('region', None)) - meta['video_codec'] = self.get_video_codec(bdinfo) + meta["service_longname"] = max( + (k for k, v in services.items() if v == meta["service"]), + key=len, + default=meta["service"], + ) + meta["uhd"] = self.get_uhd( + meta["type"], guessit(meta["path"]), meta["resolution"], meta["path"] + ) + meta["hdr"] = self.get_hdr(mi, bdinfo) + meta["distributor"] = self.get_distributor(meta["distributor"]) + if meta.get("is_disc", None) == "BDMV": # Blu-ray Specific + meta["region"] = self.get_region(bdinfo, meta.get("region", None)) + meta["video_codec"] = self.get_video_codec(bdinfo) else: - meta['video_encode'], meta['video_codec'], meta['has_encode_settings'], meta['bit_depth'] = self.get_video_encode(mi, meta['type'], bdinfo) - - meta['edition'], meta['repack'] = self.get_edition(meta['path'], bdinfo, meta['filelist'], meta.get('manual_edition')) - if "REPACK" in meta.get('edition', ""): - meta['repack'] = re.search(r"REPACK[\d]?", meta['edition'])[0] - meta['edition'] = 
re.sub(r"REPACK[\d]?", "", meta['edition']).strip().replace(' ', ' ') + ( + meta["video_encode"], + meta["video_codec"], + meta["has_encode_settings"], + meta["bit_depth"], + ) = self.get_video_encode(mi, meta["type"], bdinfo) + + meta["edition"], meta["repack"] = self.get_edition( + meta["path"], bdinfo, meta["filelist"], meta.get("manual_edition") + ) + if "REPACK" in meta.get("edition", ""): + meta["repack"] = re.search(r"REPACK[\d]?", meta["edition"])[0] + meta["edition"] = ( + re.sub(r"REPACK[\d]?", "", meta["edition"]).strip().replace(" ", " ") + ) # WORK ON THIS - meta.get('stream', False) - meta['stream'] = self.stream_optimized(meta['stream']) - meta.get('anon', False) - meta['anon'] = self.is_anon(meta['anon']) + meta.get("stream", False) + meta["stream"] = self.stream_optimized(meta["stream"]) + meta.get("anon", False) + meta["anon"] = self.is_anon(meta["anon"]) meta = await self.gen_desc(meta) return meta """ Determine if disc and if so, get bdinfo """ + async def get_disc(self, meta): is_disc = None - videoloc = meta['path'] + videoloc = meta["path"] bdinfo = None bd_summary = None # noqa: F841 discs = [] parse = DiscParse() - for path, directories, files in os. walk(meta['path']): + for path, directories, files in os.walk(meta["path"]): for each in directories: if each.upper() == "BDMV": # BDMVs is_disc = "BDMV" disc = { - 'path': f"{path}/{each}", - 'name': os.path.basename(path), - 'type': 'BDMV', - 'summary': "", - 'bdinfo': "" + "path": f"{path}/{each}", + "name": os.path.basename(path), + "type": "BDMV", + "summary": "", + "bdinfo": "", } discs.append(disc) elif each == "VIDEO_TS": # DVDs is_disc = "DVD" disc = { - 'path': f"{path}/{each}", - 'name': os.path.basename(path), - 'type': 'DVD', - 'vob_mi': '', - 'ifo_mi': '', - 'main_set': [], - 'size': "" + "path": f"{path}/{each}", + "name": os.path.basename(path), + "type": "DVD", + "vob_mi": "", + "ifo_mi": "", + "main_set": [], + "size": "", } discs.append(disc) elif each == "HVDVD_TS": is_disc = "HDDVD" disc = { - 'path': f"{path}/{each}", - 'name': os.path.basename(path), - 'type': 'HDDVD', - 'evo_mi': '', - 'largest_evo': "" + "path": f"{path}/{each}", + "name": os.path.basename(path), + "type": "HDDVD", + "evo_mi": "", + "largest_evo": "", } discs.append(disc) if is_disc == "BDMV": - if meta.get('edit', False) is False: - discs, bdinfo = await parse.get_bdinfo(discs, meta['uuid'], meta['base_dir'], meta.get('discs', [])) + if meta.get("edit", False) is False: + discs, bdinfo = await parse.get_bdinfo( + discs, meta["uuid"], meta["base_dir"], meta.get("discs", []) + ) else: - discs, bdinfo = await parse.get_bdinfo(meta['discs'], meta['uuid'], meta['base_dir'], meta['discs']) + discs, bdinfo = await parse.get_bdinfo( + meta["discs"], meta["uuid"], meta["base_dir"], meta["discs"] + ) elif is_disc == "DVD": discs = await parse.get_dvdinfo(discs) - export = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'w', newline="", encoding='utf-8') - export.write(discs[0]['ifo_mi']) + export = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "w", + newline="", + encoding="utf-8", + ) + export.write(discs[0]["ifo_mi"]) export.close() - export_clean = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'w', newline="", encoding='utf-8') - export_clean.write(discs[0]['ifo_mi']) + export_clean = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", + "w", + newline="", + encoding="utf-8", + ) + export_clean.write(discs[0]["ifo_mi"]) export_clean.close() elif is_disc 
== "HDDVD": discs = await parse.get_hddvd_info(discs) - export = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'w', newline="", encoding='utf-8') - export.write(discs[0]['evo_mi']) + export = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "w", + newline="", + encoding="utf-8", + ) + export.write(discs[0]["evo_mi"]) export.close() - discs = sorted(discs, key=lambda d: d['name']) + discs = sorted(discs, key=lambda d: d["name"]) return is_disc, videoloc, bdinfo, discs """ Get video files """ + def get_video(self, videoloc, mode): filelist = [] videoloc = os.path.abspath(videoloc) if os.path.isdir(videoloc): - globlist = glob.glob1(videoloc, "*.mkv") + glob.glob1(videoloc, "*.mp4") + glob.glob1(videoloc, "*.ts") + globlist = ( + glob.glob1(videoloc, "*.mkv") + + glob.glob1(videoloc, "*.mp4") + + glob.glob1(videoloc, "*.ts") + ) for file in globlist: - if not file.lower().endswith('sample.mkv') or "!sample" in file.lower(): + if not file.lower().endswith("sample.mkv") or "!sample" in file.lower(): filelist.append(os.path.abspath(f"{videoloc}{os.sep}{file}")) try: video = sorted(filelist)[0] except IndexError: console.print("[bold red]No Video files found") - if mode == 'cli': + if mode == "cli": exit() else: video = videoloc @@ -746,182 +1082,252 @@ def get_video(self, videoloc, mode): """ Get and parse mediainfo """ + def exportInfo(self, video, isdir, folder_id, base_dir, export_text): def filter_mediainfo(data): filtered = { "creatingLibrary": data.get("creatingLibrary"), - "media": { - "@ref": data["media"]["@ref"], - "track": [] - } + "media": {"@ref": data["media"]["@ref"], "track": []}, } for track in data["media"]["track"]: if track["@type"] == "General": - filtered["media"]["track"].append({ - "@type": track["@type"], - "UniqueID": track.get("UniqueID"), - "VideoCount": track.get("VideoCount"), - "AudioCount": track.get("AudioCount"), - "TextCount": track.get("TextCount"), - "MenuCount": track.get("MenuCount"), - "FileExtension": track.get("FileExtension"), - "Format": track.get("Format"), - "Format_Version": track.get("Format_Version"), - "FileSize": track.get("FileSize"), - "Duration": track.get("Duration"), - "OverallBitRate": track.get("OverallBitRate"), - "FrameRate": track.get("FrameRate"), - "FrameCount": track.get("FrameCount"), - "StreamSize": track.get("StreamSize"), - "IsStreamable": track.get("IsStreamable"), - "File_Created_Date": track.get("File_Created_Date"), - "File_Created_Date_Local": track.get("File_Created_Date_Local"), - "File_Modified_Date": track.get("File_Modified_Date"), - "File_Modified_Date_Local": track.get("File_Modified_Date_Local"), - "Encoded_Application": track.get("Encoded_Application"), - "Encoded_Library": track.get("Encoded_Library"), - }) + filtered["media"]["track"].append( + { + "@type": track["@type"], + "UniqueID": track.get("UniqueID"), + "VideoCount": track.get("VideoCount"), + "AudioCount": track.get("AudioCount"), + "TextCount": track.get("TextCount"), + "MenuCount": track.get("MenuCount"), + "FileExtension": track.get("FileExtension"), + "Format": track.get("Format"), + "Format_Version": track.get("Format_Version"), + "FileSize": track.get("FileSize"), + "Duration": track.get("Duration"), + "OverallBitRate": track.get("OverallBitRate"), + "FrameRate": track.get("FrameRate"), + "FrameCount": track.get("FrameCount"), + "StreamSize": track.get("StreamSize"), + "IsStreamable": track.get("IsStreamable"), + "File_Created_Date": track.get("File_Created_Date"), + "File_Created_Date_Local": track.get( + 
"File_Created_Date_Local" + ), + "File_Modified_Date": track.get("File_Modified_Date"), + "File_Modified_Date_Local": track.get( + "File_Modified_Date_Local" + ), + "Encoded_Application": track.get("Encoded_Application"), + "Encoded_Library": track.get("Encoded_Library"), + } + ) elif track["@type"] == "Video": - filtered["media"]["track"].append({ - "@type": track["@type"], - "StreamOrder": track.get("StreamOrder"), - "ID": track.get("ID"), - "UniqueID": track.get("UniqueID"), - "Format": track.get("Format"), - "Format_Profile": track.get("Format_Profile"), - "Format_Level": track.get("Format_Level"), - "Format_Tier": track.get("Format_Tier"), - "HDR_Format": track.get("HDR_Format"), - "HDR_Format_Version": track.get("HDR_Format_Version"), - "HDR_Format_Profile": track.get("HDR_Format_Profile"), - "HDR_Format_Level": track.get("HDR_Format_Level"), - "HDR_Format_Settings": track.get("HDR_Format_Settings"), - "HDR_Format_Compression": track.get("HDR_Format_Compression"), - "HDR_Format_Compatibility": track.get("HDR_Format_Compatibility"), - "CodecID": track.get("CodecID"), - "Duration": track.get("Duration"), - "BitRate": track.get("BitRate"), - "Width": track.get("Width"), - "Height": track.get("Height"), - "Stored_Height": track.get("Stored_Height"), - "Sampled_Width": track.get("Sampled_Width"), - "Sampled_Height": track.get("Sampled_Height"), - "PixelAspectRatio": track.get("PixelAspectRatio"), - "DisplayAspectRatio": track.get("DisplayAspectRatio"), - "FrameRate_Mode": track.get("FrameRate_Mode"), - "FrameRate": track.get("FrameRate"), - "FrameRate_Num": track.get("FrameRate_Num"), - "FrameRate_Den": track.get("FrameRate_Den"), - "FrameCount": track.get("FrameCount"), - "ColorSpace": track.get("ColorSpace"), - "ChromaSubsampling": track.get("ChromaSubsampling"), - "ChromaSubsampling_Position": track.get("ChromaSubsampling_Position"), - "BitDepth": track.get("BitDepth"), - "Delay": track.get("Delay"), - "Delay_Source": track.get("Delay_Source"), - "StreamSize": track.get("StreamSize"), - "Language": track.get("Language"), - "Default": track.get("Default"), - "Forced": track.get("Forced"), - "colour_description_present": track.get("colour_description_present"), - "colour_description_present_Source": track.get("colour_description_present_Source"), - "colour_range": track.get("colour_range"), - "colour_range_Source": track.get("colour_range_Source"), - "colour_primaries": track.get("colour_primaries"), - "colour_primaries_Source": track.get("colour_primaries_Source"), - "transfer_characteristics": track.get("transfer_characteristics"), - "transfer_characteristics_Source": track.get("transfer_characteristics_Source"), - "matrix_coefficients": track.get("matrix_coefficients"), - "matrix_coefficients_Source": track.get("matrix_coefficients_Source"), - "MasteringDisplay_ColorPrimaries": track.get("MasteringDisplay_ColorPrimaries"), - "MasteringDisplay_ColorPrimaries_Source": track.get("MasteringDisplay_ColorPrimaries_Source"), - "MasteringDisplay_Luminance": track.get("MasteringDisplay_Luminance"), - "MasteringDisplay_Luminance_Source": track.get("MasteringDisplay_Luminance_Source"), - "MaxCLL": track.get("MaxCLL"), - "MaxCLL_Source": track.get("MaxCLL_Source"), - "MaxFALL": track.get("MaxFALL"), - "MaxFALL_Source": track.get("MaxFALL_Source"), - }) + filtered["media"]["track"].append( + { + "@type": track["@type"], + "StreamOrder": track.get("StreamOrder"), + "ID": track.get("ID"), + "UniqueID": track.get("UniqueID"), + "Format": track.get("Format"), + "Format_Profile": 
track.get("Format_Profile"), + "Format_Level": track.get("Format_Level"), + "Format_Tier": track.get("Format_Tier"), + "HDR_Format": track.get("HDR_Format"), + "HDR_Format_Version": track.get("HDR_Format_Version"), + "HDR_Format_Profile": track.get("HDR_Format_Profile"), + "HDR_Format_Level": track.get("HDR_Format_Level"), + "HDR_Format_Settings": track.get("HDR_Format_Settings"), + "HDR_Format_Compression": track.get( + "HDR_Format_Compression" + ), + "HDR_Format_Compatibility": track.get( + "HDR_Format_Compatibility" + ), + "CodecID": track.get("CodecID"), + "Duration": track.get("Duration"), + "BitRate": track.get("BitRate"), + "Width": track.get("Width"), + "Height": track.get("Height"), + "Stored_Height": track.get("Stored_Height"), + "Sampled_Width": track.get("Sampled_Width"), + "Sampled_Height": track.get("Sampled_Height"), + "PixelAspectRatio": track.get("PixelAspectRatio"), + "DisplayAspectRatio": track.get("DisplayAspectRatio"), + "FrameRate_Mode": track.get("FrameRate_Mode"), + "FrameRate": track.get("FrameRate"), + "FrameRate_Num": track.get("FrameRate_Num"), + "FrameRate_Den": track.get("FrameRate_Den"), + "FrameCount": track.get("FrameCount"), + "ColorSpace": track.get("ColorSpace"), + "ChromaSubsampling": track.get("ChromaSubsampling"), + "ChromaSubsampling_Position": track.get( + "ChromaSubsampling_Position" + ), + "BitDepth": track.get("BitDepth"), + "Delay": track.get("Delay"), + "Delay_Source": track.get("Delay_Source"), + "StreamSize": track.get("StreamSize"), + "Language": track.get("Language"), + "Default": track.get("Default"), + "Forced": track.get("Forced"), + "colour_description_present": track.get( + "colour_description_present" + ), + "colour_description_present_Source": track.get( + "colour_description_present_Source" + ), + "colour_range": track.get("colour_range"), + "colour_range_Source": track.get("colour_range_Source"), + "colour_primaries": track.get("colour_primaries"), + "colour_primaries_Source": track.get( + "colour_primaries_Source" + ), + "transfer_characteristics": track.get( + "transfer_characteristics" + ), + "transfer_characteristics_Source": track.get( + "transfer_characteristics_Source" + ), + "matrix_coefficients": track.get("matrix_coefficients"), + "matrix_coefficients_Source": track.get( + "matrix_coefficients_Source" + ), + "MasteringDisplay_ColorPrimaries": track.get( + "MasteringDisplay_ColorPrimaries" + ), + "MasteringDisplay_ColorPrimaries_Source": track.get( + "MasteringDisplay_ColorPrimaries_Source" + ), + "MasteringDisplay_Luminance": track.get( + "MasteringDisplay_Luminance" + ), + "MasteringDisplay_Luminance_Source": track.get( + "MasteringDisplay_Luminance_Source" + ), + "MaxCLL": track.get("MaxCLL"), + "MaxCLL_Source": track.get("MaxCLL_Source"), + "MaxFALL": track.get("MaxFALL"), + "MaxFALL_Source": track.get("MaxFALL_Source"), + } + ) elif track["@type"] == "Audio": - filtered["media"]["track"].append({ - "@type": track["@type"], - "StreamOrder": track.get("StreamOrder"), - "ID": track.get("ID"), - "UniqueID": track.get("UniqueID"), - "Format": track.get("Format"), - "Format_Commercial_IfAny": track.get("Format_Commercial_IfAny"), - "Format_Settings_Endianness": track.get("Format_Settings_Endianness"), - "Format_AdditionalFeatures": track.get("Format_AdditionalFeatures"), - "CodecID": track.get("CodecID"), - "Duration": track.get("Duration"), - "BitRate_Mode": track.get("BitRate_Mode"), - "BitRate": track.get("BitRate"), - "Channels": track.get("Channels"), - "ChannelPositions": track.get("ChannelPositions"), - 
"ChannelLayout": track.get("ChannelLayout"), - "SamplesPerFrame": track.get("SamplesPerFrame"), - "SamplingRate": track.get("SamplingRate"), - "SamplingCount": track.get("SamplingCount"), - "FrameRate": track.get("FrameRate"), - "FrameCount": track.get("FrameCount"), - "Compression_Mode": track.get("Compression_Mode"), - "Delay": track.get("Delay"), - "Delay_Source": track.get("Delay_Source"), - "Video_Delay": track.get("Video_Delay"), - "StreamSize": track.get("StreamSize"), - "Language": track.get("Language"), - "ServiceKind": track.get("ServiceKind"), - "Default": track.get("Default"), - "Forced": track.get("Forced"), - "extra": track.get("extra"), - }) + filtered["media"]["track"].append( + { + "@type": track["@type"], + "StreamOrder": track.get("StreamOrder"), + "ID": track.get("ID"), + "UniqueID": track.get("UniqueID"), + "Format": track.get("Format"), + "Format_Commercial_IfAny": track.get( + "Format_Commercial_IfAny" + ), + "Format_Settings_Endianness": track.get( + "Format_Settings_Endianness" + ), + "Format_AdditionalFeatures": track.get( + "Format_AdditionalFeatures" + ), + "CodecID": track.get("CodecID"), + "Duration": track.get("Duration"), + "BitRate_Mode": track.get("BitRate_Mode"), + "BitRate": track.get("BitRate"), + "Channels": track.get("Channels"), + "ChannelPositions": track.get("ChannelPositions"), + "ChannelLayout": track.get("ChannelLayout"), + "SamplesPerFrame": track.get("SamplesPerFrame"), + "SamplingRate": track.get("SamplingRate"), + "SamplingCount": track.get("SamplingCount"), + "FrameRate": track.get("FrameRate"), + "FrameCount": track.get("FrameCount"), + "Compression_Mode": track.get("Compression_Mode"), + "Delay": track.get("Delay"), + "Delay_Source": track.get("Delay_Source"), + "Video_Delay": track.get("Video_Delay"), + "StreamSize": track.get("StreamSize"), + "Language": track.get("Language"), + "ServiceKind": track.get("ServiceKind"), + "Default": track.get("Default"), + "Forced": track.get("Forced"), + "extra": track.get("extra"), + } + ) elif track["@type"] == "Text": - filtered["media"]["track"].append({ - "@type": track["@type"], - "@typeorder": track.get("@typeorder"), - "StreamOrder": track.get("StreamOrder"), - "ID": track.get("ID"), - "UniqueID": track.get("UniqueID"), - "Format": track.get("Format"), - "CodecID": track.get("CodecID"), - "Duration": track.get("Duration"), - "BitRate": track.get("BitRate"), - "FrameRate": track.get("FrameRate"), - "FrameCount": track.get("FrameCount"), - "ElementCount": track.get("ElementCount"), - "StreamSize": track.get("StreamSize"), - "Title": track.get("Title"), - "Language": track.get("Language"), - "Default": track.get("Default"), - "Forced": track.get("Forced"), - }) + filtered["media"]["track"].append( + { + "@type": track["@type"], + "@typeorder": track.get("@typeorder"), + "StreamOrder": track.get("StreamOrder"), + "ID": track.get("ID"), + "UniqueID": track.get("UniqueID"), + "Format": track.get("Format"), + "CodecID": track.get("CodecID"), + "Duration": track.get("Duration"), + "BitRate": track.get("BitRate"), + "FrameRate": track.get("FrameRate"), + "FrameCount": track.get("FrameCount"), + "ElementCount": track.get("ElementCount"), + "StreamSize": track.get("StreamSize"), + "Title": track.get("Title"), + "Language": track.get("Language"), + "Default": track.get("Default"), + "Forced": track.get("Forced"), + } + ) elif track["@type"] == "Menu": - filtered["media"]["track"].append({ - "@type": track["@type"], - "extra": track.get("extra"), - }) + filtered["media"]["track"].append( + { + "@type": 
track["@type"], + "extra": track.get("extra"), + } + ) return filtered - if not os.path.exists(f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt") and export_text: + if ( + not os.path.exists(f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt") + and export_text + ): console.print("[bold yellow]Exporting MediaInfo...") if not isdir: os.chdir(os.path.dirname(video)) - media_info = MediaInfo.parse(video, output="STRING", full=False, mediainfo_options={'inform_version': '1'}) - with open(f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt", 'w', newline="", encoding='utf-8') as export: + media_info = MediaInfo.parse( + video, + output="STRING", + full=False, + mediainfo_options={"inform_version": "1"}, + ) + with open( + f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt", + "w", + newline="", + encoding="utf-8", + ) as export: export.write(media_info) - with open(f"{base_dir}/tmp/{folder_id}/MEDIAINFO_CLEANPATH.txt", 'w', newline="", encoding='utf-8') as export_cleanpath: - export_cleanpath.write(media_info.replace(video, os.path.basename(video))) + with open( + f"{base_dir}/tmp/{folder_id}/MEDIAINFO_CLEANPATH.txt", + "w", + newline="", + encoding="utf-8", + ) as export_cleanpath: + export_cleanpath.write( + media_info.replace(video, os.path.basename(video)) + ) console.print("[bold green]MediaInfo Exported.") if not os.path.exists(f"{base_dir}/tmp/{folder_id}/MediaInfo.json.txt"): - media_info_json = MediaInfo.parse(video, output="JSON", mediainfo_options={'inform_version': '1'}) + media_info_json = MediaInfo.parse( + video, output="JSON", mediainfo_options={"inform_version": "1"} + ) media_info_dict = json.loads(media_info_json) filtered_info = filter_mediainfo(media_info_dict) - with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", 'w', encoding='utf-8') as export: + with open( + f"{base_dir}/tmp/{folder_id}/MediaInfo.json", "w", encoding="utf-8" + ) as export: json.dump(filtered_info, export, indent=4) - with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", 'r', encoding='utf-8') as f: + with open( + f"{base_dir}/tmp/{folder_id}/MediaInfo.json", "r", encoding="utf-8" + ) as f: mi = json.load(f) return mi @@ -931,17 +1337,19 @@ def filter_mediainfo(data): """ def get_resolution(self, guess, folder_id, base_dir): - with open(f'{base_dir}/tmp/{folder_id}/MediaInfo.json', 'r', encoding='utf-8') as f: + with open( + f"{base_dir}/tmp/{folder_id}/MediaInfo.json", "r", encoding="utf-8" + ) as f: mi = json.load(f) try: - width = mi['media']['track'][1]['Width'] - height = mi['media']['track'][1]['Height'] + width = mi["media"]["track"][1]["Width"] + height = mi["media"]["track"][1]["Height"] except Exception: width = 0 height = 0 - framerate = mi['media']['track'][1].get('FrameRate', '') + framerate = mi["media"]["track"][1].get("FrameRate", "") try: - scan = mi['media']['track'][1]['ScanType'] + scan = mi["media"]["track"][1]["ScanType"] except Exception: scan = "Progressive" if scan == "Progressive": @@ -956,7 +1364,9 @@ def get_resolution(self, guess, folder_id, base_dir): actual_height = int(height) height = self.closest(height_list, int(height)) res = f"{width}x{height}{scan}" - resolution = self.mi_resolution(res, guess, width, scan, height, actual_height) + resolution = self.mi_resolution( + res, guess, width, scan, height, actual_height + ) return resolution def closest(self, lst, K): @@ -976,47 +1386,66 @@ def closest(self, lst, K): def mi_resolution(self, res, guess, width, scan, height, actual_height): res_map = { - "3840x2160p": "2160p", "2160p": "2160p", - "2560x1440p": "1440p", "1440p": "1440p", - 
"1920x1080p": "1080p", "1080p": "1080p", - "1920x1080i": "1080i", "1080i": "1080i", - "1280x720p": "720p", "720p": "720p", - "1280x540p": "720p", "1280x576p": "720p", - "1024x576p": "576p", "576p": "576p", - "1024x576i": "576i", "576i": "576i", - "854x480p": "480p", "480p": "480p", - "854x480i": "480i", "480i": "480i", - "720x576p": "576p", "576p": "576p", - "720x576i": "576i", "576i": "576i", - "720x480p": "480p", "480p": "480p", - "720x480i": "480i", "480i": "480i", - "15360x8640p": "8640p", "8640p": "8640p", - "7680x4320p": "4320p", "4320p": "4320p", - "OTHER": "OTHER"} + "3840x2160p": "2160p", + "2160p": "2160p", + "2560x1440p": "1440p", + "1440p": "1440p", + "1920x1080p": "1080p", + "1080p": "1080p", + "1920x1080i": "1080i", + "1080i": "1080i", + "1280x720p": "720p", + "720p": "720p", + "1280x540p": "720p", + "1280x576p": "720p", + "1024x576p": "576p", + "576p": "576p", + "1024x576i": "576i", + "576i": "576i", + "854x480p": "480p", + "480p": "480p", + "854x480i": "480i", + "480i": "480i", + "720x576p": "576p", + "576p": "576p", + "720x576i": "576i", + "576i": "576i", + "720x480p": "480p", + "480p": "480p", + "720x480i": "480i", + "480i": "480i", + "15360x8640p": "8640p", + "8640p": "8640p", + "7680x4320p": "4320p", + "4320p": "4320p", + "OTHER": "OTHER", + } resolution = res_map.get(res, None) if actual_height == 540: resolution = "OTHER" if resolution is None: try: - resolution = guess['screen_size'] + resolution = guess["screen_size"] except Exception: width_map = { - '3840p': '2160p', - '2560p': '1550p', - '1920p': '1080p', - '1920i': '1080i', - '1280p': '720p', - '1024p': '576p', - '1024i': '576i', - '854p': '480p', - '854i': '480i', - '720p': '576p', - '720i': '576i', - '15360p': '4320p', - 'OTHERp': 'OTHER' + "3840p": "2160p", + "2560p": "1550p", + "1920p": "1080p", + "1920i": "1080i", + "1280p": "720p", + "1024p": "576p", + "1024i": "576i", + "854p": "480p", + "854i": "480i", + "720p": "576p", + "720i": "576i", + "15360p": "4320p", + "OTHERp": "OTHER", } resolution = width_map.get(f"{width}{scan}", "OTHER") - resolution = self.mi_resolution(resolution, guess, width, scan, height, actual_height) + resolution = self.mi_resolution( + resolution, guess, width, scan, height, actual_height + ) return resolution @@ -1030,6 +1459,7 @@ def is_sd(self, resolution): """ Is a scene release? 
""" + def is_scene(self, video, imdb=None): scene = False base = os.path.basename(video) @@ -1039,14 +1469,20 @@ def is_scene(self, video, imdb=None): try: response = requests.get(url, timeout=30) response = response.json() - if int(response.get('resultsCount', 0)) != 0: + if int(response.get("resultsCount", 0)) != 0: video = f"{response['results'][0]['release']}.mkv" scene = True r = requests.get(f"https://api.srrdb.com/v1/imdb/{base}") r = r.json() - if r['releases'] != [] and imdb is None: - imdb = r['releases'][0].get('imdb', imdb) if r['releases'][0].get('imdb') is not None else imdb - console.print(f"[green]SRRDB: Matched to {response['results'][0]['release']}") + if r["releases"] != [] and imdb is None: + imdb = ( + r["releases"][0].get("imdb", imdb) + if r["releases"][0].get("imdb") is not None + else imdb + ) + console.print( + f"[green]SRRDB: Matched to {response['results'][0]['release']}" + ) except Exception: video = video scene = False @@ -1057,87 +1493,123 @@ def is_scene(self, video, imdb=None): Generate Screenshots """ - def disc_screenshots(self, filename, bdinfo, folder_id, base_dir, use_vs, image_list, ffdebug, num_screens=None): + def disc_screenshots( + self, + filename, + bdinfo, + folder_id, + base_dir, + use_vs, + image_list, + ffdebug, + num_screens=None, + ): if num_screens is None: num_screens = self.screens if num_screens == 0 or len(image_list) >= num_screens: return # Get longest m2ts length = 0 - for each in bdinfo['files']: - int_length = sum(int(float(x)) * 60 ** i for i, x in enumerate(reversed(each['length'].split(':')))) + for each in bdinfo["files"]: + int_length = sum( + int(float(x)) * 60**i + for i, x in enumerate(reversed(each["length"].split(":"))) + ) if int_length > length: length = int_length - for root, dirs, files in os.walk(bdinfo['path']): + for root, dirs, files in os.walk(bdinfo["path"]): for name in files: - if name.lower() == each['file'].lower(): + if name.lower() == each["file"].lower(): file = f"{root}/{name}" - if "VC-1" in bdinfo['video'][0]['codec'] or bdinfo['video'][0]['hdr_dv'] != "": - keyframe = 'nokey' + if "VC-1" in bdinfo["video"][0]["codec"] or bdinfo["video"][0]["hdr_dv"] != "": + keyframe = "nokey" else: - keyframe = 'none' + keyframe = "none" os.chdir(f"{base_dir}/tmp/{folder_id}") i = len(glob.glob(f"{filename}-*.png")) if i >= num_screens: i = num_screens - console.print('[bold green]Reusing screenshots') + console.print("[bold green]Reusing screenshots") else: console.print("[bold yellow]Saving Screens...") if use_vs is True: from src.vs import vs_screengn - vs_screengn(source=file, encode=None, filter_b_frames=False, num=num_screens, dir=f"{base_dir}/tmp/{folder_id}/") + + vs_screengn( + source=file, + encode=None, + filter_b_frames=False, + num=num_screens, + dir=f"{base_dir}/tmp/{folder_id}/", + ) else: if bool(ffdebug) is True: - loglevel = 'verbose' + loglevel = "verbose" debug = False else: - loglevel = 'quiet' + loglevel = "quiet" debug = True with Progress( TextColumn("[bold green]Saving Screens..."), BarColumn(), "[cyan]{task.completed}/{task.total}", - TimeRemainingColumn() + TimeRemainingColumn(), ) as progress: - screen_task = progress.add_task("[green]Saving Screens...", total=num_screens + 1) + screen_task = progress.add_task( + "[green]Saving Screens...", total=num_screens + 1 + ) ss_times = [] for i in range(num_screens + 1): image = f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png" try: - ss_times = self.valid_ss_time(ss_times, num_screens + 1, length) + ss_times = self.valid_ss_time( + ss_times, 
num_screens + 1, length + ) ( - ffmpeg - .input(file, ss=ss_times[-1], skip_frame=keyframe) + ffmpeg.input( + file, ss=ss_times[-1], skip_frame=keyframe + ) .output(image, vframes=1, pix_fmt="rgb24") .overwrite_output() - .global_args('-loglevel', loglevel) + .global_args("-loglevel", loglevel) .run(quiet=debug) ) except Exception: console.print(traceback.format_exc()) self.optimize_images(image) - if os.path.getsize(Path(image)) <= 31000000 and self.img_host == "imgbb": + if ( + os.path.getsize(Path(image)) <= 31000000 + and self.img_host == "imgbb" + ): i += 1 - elif os.path.getsize(Path(image)) <= 10000000 and self.img_host in ["imgbox", 'pixhost']: + elif os.path.getsize( + Path(image) + ) <= 10000000 and self.img_host in ["imgbox", "pixhost"]: i += 1 elif os.path.getsize(Path(image)) <= 75000: - console.print("[bold yellow]Image is incredibly small, retaking") + console.print( + "[bold yellow]Image is incredibly small, retaking" + ) time.sleep(1) elif self.img_host == "ptpimg": i += 1 elif self.img_host == "lensdump": i += 1 else: - console.print("[red]Image too large for your image host, retaking") + console.print( + "[red]Image too large for your image host, retaking" + ) time.sleep(1) progress.advance(screen_task) # remove smallest image smallest = None - smallestsize = 99 ** 99 - for screens in glob.glob1(f"{base_dir}/tmp/{folder_id}/", f"{filename}-*"): + smallestsize = 99**99 + for screens in glob.glob1( + f"{base_dir}/tmp/{folder_id}/", f"{filename}-*" + ): screen_path = os.path.join(f"{base_dir}/tmp/{folder_id}/", screens) screensize = os.path.getsize(screen_path) if screensize < smallestsize: @@ -1150,19 +1622,26 @@ def disc_screenshots(self, filename, bdinfo, folder_id, base_dir, use_vs, image_ def dvd_screenshots(self, meta, disc_num, num_screens=None): if num_screens is None: num_screens = self.screens - if num_screens == 0 or (len(meta.get('image_list', [])) >= num_screens and disc_num == 0): + if num_screens == 0 or ( + len(meta.get("image_list", [])) >= num_screens and disc_num == 0 + ): return - ifo_mi = MediaInfo.parse(f"{meta['discs'][disc_num]['path']}/VTS_{meta['discs'][disc_num]['main_set'][0][:2]}_0.IFO", mediainfo_options={'inform_version': '1'}) + ifo_mi = MediaInfo.parse( + f"{meta['discs'][disc_num]['path']}/VTS_{meta['discs'][disc_num]['main_set'][0][:2]}_0.IFO", + mediainfo_options={"inform_version": "1"}, + ) sar = 1 for track in ifo_mi.tracks: if track.track_type == "Video": if isinstance(track.duration, str): # If the duration is a string, split and find the longest duration - durations = [float(d) for d in track.duration.split(' / ')] + durations = [float(d) for d in track.duration.split(" / ")] length = max(durations) / 1000 # Use the longest duration else: # If the duration is already an int or float, use it directly - length = float(track.duration) / 1000 # noqa #F841 # Convert to seconds + length = ( + float(track.duration) / 1000 + ) # noqa #F841 # Convert to seconds # Proceed as usual for other fields par = float(track.pixel_aspect_ratio) @@ -1180,22 +1659,29 @@ def dvd_screenshots(self, meta, disc_num, num_screens=None): w_sar = sar h_sar = 1 - main_set_length = len(meta['discs'][disc_num]['main_set']) + main_set_length = len(meta["discs"][disc_num]["main_set"]) if main_set_length >= 3: - main_set = meta['discs'][disc_num]['main_set'][1:-1] + main_set = meta["discs"][disc_num]["main_set"][1:-1] elif main_set_length == 2: - main_set = meta['discs'][disc_num]['main_set'][1:] + main_set = meta["discs"][disc_num]["main_set"][1:] elif main_set_length 
== 1: - main_set = meta['discs'][disc_num]['main_set'] + main_set = meta["discs"][disc_num]["main_set"] n = 0 os.chdir(f"{meta['base_dir']}/tmp/{meta['uuid']}") i = 0 - if len(glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][disc_num]['name']}-*.png")) >= num_screens: + if ( + len( + glob.glob( + f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][disc_num]['name']}-*.png" + ) + ) + >= num_screens + ): i = num_screens - console.print('[bold green]Reusing screenshots') + console.print("[bold green]Reusing screenshots") else: - if bool(meta.get('ffdebug', False)) is True: - loglevel = 'verbose' + if bool(meta.get("ffdebug", False)) is True: + loglevel = "verbose" debug = False looped = 0 retake = False @@ -1203,9 +1689,11 @@ def dvd_screenshots(self, meta, disc_num, num_screens=None): TextColumn("[bold green]Saving Screens..."), BarColumn(), "[cyan]{task.completed}/{task.total}", - TimeRemainingColumn() + TimeRemainingColumn(), ) as progress: - screen_task = progress.add_task("[green]Saving Screens...", total=num_screens + 1) + screen_task = progress.add_task( + "[green]Saving Screens...", total=num_screens + 1 + ) ss_times = [] for i in range(num_screens + 1): if n >= len(main_set): @@ -1215,22 +1703,29 @@ def dvd_screenshots(self, meta, disc_num, num_screens=None): image = f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][disc_num]['name']}-{i}.png" if not os.path.exists(image) or retake is not False: retake = False - loglevel = 'quiet' + loglevel = "quiet" debug = True - if bool(meta.get('debug', False)): - loglevel = 'error' + if bool(meta.get("debug", False)): + loglevel = "error" debug = False def _is_vob_good(n, loops, num_screens): voblength = 300 - vob_mi = MediaInfo.parse(f"{meta['discs'][disc_num]['path']}/VTS_{main_set[n]}", output='JSON') + vob_mi = MediaInfo.parse( + f"{meta['discs'][disc_num]['path']}/VTS_{main_set[n]}", + output="JSON", + ) vob_mi = json.loads(vob_mi) try: - voblength = float(vob_mi['media']['track'][1]['Duration']) + voblength = float( + vob_mi["media"]["track"][1]["Duration"] + ) return voblength, n except Exception: try: - voblength = float(vob_mi['media']['track'][2]['Duration']) + voblength = float( + vob_mi["media"]["track"][2]["Duration"] + ) return voblength, n except Exception: n += 1 @@ -1240,22 +1735,33 @@ def _is_vob_good(n, loops, num_screens): n -= num_screens if loops < 6: loops = loops + 1 - voblength, n = _is_vob_good(n, loops, num_screens) + voblength, n = _is_vob_good( + n, loops, num_screens + ) return voblength, n else: return 300, n + try: voblength, n = _is_vob_good(n, 0, num_screens) # img_time = random.randint(round(voblength/5), round(voblength - voblength/5)) - ss_times = self.valid_ss_time(ss_times, num_screens + 1, voblength) - ff = ffmpeg.input(f"{meta['discs'][disc_num]['path']}/VTS_{main_set[n]}", ss=ss_times[-1]) + ss_times = self.valid_ss_time( + ss_times, num_screens + 1, voblength + ) + ff = ffmpeg.input( + f"{meta['discs'][disc_num]['path']}/VTS_{main_set[n]}", + ss=ss_times[-1], + ) if w_sar != 1 or h_sar != 1: - ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) + ff = ff.filter( + "scale", + int(round(width * w_sar)), + int(round(height * h_sar)), + ) ( - ff - .output(image, vframes=1, pix_fmt="rgb24") + ff.output(image, vframes=1, pix_fmt="rgb24") .overwrite_output() - .global_args('-loglevel', loglevel) + .global_args("-loglevel", loglevel) .run(quiet=debug) ) except Exception: @@ -1263,12 +1769,19 @@ def _is_vob_good(n, loops, num_screens): 
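+                        # optimize_images() compresses each capture before the per-host size checks below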
self.optimize_images(image) n += 1 try: - if os.path.getsize(Path(image)) <= 31000000 and self.img_host == "imgbb": + if ( + os.path.getsize(Path(image)) <= 31000000 + and self.img_host == "imgbb" + ): i += 1 - elif os.path.getsize(Path(image)) <= 10000000 and self.img_host in ["imgbox", 'pixhost']: + elif os.path.getsize( + Path(image) + ) <= 10000000 and self.img_host in ["imgbox", "pixhost"]: i += 1 elif os.path.getsize(Path(image)) <= 75000: - console.print("[yellow]Image is incredibly small (and is most likely to be a single color), retaking") + console.print( + "[yellow]Image is incredibly small (and is most likely to be a single color), retaking" + ) retake = True time.sleep(1) elif self.img_host == "ptpimg": @@ -1278,21 +1791,28 @@ def _is_vob_good(n, loops, num_screens): elif self.img_host == "ptscreens": i += 1 else: - console.print("[red]Image too large for your image host, retaking") + console.print( + "[red]Image too large for your image host, retaking" + ) retake = True time.sleep(1) looped = 0 except Exception: if looped >= 25: - console.print('[red]Failed to take screenshots') + console.print("[red]Failed to take screenshots") exit() looped += 1 progress.advance(screen_task) # remove smallest image smallest = None smallestsize = 99**99 - for screens in glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}/", f"{meta['discs'][disc_num]['name']}-*"): - screen_path = os.path.join(f"{meta['base_dir']}/tmp/{meta['uuid']}/", screens) + for screens in glob.glob1( + f"{meta['base_dir']}/tmp/{meta['uuid']}/", + f"{meta['discs'][disc_num]['name']}-*", + ): + screen_path = os.path.join( + f"{meta['base_dir']}/tmp/{meta['uuid']}/", screens + ) screensize = os.path.getsize(screen_path) if screensize < smallestsize: smallestsize = screensize @@ -1301,17 +1821,32 @@ def _is_vob_good(n, loops, num_screens): if smallest is not None: os.remove(smallest) - def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=None, force_screenshots=False): + def screenshots( + self, + path, + filename, + folder_id, + base_dir, + meta, + num_screens=None, + force_screenshots=False, + ): # Ensure the image list is initialized and preserve existing images - if 'image_list' not in meta: - meta['image_list'] = [] + if "image_list" not in meta: + meta["image_list"] = [] # Check if there are already at least 3 image links in the image list - existing_images = [img for img in meta['image_list'] if isinstance(img, dict) and img.get('img_url', '').startswith('http')] + existing_images = [ + img + for img in meta["image_list"] + if isinstance(img, dict) and img.get("img_url", "").startswith("http") + ] # Skip taking screenshots if there are already 3 images and force_screenshots is False if len(existing_images) >= 3 and not force_screenshots: - console.print("[yellow]There are already at least 3 images in the image list. Skipping additional screenshots.") + console.print( + "[yellow]There are already at least 3 images in the image list. Skipping additional screenshots." 
+ ) return # Determine the number of screenshots to take @@ -1320,14 +1855,14 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non if num_screens <= 0: return - with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", encoding='utf-8') as f: + with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", encoding="utf-8") as f: mi = json.load(f) - video_track = mi['media']['track'][1] - length = video_track.get('Duration', mi['media']['track'][0]['Duration']) - width = float(video_track.get('Width')) - height = float(video_track.get('Height')) - par = float(video_track.get('PixelAspectRatio', 1)) - dar = float(video_track.get('DisplayAspectRatio')) + video_track = mi["media"]["track"][1] + length = video_track.get("Duration", mi["media"]["track"][0]["Duration"]) + width = float(video_track.get("Width")) + height = float(video_track.get("Height")) + par = float(video_track.get("PixelAspectRatio", 1)) + dar = float(video_track.get("DisplayAspectRatio")) if par == 1: sar = w_sar = h_sar = 1 @@ -1344,40 +1879,58 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non i = 0 if len(glob.glob(f"{filename}-*.png")) >= num_screens: i = num_screens - console.print('[bold green]Reusing screenshots') + console.print("[bold green]Reusing screenshots") else: - loglevel = 'quiet' + loglevel = "quiet" debug = True - if bool(meta.get('ffdebug', False)) is True: - loglevel = 'verbose' + if bool(meta.get("ffdebug", False)) is True: + loglevel = "verbose" debug = False - if meta.get('vapoursynth', False) is True: + if meta.get("vapoursynth", False) is True: from src.vs import vs_screengn - vs_screengn(source=path, encode=None, filter_b_frames=False, num=num_screens, dir=f"{base_dir}/tmp/{folder_id}/") + + vs_screengn( + source=path, + encode=None, + filter_b_frames=False, + num=num_screens, + dir=f"{base_dir}/tmp/{folder_id}/", + ) else: retake = False with Progress( TextColumn("[bold green]Saving Screens..."), BarColumn(), "[cyan]{task.completed}/{task.total}", - TimeRemainingColumn() + TimeRemainingColumn(), ) as progress: ss_times = [] - screen_task = progress.add_task("[green]Saving Screens...", total=num_screens + 1) + screen_task = progress.add_task( + "[green]Saving Screens...", total=num_screens + 1 + ) for i in range(num_screens + 1): - image_path = os.path.abspath(f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png") + image_path = os.path.abspath( + f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png" + ) if not os.path.exists(image_path) or retake is not False: retake = False try: - ss_times = self.valid_ss_time(ss_times, num_screens + 1, length) + ss_times = self.valid_ss_time( + ss_times, num_screens + 1, length + ) ff = ffmpeg.input(path, ss=ss_times[-1]) if w_sar != 1 or h_sar != 1: - ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) + ff = ff.filter( + "scale", + int(round(width * w_sar)), + int(round(height * h_sar)), + ) ( - ff - .output(image_path, vframes=1, pix_fmt="rgb24") + ff.output( + image_path, vframes=1, pix_fmt="rgb24" + ) .overwrite_output() - .global_args('-loglevel', loglevel) + .global_args("-loglevel", loglevel) .run(quiet=debug) ) except (KeyboardInterrupt, Exception): @@ -1385,22 +1938,39 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non self.optimize_images(image_path) if os.path.getsize(Path(image_path)) <= 75000: - console.print("[yellow]Image is incredibly small, retaking") + console.print( + "[yellow]Image is incredibly small, retaking" + ) retake = True 
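+                            # flag a retake so this frame is regenerated on the next loop pass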
time.sleep(1) - if os.path.getsize(Path(image_path)) <= 31000000 and self.img_host == "imgbb" and retake is False: + if ( + os.path.getsize(Path(image_path)) <= 31000000 + and self.img_host == "imgbb" + and retake is False + ): i += 1 - elif os.path.getsize(Path(image_path)) <= 10000000 and self.img_host in ["imgbox", 'pixhost'] and retake is False: + elif ( + os.path.getsize(Path(image_path)) <= 10000000 + and self.img_host in ["imgbox", "pixhost"] + and retake is False + ): i += 1 - elif self.img_host in ["ptpimg", "lensdump", "ptscreens"] and retake is False: + elif ( + self.img_host in ["ptpimg", "lensdump", "ptscreens"] + and retake is False + ): i += 1 elif self.img_host == "freeimage.host": - console.print("[bold red]Support for freeimage.host has been removed. Please remove from your config") + console.print( + "[bold red]Support for freeimage.host has been removed. Please remove from your config" + ) exit() elif retake is True: pass else: - console.print("[red]Image too large for your image host, retaking") + console.print( + "[red]Image too large for your image host, retaking" + ) retake = True time.sleep(1) else: @@ -1411,22 +1981,31 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non new_images = glob.glob(f"{filename}-*.png") for image in new_images: img_dict = { - 'img_url': image, - 'raw_url': image, - 'web_url': image # Assuming local path, but you might need to update this if uploading + "img_url": image, + "raw_url": image, + "web_url": image, # Assuming local path, but you might need to update this if uploading } - meta['image_list'].append(img_dict) + meta["image_list"].append(img_dict) # Remove the smallest image if there are more than needed - if len(meta['image_list']) > self.screens: - local_images = [img for img in meta['image_list'] if not img['img_url'].startswith('http')] + if len(meta["image_list"]) > self.screens: + local_images = [ + img + for img in meta["image_list"] + if not img["img_url"].startswith("http") + ] if local_images: - smallest = min(local_images, key=lambda x: os.path.getsize(x['img_url'])) - os.remove(smallest['img_url']) - meta['image_list'].remove(smallest) + smallest = min( + local_images, + key=lambda x: os.path.getsize(x["img_url"]), + ) + os.remove(smallest["img_url"]) + meta["image_list"].remove(smallest) else: - console.print("[yellow]No local images found to remove.") + console.print( + "[yellow]No local images found to remove." 
+ ) def valid_ss_time(self, ss_times, num_screens, length): valid_time = False @@ -1445,7 +2024,7 @@ def valid_ss_time(self, ss_times, num_screens, length): return ss_times def optimize_images(self, image): - if self.config['DEFAULT'].get('optimize_images', True) is True: + if self.config["DEFAULT"].get("optimize_images", True) is True: if os.path.exists(image): try: pyver = platform.python_version_tuple() @@ -1472,7 +2051,7 @@ def get_type(self, video, scene, is_disc): elif "webrip" in filename: type = "WEBRIP" # elif scene == True: - # type = "ENCODE" + # type = "ENCODE" elif "hdtv" in filename: type = "HDTV" elif is_disc is not None: @@ -1486,7 +2065,7 @@ def get_type(self, video, scene, is_disc): def get_cat(self, video): # if category is None: - category = guessit(video.replace('1.0', ''))['type'] + category = guessit(video.replace("1.0", ""))["type"] if category.lower() == "movie": category = "MOVIE" # 1 elif category.lower() in ("tv", "episode"): @@ -1496,59 +2075,73 @@ def get_cat(self, video): return category async def get_tmdb_from_imdb(self, meta, filename): - if meta.get('tmdb_manual') is not None: - meta['tmdb'] = meta['tmdb_manual'] + if meta.get("tmdb_manual") is not None: + meta["tmdb"] = meta["tmdb_manual"] return meta - imdb_id = meta['imdb'] + imdb_id = meta["imdb"] if str(imdb_id)[:2].lower() != "tt": imdb_id = f"tt{imdb_id}" find = tmdb.Find(id=imdb_id) info = find.info(external_source="imdb_id") - if len(info['movie_results']) >= 1: - meta['category'] = "MOVIE" - meta['tmdb'] = info['movie_results'][0]['id'] - elif len(info['tv_results']) >= 1: - meta['category'] = "TV" - meta['tmdb'] = info['tv_results'][0]['id'] + if len(info["movie_results"]) >= 1: + meta["category"] = "MOVIE" + meta["tmdb"] = info["movie_results"][0]["id"] + elif len(info["tv_results"]) >= 1: + meta["category"] = "TV" + meta["tmdb"] = info["tv_results"][0]["id"] else: - imdb_info = await self.get_imdb_info(imdb_id.replace('tt', ''), meta) + imdb_info = await self.get_imdb_info(imdb_id.replace("tt", ""), meta) title = imdb_info.get("title") if title is None: title = filename - year = imdb_info.get('year') + year = imdb_info.get("year") if year is None: - year = meta['search_year'] - console.print(f"[yellow]TMDb was unable to find anything with that IMDb, searching TMDb for {title}") - meta = await self.get_tmdb_id(title, year, meta, meta['category'], imdb_info.get('original title', imdb_info.get('localized title', meta['uuid']))) - if meta.get('tmdb') in ('None', '', None, 0, '0'): - if meta.get('mode', 'discord') == 'cli': - console.print('[yellow]Unable to find a matching TMDb entry') + year = meta["search_year"] + console.print( + f"[yellow]TMDb was unable to find anything with that IMDb, searching TMDb for {title}" + ) + meta = await self.get_tmdb_id( + title, + year, + meta, + meta["category"], + imdb_info.get( + "original title", imdb_info.get("localized title", meta["uuid"]) + ), + ) + if meta.get("tmdb") in ("None", "", None, 0, "0"): + if meta.get("mode", "discord") == "cli": + console.print("[yellow]Unable to find a matching TMDb entry") tmdb_id = console.input("Please enter tmdb id: ") parser = Args(config=self.config) - meta['category'], meta['tmdb'] = parser.parse_tmdb_id(id=tmdb_id, category=meta.get('category')) + meta["category"], meta["tmdb"] = parser.parse_tmdb_id( + id=tmdb_id, category=meta.get("category") + ) await asyncio.sleep(2) return meta - async def get_tmdb_id(self, filename, search_year, meta, category, untouched_filename="", attempted=0): + async def get_tmdb_id( + 
self, filename, search_year, meta, category, untouched_filename="", attempted=0
+    ):
         search = tmdb.Search()
         try:
             if category == "MOVIE":
                 search.movie(query=filename, year=search_year)
             elif category == "TV":
                 search.tv(query=filename, first_air_date_year=search_year)
-            if meta.get('tmdb_manual') is not None:
-                meta['tmdb'] = meta['tmdb_manual']
+            if meta.get("tmdb_manual") is not None:
+                meta["tmdb"] = meta["tmdb_manual"]
             else:
-                meta['tmdb'] = search.results[0]['id']
-                meta['category'] = category
+                meta["tmdb"] = search.results[0]["id"]
+                meta["category"] = category
         except IndexError:
             try:
                 if category == "MOVIE":
                     search.movie(query=filename)
                 elif category == "TV":
                     search.tv(query=filename)
-                meta['tmdb'] = search.results[0]['id']
-                meta['category'] = category
+                meta["tmdb"] = search.results[0]["id"]
+                meta["category"] = category
             except IndexError:
                 if category == "MOVIE":
                     category = "TV"
@@ -1556,191 +2149,254 @@ async def get_tmdb_id(self, filename, search_year, meta, category, untouched_fil
                     category = "MOVIE"
                 if attempted <= 1:
                     attempted += 1
-                    meta = await self.get_tmdb_id(filename, search_year, meta, category, untouched_filename, attempted)
+                    meta = await self.get_tmdb_id(
+                        filename,
+                        search_year,
+                        meta,
+                        category,
+                        untouched_filename,
+                        attempted,
+                    )
                 elif attempted == 2:
                     attempted += 1
-                    meta = await self.get_tmdb_id(anitopy.parse(guessit(untouched_filename, {"excludes": ["country", "language"]})['title'])['anime_title'], search_year, meta, meta['category'], untouched_filename, attempted)
+                    meta = await self.get_tmdb_id(
+                        anitopy.parse(
+                            guessit(
+                                untouched_filename,
+                                {"excludes": ["country", "language"]},
+                            )["title"]
+                        )["anime_title"],
+                        search_year,
+                        meta,
+                        meta["category"],
+                        untouched_filename,
+                        attempted,
+                    )
-                if meta['tmdb'] in (None, ""):
+                if meta["tmdb"] in (None, ""):
                     console.print(f"[red]Unable to find TMDb match for {filename}")
-                    if meta.get('mode', 'discord') == 'cli':
-                        tmdb_id = cli_ui.ask_string("Please enter tmdb id in this format: tv/12345 or movie/12345")
+                    if meta.get("mode", "discord") == "cli":
+                        tmdb_id = cli_ui.ask_string(
+                            "Please enter tmdb id in this format: tv/12345 or movie/12345"
+                        )
                         parser = Args(config=self.config)
-                        meta['category'], meta['tmdb'] = parser.parse_tmdb_id(id=tmdb_id, category=meta.get('category'))
-                        meta['tmdb_manual'] = meta['tmdb']
+                        meta["category"], meta["tmdb"] = parser.parse_tmdb_id(
+                            id=tmdb_id, category=meta.get("category")
+                        )
+                        meta["tmdb_manual"] = meta["tmdb"]
                 return meta
         return meta

     async def tmdb_other_meta(self, meta):
-        if meta['tmdb'] == "0":
+        if meta["tmdb"] == "0":
             try:
-                title = guessit(meta['path'], {"excludes": ["country", "language"]})['title'].lower()
-                title = title.split('aka')[0]
-                meta = await self.get_tmdb_id(guessit(title, {"excludes": ["country", "language"]})['title'], meta['search_year'], meta)
-                if meta['tmdb'] == "0":
-                    meta = await self.get_tmdb_id(title, "", meta, meta['category'])
+                title = guessit(meta["path"], {"excludes": ["country", "language"]})[
+                    "title"
+                ].lower()
+                title = title.split("aka")[0]
+                meta = await self.get_tmdb_id(
+                    guessit(title, {"excludes": ["country", "language"]})["title"],
+                    meta["search_year"],
+                    meta,
+                    meta["category"],
+                )
+                if meta["tmdb"] == "0":
+                    meta = await self.get_tmdb_id(title, "", meta, meta["category"])
             except Exception:
-                if meta.get('mode', 'discord') == 'cli':
+                if meta.get("mode", "discord") == "cli":
                     console.print("[bold red]Unable to find tmdb entry.
Exiting.") exit() else: console.print("[bold red]Unable to find tmdb entry") return meta - if meta['category'] == "MOVIE": - movie = tmdb.Movies(meta['tmdb']) + if meta["category"] == "MOVIE": + movie = tmdb.Movies(meta["tmdb"]) response = movie.info() - meta['title'] = response['title'] - if response['release_date']: - meta['year'] = datetime.strptime(response['release_date'], '%Y-%m-%d').year + meta["title"] = response["title"] + if response["release_date"]: + meta["year"] = datetime.strptime( + response["release_date"], "%Y-%m-%d" + ).year else: - console.print('[yellow]TMDB does not have a release date, using year from filename instead (if it exists)') - meta['year'] = meta['search_year'] + console.print( + "[yellow]TMDB does not have a release date, using year from filename instead (if it exists)" + ) + meta["year"] = meta["search_year"] external = movie.external_ids() - if meta.get('imdb', None) is None: - imdb_id = external.get('imdb_id', "0") + if meta.get("imdb", None) is None: + imdb_id = external.get("imdb_id", "0") if imdb_id == "" or imdb_id is None: - meta['imdb_id'] = '0' + meta["imdb_id"] = "0" else: - meta['imdb_id'] = str(int(imdb_id.replace('tt', ''))).zfill(7) + meta["imdb_id"] = str(int(imdb_id.replace("tt", ""))).zfill(7) else: - meta['imdb_id'] = str(meta['imdb']).replace('tt', '').zfill(7) - if meta.get('tvdb_id', '0') in ['', ' ', None, 'None', '0']: - meta['tvdb_id'] = external.get('tvdb_id', '0') - if meta['tvdb_id'] in ["", None, " ", "None"]: - meta['tvdb_id'] = '0' + meta["imdb_id"] = str(meta["imdb"]).replace("tt", "").zfill(7) + if meta.get("tvdb_id", "0") in ["", " ", None, "None", "0"]: + meta["tvdb_id"] = external.get("tvdb_id", "0") + if meta["tvdb_id"] in ["", None, " ", "None"]: + meta["tvdb_id"] = "0" try: videos = movie.videos() - for each in videos.get('results', []): - if each.get('site', "") == 'YouTube' and each.get('type', "") == "Trailer": - meta['youtube'] = f"https://www.youtube.com/watch?v={each.get('key')}" + for each in videos.get("results", []): + if ( + each.get("site", "") == "YouTube" + and each.get("type", "") == "Trailer" + ): + meta["youtube"] = ( + f"https://www.youtube.com/watch?v={each.get('key')}" + ) break except Exception: - console.print('[yellow]Unable to grab videos from TMDb.') + console.print("[yellow]Unable to grab videos from TMDb.") - meta['aka'], original_language = await self.get_imdb_aka(meta['imdb_id']) + meta["aka"], original_language = await self.get_imdb_aka(meta["imdb_id"]) if original_language is not None: - meta['original_language'] = original_language + meta["original_language"] = original_language else: - meta['original_language'] = response['original_language'] - - meta['original_title'] = response.get('original_title', meta['title']) - meta['keywords'] = self.get_keywords(movie) - meta['genres'] = self.get_genres(response) - meta['tmdb_directors'] = self.get_directors(movie) - if meta.get('anime', False) is False: - meta['mal_id'], meta['aka'], meta['anime'] = self.get_anime(response, meta) - meta['poster'] = response.get('poster_path', "") - meta['tmdb_poster'] = response.get('poster_path', "") - meta['overview'] = response['overview'] - meta['tmdb_type'] = 'Movie' - meta['runtime'] = response.get('episode_run_time', 60) - elif meta['category'] == "TV": - tv = tmdb.TV(meta['tmdb']) + meta["original_language"] = response["original_language"] + + meta["original_title"] = response.get("original_title", meta["title"]) + meta["keywords"] = self.get_keywords(movie) + meta["genres"] = self.get_genres(response) 
+ meta["tmdb_directors"] = self.get_directors(movie) + if meta.get("anime", False) is False: + meta["mal_id"], meta["aka"], meta["anime"] = self.get_anime( + response, meta + ) + meta["poster"] = response.get("poster_path", "") + meta["tmdb_poster"] = response.get("poster_path", "") + meta["overview"] = response["overview"] + meta["tmdb_type"] = "Movie" + meta["runtime"] = response.get("episode_run_time", 60) + elif meta["category"] == "TV": + tv = tmdb.TV(meta["tmdb"]) response = tv.info() - meta['title'] = response['name'] - if response['first_air_date']: - meta['year'] = datetime.strptime(response['first_air_date'], '%Y-%m-%d').year + meta["title"] = response["name"] + if response["first_air_date"]: + meta["year"] = datetime.strptime( + response["first_air_date"], "%Y-%m-%d" + ).year else: - console.print('[yellow]TMDB does not have a release date, using year from filename instead (if it exists)') - meta['year'] = meta['search_year'] + console.print( + "[yellow]TMDB does not have a release date, using year from filename instead (if it exists)" + ) + meta["year"] = meta["search_year"] external = tv.external_ids() - if meta.get('imdb', None) is None: - imdb_id = external.get('imdb_id', "0") + if meta.get("imdb", None) is None: + imdb_id = external.get("imdb_id", "0") if imdb_id == "" or imdb_id is None: - meta['imdb_id'] = '0' + meta["imdb_id"] = "0" else: - meta['imdb_id'] = str(int(imdb_id.replace('tt', ''))).zfill(7) + meta["imdb_id"] = str(int(imdb_id.replace("tt", ""))).zfill(7) else: - meta['imdb_id'] = str(int(meta['imdb'].replace('tt', ''))).zfill(7) - if meta.get('tvdb_id', '0') in ['', ' ', None, 'None', '0']: - meta['tvdb_id'] = external.get('tvdb_id', '0') - if meta['tvdb_id'] in ["", None, " ", "None"]: - meta['tvdb_id'] = '0' + meta["imdb_id"] = str(int(meta["imdb"].replace("tt", ""))).zfill(7) + if meta.get("tvdb_id", "0") in ["", " ", None, "None", "0"]: + meta["tvdb_id"] = external.get("tvdb_id", "0") + if meta["tvdb_id"] in ["", None, " ", "None"]: + meta["tvdb_id"] = "0" try: videos = tv.videos() - for each in videos.get('results', []): - if each.get('site', "") == 'YouTube' and each.get('type', "") == "Trailer": - meta['youtube'] = f"https://www.youtube.com/watch?v={each.get('key')}" + for each in videos.get("results", []): + if ( + each.get("site", "") == "YouTube" + and each.get("type", "") == "Trailer" + ): + meta["youtube"] = ( + f"https://www.youtube.com/watch?v={each.get('key')}" + ) break except Exception: - console.print('[yellow]Unable to grab videos from TMDb.') + console.print("[yellow]Unable to grab videos from TMDb.") # meta['aka'] = f" AKA {response['original_name']}" - meta['aka'], original_language = await self.get_imdb_aka(meta['imdb_id']) + meta["aka"], original_language = await self.get_imdb_aka(meta["imdb_id"]) if original_language is not None: - meta['original_language'] = original_language + meta["original_language"] = original_language else: - meta['original_language'] = response['original_language'] - meta['original_title'] = response.get('original_name', meta['title']) - meta['keywords'] = self.get_keywords(tv) - meta['genres'] = self.get_genres(response) - meta['tmdb_directors'] = self.get_directors(tv) - meta['mal_id'], meta['aka'], meta['anime'] = self.get_anime(response, meta) - meta['poster'] = response.get('poster_path', '') - meta['overview'] = response['overview'] - - meta['tmdb_type'] = response.get('type', 'Scripted') - runtime = response.get('episode_run_time', [60]) + meta["original_language"] = response["original_language"] + 
meta["original_title"] = response.get("original_name", meta["title"]) + meta["keywords"] = self.get_keywords(tv) + meta["genres"] = self.get_genres(response) + meta["tmdb_directors"] = self.get_directors(tv) + meta["mal_id"], meta["aka"], meta["anime"] = self.get_anime(response, meta) + meta["poster"] = response.get("poster_path", "") + meta["overview"] = response["overview"] + + meta["tmdb_type"] = response.get("type", "Scripted") + runtime = response.get("episode_run_time", [60]) if runtime == []: runtime = [60] - meta['runtime'] = runtime[0] - if meta['poster'] not in (None, ''): - meta['poster'] = f"https://image.tmdb.org/t/p/original{meta['poster']}" - - difference = SequenceMatcher(None, meta['title'].lower(), meta['aka'][5:].lower()).ratio() - if difference >= 0.9 or meta['aka'][5:].strip() == "" or meta['aka'][5:].strip().lower() in meta['title'].lower(): - meta['aka'] = "" - if f"({meta['year']})" in meta['aka']: - meta['aka'] = meta['aka'].replace(f"({meta['year']})", "").strip() + meta["runtime"] = runtime[0] + if meta["poster"] not in (None, ""): + meta["poster"] = f"https://image.tmdb.org/t/p/original{meta['poster']}" + + difference = SequenceMatcher( + None, meta["title"].lower(), meta["aka"][5:].lower() + ).ratio() + if ( + difference >= 0.9 + or meta["aka"][5:].strip() == "" + or meta["aka"][5:].strip().lower() in meta["title"].lower() + ): + meta["aka"] = "" + if f"({meta['year']})" in meta["aka"]: + meta["aka"] = meta["aka"].replace(f"({meta['year']})", "").strip() return meta def get_keywords(self, tmdb_info): if tmdb_info is not None: tmdb_keywords = tmdb_info.keywords() - if tmdb_keywords.get('keywords') is not None: - keywords = [f"{keyword['name'].replace(',', ' ')}" for keyword in tmdb_keywords.get('keywords')] - elif tmdb_keywords.get('results') is not None: - keywords = [f"{keyword['name'].replace(',', ' ')}" for keyword in tmdb_keywords.get('results')] - return (', '.join(keywords)) + if tmdb_keywords.get("keywords") is not None: + keywords = [ + f"{keyword['name'].replace(',', ' ')}" + for keyword in tmdb_keywords.get("keywords") + ] + elif tmdb_keywords.get("results") is not None: + keywords = [ + f"{keyword['name'].replace(',', ' ')}" + for keyword in tmdb_keywords.get("results") + ] + return ", ".join(keywords) else: - return '' + return "" def get_genres(self, tmdb_info): if tmdb_info is not None: - tmdb_genres = tmdb_info.get('genres', []) - if tmdb_genres is not []: + tmdb_genres = tmdb_info.get("genres", []) + if tmdb_genres != []: genres = [f"{genre['name'].replace(',', ' ')}" for genre in tmdb_genres] - return (', '.join(genres)) + return ", ".join(genres) else: - return '' + return "" def get_directors(self, tmdb_info): if tmdb_info is not None: tmdb_credits = tmdb_info.credits() directors = [] - if tmdb_credits.get('cast', []) != []: - for each in tmdb_credits['cast']: - if each.get('known_for_department', '') == "Directing": - directors.append(each.get('original_name', each.get('name'))) + if tmdb_credits.get("cast", []) != []: + for each in tmdb_credits["cast"]: + if each.get("known_for_department", "") == "Directing": + directors.append(each.get("original_name", each.get("name"))) return directors else: - return '' + return "" def get_anime(self, response, meta): - tmdb_name = meta['title'] - if meta.get('aka', "") == "": + tmdb_name = meta["title"] + if meta.get("aka", "") == "": alt_name = "" else: - alt_name = meta['aka'] + alt_name = meta["aka"] anime = False animation = False - for each in response['genres']: - if each['id'] == 16: + for each 
in response["genres"]: + if each["id"] == 16: animation = True - if response['original_language'] == 'ja' and animation is True: - romaji, mal_id, eng_title, season_year, episodes = self.get_romaji(tmdb_name, meta.get('mal', None)) + if response["original_language"] == "ja" and animation is True: + romaji, mal_id, eng_title, season_year, episodes = self.get_romaji( + tmdb_name, meta.get("mal", None) + ) alt_name = f" AKA {romaji}" anime = True @@ -1748,18 +2404,18 @@ def get_anime(self, response, meta): # mal_id = mal.results[0].mal_id else: mal_id = 0 - if meta.get('mal_id', 0) != 0: - mal_id = meta.get('mal_id') - if meta.get('mal') not in ('0', 0, None): - mal_id = meta.get('mal', 0) + if meta.get("mal_id", 0) != 0: + mal_id = meta.get("mal_id") + if meta.get("mal") not in ("0", 0, None): + mal_id = meta.get("mal", 0) return mal_id, alt_name, anime def get_romaji(self, tmdb_name, mal): if mal is None: mal = 0 - tmdb_name = tmdb_name.replace('-', "").replace("The Movie", "") - tmdb_name = ' '.join(tmdb_name.split()) - query = ''' + tmdb_name = tmdb_name.replace("-", "").replace("The Movie", "") + tmdb_name = " ".join(tmdb_name.split()) + query = """ query ($search: String) { Page (page: 1) { pageInfo { @@ -1778,13 +2434,11 @@ def get_romaji(self, tmdb_name, mal): } } } - ''' + """ # Define our query variables and values that will be used in the query request - variables = { - 'search': tmdb_name - } + variables = {"search": tmdb_name} else: - query = ''' + query = """ query ($search: Int) { Page (page: 1) { pageInfo { @@ -1803,39 +2457,48 @@ def get_romaji(self, tmdb_name, mal): } } } - ''' + """ # Define our query variables and values that will be used in the query request - variables = { - 'search': mal - } + variables = {"search": mal} # Make the HTTP Api request - url = 'https://graphql.anilist.co' + url = "https://graphql.anilist.co" try: - response = requests.post(url, json={'query': query, 'variables': variables}) + response = requests.post(url, json={"query": query, "variables": variables}) json = response.json() - media = json['data']['Page']['media'] + media = json["data"]["Page"]["media"] except Exception: - console.print('[red]Failed to get anime specific info from anilist. Continuing without it...') + console.print( + "[red]Failed to get anime specific info from anilist. Continuing without it..." 
+            )
             media = []
         if media not in (None, []):
-            result = {'title': {}}
+            result = {"title": {}}
             difference = 0
             for anime in media:
-                search_name = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", tmdb_name.lower().replace(' ', ''))
+                search_name = re.sub(
+                    r"[^0-9a-zA-Z\[\\]]+", "", tmdb_name.lower().replace(" ", "")
+                )
-                for title in anime['title'].values():
+                for title in anime["title"].values():
                     if title is not None:
-                        title = re.sub(u'[\u3000-\u303f\u3040-\u309f\u30a0-\u30ff\uff00-\uff9f\u4e00-\u9faf\u3400-\u4dbf]+ (?=[A-Za-z ]+–)', "", title.lower().replace(' ', ''), re.U)
+                        title = re.sub(
+                            "[\u3000-\u303f\u3040-\u309f\u30a0-\u30ff\uff00-\uff9f\u4e00-\u9faf\u3400-\u4dbf]+ (?=[A-Za-z ]+–)",
+                            "",
+                            title.lower().replace(" ", ""),
+                            flags=re.U,
+                        )
                         diff = SequenceMatcher(None, title, search_name).ratio()
                         if diff >= difference:
                             result = anime
                             difference = diff
-            romaji = result['title'].get('romaji', result['title'].get('english', ""))
-            mal_id = result.get('idMal', 0)
-            eng_title = result['title'].get('english', result['title'].get('romaji', ""))
-            season_year = result.get('season_year', "")
-            episodes = result.get('episodes', 0)
+            romaji = result["title"].get("romaji", result["title"].get("english", ""))
+            mal_id = result.get("idMal", 0)
+            eng_title = result["title"].get(
+                "english", result["title"].get("romaji", "")
+            )
+            season_year = result.get("season_year", "")
+            episodes = result.get("episodes", 0)
         else:
             romaji = eng_title = season_year = ""
             episodes = mal_id = 0
@@ -1848,6 +2511,7 @@ def get_romaji(self, tmdb_name, mal):
     """
     Mediainfo/Bdinfo > meta
     """
+
     def get_audio_v2(self, mi, meta, bdinfo):
         extra = dual = ""
         has_commentary = False
@@ -1855,44 +2519,53 @@ def get_audio_v2(self, mi, meta, bdinfo):
         # Get formats
         if bdinfo is not None:  # Disks
             format_settings = ""
-            format = bdinfo.get('audio', [{}])[0].get('codec', '')
+            format = bdinfo.get("audio", [{}])[0].get("codec", "")
             commercial = format
-            additional = bdinfo.get('audio', [{}])[0].get('atmos_why_you_be_like_this', '')
+            additional = bdinfo.get("audio", [{}])[0].get(
+                "atmos_why_you_be_like_this", ""
+            )
             # Channels
-            chan = bdinfo.get('audio', [{}])[0].get('channels', '')
+            chan = bdinfo.get("audio", [{}])[0].get("channels", "")
         else:
             track_num = 2
-            tracks = mi.get('media', {}).get('track', [])
+            tracks = mi.get("media", {}).get("track", [])
             for i, t in enumerate(tracks):
-                if t.get('@type') != "Audio":
+                if t.get("@type") != "Audio":
                     continue
-                if t.get('Language', '') == meta.get('original_language', '') and "commentary" not in t.get('Title', '').lower():
+                if (
+                    t.get("Language", "") == meta.get("original_language", "")
+                    and "commentary" not in t.get("Title", "").lower()
+                ):
                     track_num = i
                     break
             track = tracks[track_num] if len(tracks) > track_num else {}
-            format = track.get('Format', '')
-            commercial = track.get('Format_Commercial', '')
+            format = track.get("Format", "")
+            commercial = track.get("Format_Commercial", "")
-            if track.get('Language', '') == "zxx":
-                meta['silent'] = True
+            if track.get("Language", "") == "zxx":
+                meta["silent"] = True
-            additional = track.get('Format_AdditionalFeatures', '')
+            additional = track.get("Format_AdditionalFeatures", "")
-            format_settings = track.get('Format_Settings', '')
-            if format_settings in ['Explicit']:
+            format_settings = track.get("Format_Settings", "")
+            if format_settings in ["Explicit"]:
                 format_settings = ""
             # Channels
-            channels = mi['media']['track'][track_num].get('Channels_Original', mi['media']['track'][track_num]['Channels'])
+            channels =
mi["media"]["track"][track_num].get( + "Channels_Original", mi["media"]["track"][track_num]["Channels"] + ) if not str(channels).isnumeric(): - channels = mi['media']['track'][track_num]['Channels'] + channels = mi["media"]["track"][track_num]["Channels"] try: - channel_layout = mi['media']['track'][track_num]['ChannelLayout'] + channel_layout = mi["media"]["track"][track_num]["ChannelLayout"] except Exception: try: - channel_layout = mi['media']['track'][track_num]['ChannelLayout_Original'] + channel_layout = mi["media"]["track"][track_num][ + "ChannelLayout_Original" + ] except Exception: channel_layout = "" @@ -1907,48 +2580,81 @@ def get_audio_v2(self, mi, meta, bdinfo): else: chan = f"{channels}.0" - if meta.get('original_language', '') != 'en': + if meta.get("original_language", "") != "en": eng, orig = False, False try: - for t in mi.get('media', {}).get('track', []): - if t.get('@type') != "Audio": + for t in mi.get("media", {}).get("track", []): + if t.get("@type") != "Audio": continue - audio_language = t.get('Language', '') + audio_language = t.get("Language", "") # Check for English Language Track - if audio_language == "en" and "commentary" not in t.get('Title', '').lower(): + if ( + audio_language == "en" + and "commentary" not in t.get("Title", "").lower() + ): eng = True # Check for original Language Track - if audio_language == meta['original_language'] and "commentary" not in t.get('Title', '').lower(): + if ( + audio_language == meta["original_language"] + and "commentary" not in t.get("Title", "").lower() + ): orig = True # Catch Chinese / Norwegian / Spanish variants - variants = ['zh', 'cn', 'cmn', 'no', 'nb', 'es-419', 'es-ES', 'es'] - if audio_language in variants and meta['original_language'] in variants: + variants = [ + "zh", + "cn", + "cmn", + "no", + "nb", + "es-419", + "es-ES", + "es", + ] + if ( + audio_language in variants + and meta["original_language"] in variants + ): orig = True # Check for additional, bloated Tracks - if audio_language != meta['original_language'] and audio_language != "en": - if meta['original_language'] not in variants and audio_language not in variants: - audio_language = "und" if audio_language == "" else audio_language - console.print(f"[bold red]This release has a(n) {audio_language} audio track, and may be considered bloated") + if ( + audio_language != meta["original_language"] + and audio_language != "en" + ): + if ( + meta["original_language"] not in variants + and audio_language not in variants + ): + audio_language = ( + "und" if audio_language == "" else audio_language + ) + console.print( + f"[bold red]This release has a(n) {audio_language} audio track, and may be considered bloated" + ) time.sleep(5) if eng and orig: dual = "Dual-Audio" - elif eng and not orig and meta['original_language'] not in ['zxx', 'xx', None] and not meta.get('no_dub', False): + elif ( + eng + and not orig + and meta["original_language"] not in ["zxx", "xx", None] + and not meta.get("no_dub", False) + ): dual = "Dubbed" except Exception: console.print(traceback.format_exc()) pass - for t in mi.get('media', {}).get('track', []): - if t.get('@type') != "Audio": + for t in mi.get("media", {}).get("track", []): + if t.get("@type") != "Audio": continue - if "commentary" in t.get('Title', '').lower(): + if "commentary" in t.get("Title", "").lower(): has_commentary = True # Convert commercial name to naming conventions @@ -1964,7 +2670,6 @@ def get_audio_v2(self, mi, meta, bdinfo): "Opus": "OPUS", "Vorbis": "VORBIS", "PCM": "LPCM", - # BDINFO AUDIOS "LPCM 
Audio": "LPCM", "Dolby Digital Audio": "DD", @@ -1974,7 +2679,7 @@ def get_audio_v2(self, mi, meta, bdinfo): "DTS Audio": "DTS", "DTS-HD Master Audio": "DTS-HD MA", "DTS-HD High-Res Audio": "DTS-HD HRA", - "DTS:X Master Audio": "DTS:X" + "DTS:X Master Audio": "DTS:X", } audio_extra = { "XLL": "-HD MA", @@ -1986,9 +2691,7 @@ def get_audio_v2(self, mi, meta, bdinfo): "16-ch": " Atmos", "Atmos Audio": " Atmos", } - format_settings_extra = { - "Dolby Surround EX": "EX" - } + format_settings_extra = {"Dolby Surround EX": "EX"} commercial_names = { "Dolby Digital": "DD", @@ -1997,7 +2700,7 @@ def get_audio_v2(self, mi, meta, bdinfo): "DTS-ES": "DTS-ES", "DTS-HD High": "DTS-HD HRA", "Free Lossless Audio Codec": "FLAC", - "DTS-HD Master Audio": "DTS-HD MA" + "DTS-HD Master Audio": "DTS-HD MA", } search_format = True @@ -2007,7 +2710,10 @@ def get_audio_v2(self, mi, meta, bdinfo): if key in commercial: codec = value search_format = False - if "Atmos" in commercial or format_extra.get(additional, "") == " Atmos": + if ( + "Atmos" in commercial + or format_extra.get(additional, "") == " Atmos" + ): extra = " Atmos" if search_format: @@ -2031,16 +2737,18 @@ def get_audio_v2(self, mi, meta, bdinfo): codec = "DTS:X" chan = f"{int(channels) - 1}.1" if format == "MPEG Audio": - codec = mi['media']['track'][2].get('CodecID_Hint', '') + codec = mi["media"]["track"][2].get("CodecID_Hint", "") # Ensure audio is constructed properly even with potential None values - audio = f"{dual} {codec or ''} {format_settings or ''} {chan or ''}{extra or ''}" - audio = ' '.join(audio.split()) + audio = ( + f"{dual} {codec or ''} {format_settings or ''} {chan or ''}{extra or ''}" + ) + audio = " ".join(audio.split()) return audio, chan, has_commentary def is_3d(self, mi, bdinfo): if bdinfo is not None: - if bdinfo['video'][0]['3d'] != "": + if bdinfo["video"][0]["3d"] != "": return "3D" else: return "" @@ -2049,36 +2757,41 @@ def is_3d(self, mi, bdinfo): def get_tag(self, video, meta): try: - tag = guessit(video)['release_group'] + tag = guessit(video)["release_group"] tag = f"-{tag}" except Exception: tag = "" if tag == "-": tag = "" - if tag[1:].lower() in ["nogroup", 'nogrp']: + if tag[1:].lower() in ["nogroup", "nogrp"]: tag = "" return tag def get_source(self, type, video, path, is_disc, meta): try: try: - source = guessit(video)['source'] + source = guessit(video)["source"] except Exception: try: - source = guessit(path)['source'] + source = guessit(path)["source"] except Exception: source = "BluRay" - if meta.get('manual_source', None): - source = meta['manual_source'] - if source in ("Blu-ray", "Ultra HD Blu-ray", "BluRay", "BR") or is_disc == "BDMV": + if meta.get("manual_source", None): + source = meta["manual_source"] + if ( + source in ("Blu-ray", "Ultra HD Blu-ray", "BluRay", "BR") + or is_disc == "BDMV" + ): if type == "DISC": source = "Blu-ray" - elif type in ('ENCODE', 'REMUX'): + elif type in ("ENCODE", "REMUX"): source = "BluRay" if is_disc == "DVD" or source in ("DVD", "dvd"): try: if is_disc == "DVD": - mediainfo = MediaInfo.parse(f"{meta['discs'][0]['path']}/VTS_{meta['discs'][0]['main_set'][0][:2]}_0.IFO") + mediainfo = MediaInfo.parse( + f"{meta['discs'][0]['path']}/VTS_{meta['discs'][0]['main_set'][0][:2]}_0.IFO" + ) else: mediainfo = MediaInfo.parse(video) for track in mediainfo.tracks: @@ -2088,7 +2801,7 @@ def get_source(self, type, video, path, is_disc, meta): raise WeirdSystem # noqa: F405 except Exception: try: - other = guessit(video)['other'] + other = guessit(video)["other"] if "PAL" in 
other: system = "PAL" elif "NTSC" in other: @@ -2109,7 +2822,7 @@ def get_source(self, type, video, path, is_disc, meta): source = "HD DVD" if type in ("ENCODE", "REMUX"): source = "HDDVD" - if type in ("WEBDL", 'WEBRIP'): + if type in ("WEBDL", "WEBRIP"): source = "Web" if source == "Ultra HDTV": source = "UHDTV" @@ -2121,13 +2834,13 @@ def get_source(self, type, video, path, is_disc, meta): def get_uhd(self, type, guess, resolution, path): try: - source = guess['Source'] - other = guess['Other'] + source = guess["Source"] + other = guess["Other"] except Exception: source = "" other = "" uhd = "" - if source == 'Blu-ray' and other == "Ultra HD" or source == "Ultra HD Blu-ray": + if source == "Blu-ray" and other == "Ultra HD" or source == "Ultra HD Blu-ray": uhd = "UHD" elif "UHD" in path: uhd = "UHD" @@ -2143,30 +2856,40 @@ def get_hdr(self, mi, bdinfo): hdr = "" dv = "" if bdinfo is not None: # Disks - hdr_mi = bdinfo['video'][0]['hdr_dv'] + hdr_mi = bdinfo["video"][0]["hdr_dv"] if "HDR10+" in hdr_mi: hdr = "HDR10+" elif hdr_mi == "HDR10": hdr = "HDR" try: - if bdinfo['video'][1]['hdr_dv'] == "Dolby Vision": + if bdinfo["video"][1]["hdr_dv"] == "Dolby Vision": dv = "DV" except Exception: pass else: - video_track = mi['media']['track'][1] + video_track = mi["media"]["track"][1] try: - hdr_mi = video_track['colour_primaries'] + hdr_mi = video_track["colour_primaries"] if hdr_mi in ("BT.2020", "REC.2020"): hdr = "" - hdr_format_string = video_track.get('HDR_Format_Compatibility', video_track.get('HDR_Format_String', video_track.get('HDR_Format', ""))) + hdr_format_string = video_track.get( + "HDR_Format_Compatibility", + video_track.get( + "HDR_Format_String", video_track.get("HDR_Format", "") + ), + ) if "HDR10" in hdr_format_string: hdr = "HDR" if "HDR10+" in hdr_format_string: hdr = "HDR10+" - if hdr_format_string == "" and "PQ" in (video_track.get('transfer_characteristics'), video_track.get('transfer_characteristics_Original', None)): + if hdr_format_string == "" and "PQ" in ( + video_track.get("transfer_characteristics"), + video_track.get("transfer_characteristics_Original", None), + ): hdr = "PQ10" - transfer_characteristics = video_track.get('transfer_characteristics_Original', None) + transfer_characteristics = video_track.get( + "transfer_characteristics_Original", None + ) if "HLG" in transfer_characteristics: hdr = "HLG" if hdr != "HLG" and "BT.2020 (10-bit)" in transfer_characteristics: @@ -2175,7 +2898,9 @@ def get_hdr(self, mi, bdinfo): pass try: - if "Dolby Vision" in video_track.get('HDR_Format', '') or "Dolby Vision" in video_track.get('HDR_Format_String', ''): + if "Dolby Vision" in video_track.get( + "HDR_Format", "" + ) or "Dolby Vision" in video_track.get("HDR_Format_String", ""): dv = "DV" except Exception: pass @@ -2184,46 +2909,256 @@ def get_hdr(self, mi, bdinfo): return hdr def get_region(self, bdinfo, region=None): - label = bdinfo.get('label', bdinfo.get('title', bdinfo.get('path', ''))).replace('.', ' ') + label = bdinfo.get( + "label", bdinfo.get("title", bdinfo.get("path", "")) + ).replace(".", " ") if region is not None: region = region.upper() else: regions = { - 'AFG': 'AFG', 'AIA': 'AIA', 'ALA': 'ALA', 'ALG': 'ALG', 'AND': 'AND', 'ANG': 'ANG', 'ARG': 'ARG', - 'ARM': 'ARM', 'ARU': 'ARU', 'ASA': 'ASA', 'ATA': 'ATA', 'ATF': 'ATF', 'ATG': 'ATG', 'AUS': 'AUS', - 'AUT': 'AUT', 'AZE': 'AZE', 'BAH': 'BAH', 'BAN': 'BAN', 'BDI': 'BDI', 'BEL': 'BEL', 'BEN': 'BEN', - 'BER': 'BER', 'BES': 'BES', 'BFA': 'BFA', 'BHR': 'BHR', 'BHU': 'BHU', 'BIH': 'BIH', 'BLM': 'BLM', - 
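The get_hdr rewrite above keeps a fixed precedence: the HDR_Format* strings decide between HDR10 and HDR10+, a bare PQ transfer function downgrades to PQ10, HLG wins over both, and Dolby Vision is probed separately so "DV" can coexist with an HDR tag. A condensed sketch of that precedence, with an explicit guard for the None that transfer_characteristics_Original can return (the patch relies on the enclosing try/except to swallow that case); detect_hdr is illustrative only:

# Condensed, illustrative version of the MediaInfo HDR probing above.
def detect_hdr(video_track):
    fmt = video_track.get(
        "HDR_Format_Compatibility",
        video_track.get("HDR_Format_String", video_track.get("HDR_Format", "")),
    )
    transfer = video_track.get("transfer_characteristics_Original") or ""  # guard None
    if "HLG" in transfer:
        return "HLG"
    if "HDR10+" in fmt:  # checked first: "HDR10" is a substring of "HDR10+"
        return "HDR10+"
    if "HDR10" in fmt:
        return "HDR"
    # tuple membership, as in the patch: a field must equal "PQ" exactly
    if fmt == "" and "PQ" in (video_track.get("transfer_characteristics"), transfer):
        return "PQ10"
    return ""

# A UHD disc track typically reports HDR_Format_Compatibility="HDR10" -> "HDR".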
'BLR': 'BLR', 'BLZ': 'BLZ', 'BOL': 'BOL', 'BOT': 'BOT', 'BRA': 'BRA', 'BRB': 'BRB', 'BRU': 'BRU', - 'BVT': 'BVT', 'CAM': 'CAM', 'CAN': 'CAN', 'CAY': 'CAY', 'CCK': 'CCK', 'CEE': 'CEE', 'CGO': 'CGO', - 'CHA': 'CHA', 'CHI': 'CHI', 'CHN': 'CHN', 'CIV': 'CIV', 'CMR': 'CMR', 'COD': 'COD', 'COK': 'COK', - 'COL': 'COL', 'COM': 'COM', 'CPV': 'CPV', 'CRC': 'CRC', 'CRO': 'CRO', 'CTA': 'CTA', 'CUB': 'CUB', - 'CUW': 'CUW', 'CXR': 'CXR', 'CYP': 'CYP', 'DJI': 'DJI', 'DMA': 'DMA', 'DOM': 'DOM', 'ECU': 'ECU', - 'EGY': 'EGY', 'ENG': 'ENG', 'EQG': 'EQG', 'ERI': 'ERI', 'ESH': 'ESH', 'ESP': 'ESP', 'ETH': 'ETH', - 'FIJ': 'FIJ', 'FLK': 'FLK', 'FRA': 'FRA', 'FRO': 'FRO', 'FSM': 'FSM', 'GAB': 'GAB', 'GAM': 'GAM', - 'GBR': 'GBR', 'GEO': 'GEO', 'GER': 'GER', 'GGY': 'GGY', 'GHA': 'GHA', 'GIB': 'GIB', 'GLP': 'GLP', - 'GNB': 'GNB', 'GRE': 'GRE', 'GRL': 'GRL', 'GRN': 'GRN', 'GUA': 'GUA', 'GUF': 'GUF', 'GUI': 'GUI', - 'GUM': 'GUM', 'GUY': 'GUY', 'HAI': 'HAI', 'HKG': 'HKG', 'HMD': 'HMD', 'HON': 'HON', 'HUN': 'HUN', - 'IDN': 'IDN', 'IMN': 'IMN', 'IND': 'IND', 'IOT': 'IOT', 'IRL': 'IRL', 'IRN': 'IRN', 'IRQ': 'IRQ', - 'ISL': 'ISL', 'ISR': 'ISR', 'ITA': 'ITA', 'JAM': 'JAM', 'JEY': 'JEY', 'JOR': 'JOR', 'JPN': 'JPN', - 'KAZ': 'KAZ', 'KEN': 'KEN', 'KGZ': 'KGZ', 'KIR': 'KIR', 'KNA': 'KNA', 'KOR': 'KOR', 'KSA': 'KSA', - 'KUW': 'KUW', 'KVX': 'KVX', 'LAO': 'LAO', 'LBN': 'LBN', 'LBR': 'LBR', 'LBY': 'LBY', 'LCA': 'LCA', - 'LES': 'LES', 'LIE': 'LIE', 'LKA': 'LKA', 'LUX': 'LUX', 'MAC': 'MAC', 'MAD': 'MAD', 'MAF': 'MAF', - 'MAR': 'MAR', 'MAS': 'MAS', 'MDA': 'MDA', 'MDV': 'MDV', 'MEX': 'MEX', 'MHL': 'MHL', 'MKD': 'MKD', - 'MLI': 'MLI', 'MLT': 'MLT', 'MNG': 'MNG', 'MNP': 'MNP', 'MON': 'MON', 'MOZ': 'MOZ', 'MRI': 'MRI', - 'MSR': 'MSR', 'MTN': 'MTN', 'MTQ': 'MTQ', 'MWI': 'MWI', 'MYA': 'MYA', 'MYT': 'MYT', 'NAM': 'NAM', - 'NCA': 'NCA', 'NCL': 'NCL', 'NEP': 'NEP', 'NFK': 'NFK', 'NIG': 'NIG', 'NIR': 'NIR', 'NIU': 'NIU', - 'NLD': 'NLD', 'NOR': 'NOR', 'NRU': 'NRU', 'NZL': 'NZL', 'OMA': 'OMA', 'PAK': 'PAK', 'PAN': 'PAN', - 'PAR': 'PAR', 'PCN': 'PCN', 'PER': 'PER', 'PHI': 'PHI', 'PLE': 'PLE', 'PLW': 'PLW', 'PNG': 'PNG', - 'POL': 'POL', 'POR': 'POR', 'PRK': 'PRK', 'PUR': 'PUR', 'QAT': 'QAT', 'REU': 'REU', 'ROU': 'ROU', - 'RSA': 'RSA', 'RUS': 'RUS', 'RWA': 'RWA', 'SAM': 'SAM', 'SCO': 'SCO', 'SDN': 'SDN', 'SEN': 'SEN', - 'SEY': 'SEY', 'SGS': 'SGS', 'SHN': 'SHN', 'SIN': 'SIN', 'SJM': 'SJM', 'SLE': 'SLE', 'SLV': 'SLV', - 'SMR': 'SMR', 'SOL': 'SOL', 'SOM': 'SOM', 'SPM': 'SPM', 'SRB': 'SRB', 'SSD': 'SSD', 'STP': 'STP', - 'SUI': 'SUI', 'SUR': 'SUR', 'SWZ': 'SWZ', 'SXM': 'SXM', 'SYR': 'SYR', 'TAH': 'TAH', 'TAN': 'TAN', - 'TCA': 'TCA', 'TGA': 'TGA', 'THA': 'THA', 'TJK': 'TJK', 'TKL': 'TKL', 'TKM': 'TKM', 'TLS': 'TLS', - 'TOG': 'TOG', 'TRI': 'TRI', 'TUN': 'TUN', 'TUR': 'TUR', 'TUV': 'TUV', 'TWN': 'TWN', 'UAE': 'UAE', - 'UGA': 'UGA', 'UKR': 'UKR', 'UMI': 'UMI', 'URU': 'URU', 'USA': 'USA', 'UZB': 'UZB', 'VAN': 'VAN', - 'VAT': 'VAT', 'VEN': 'VEN', 'VGB': 'VGB', 'VIE': 'VIE', 'VIN': 'VIN', 'VIR': 'VIR', 'WAL': 'WAL', - 'WLF': 'WLF', 'YEM': 'YEM', 'ZAM': 'ZAM', 'ZIM': 'ZIM', "EUR": "EUR" + "AFG": "AFG", + "AIA": "AIA", + "ALA": "ALA", + "ALG": "ALG", + "AND": "AND", + "ANG": "ANG", + "ARG": "ARG", + "ARM": "ARM", + "ARU": "ARU", + "ASA": "ASA", + "ATA": "ATA", + "ATF": "ATF", + "ATG": "ATG", + "AUS": "AUS", + "AUT": "AUT", + "AZE": "AZE", + "BAH": "BAH", + "BAN": "BAN", + "BDI": "BDI", + "BEL": "BEL", + "BEN": "BEN", + "BER": "BER", + "BES": "BES", + "BFA": "BFA", + "BHR": "BHR", + "BHU": "BHU", + "BIH": "BIH", + "BLM": "BLM", + "BLR": "BLR", + "BLZ": "BLZ", + 
"BOL": "BOL", + "BOT": "BOT", + "BRA": "BRA", + "BRB": "BRB", + "BRU": "BRU", + "BVT": "BVT", + "CAM": "CAM", + "CAN": "CAN", + "CAY": "CAY", + "CCK": "CCK", + "CEE": "CEE", + "CGO": "CGO", + "CHA": "CHA", + "CHI": "CHI", + "CHN": "CHN", + "CIV": "CIV", + "CMR": "CMR", + "COD": "COD", + "COK": "COK", + "COL": "COL", + "COM": "COM", + "CPV": "CPV", + "CRC": "CRC", + "CRO": "CRO", + "CTA": "CTA", + "CUB": "CUB", + "CUW": "CUW", + "CXR": "CXR", + "CYP": "CYP", + "DJI": "DJI", + "DMA": "DMA", + "DOM": "DOM", + "ECU": "ECU", + "EGY": "EGY", + "ENG": "ENG", + "EQG": "EQG", + "ERI": "ERI", + "ESH": "ESH", + "ESP": "ESP", + "ETH": "ETH", + "FIJ": "FIJ", + "FLK": "FLK", + "FRA": "FRA", + "FRO": "FRO", + "FSM": "FSM", + "GAB": "GAB", + "GAM": "GAM", + "GBR": "GBR", + "GEO": "GEO", + "GER": "GER", + "GGY": "GGY", + "GHA": "GHA", + "GIB": "GIB", + "GLP": "GLP", + "GNB": "GNB", + "GRE": "GRE", + "GRL": "GRL", + "GRN": "GRN", + "GUA": "GUA", + "GUF": "GUF", + "GUI": "GUI", + "GUM": "GUM", + "GUY": "GUY", + "HAI": "HAI", + "HKG": "HKG", + "HMD": "HMD", + "HON": "HON", + "HUN": "HUN", + "IDN": "IDN", + "IMN": "IMN", + "IND": "IND", + "IOT": "IOT", + "IRL": "IRL", + "IRN": "IRN", + "IRQ": "IRQ", + "ISL": "ISL", + "ISR": "ISR", + "ITA": "ITA", + "JAM": "JAM", + "JEY": "JEY", + "JOR": "JOR", + "JPN": "JPN", + "KAZ": "KAZ", + "KEN": "KEN", + "KGZ": "KGZ", + "KIR": "KIR", + "KNA": "KNA", + "KOR": "KOR", + "KSA": "KSA", + "KUW": "KUW", + "KVX": "KVX", + "LAO": "LAO", + "LBN": "LBN", + "LBR": "LBR", + "LBY": "LBY", + "LCA": "LCA", + "LES": "LES", + "LIE": "LIE", + "LKA": "LKA", + "LUX": "LUX", + "MAC": "MAC", + "MAD": "MAD", + "MAF": "MAF", + "MAR": "MAR", + "MAS": "MAS", + "MDA": "MDA", + "MDV": "MDV", + "MEX": "MEX", + "MHL": "MHL", + "MKD": "MKD", + "MLI": "MLI", + "MLT": "MLT", + "MNG": "MNG", + "MNP": "MNP", + "MON": "MON", + "MOZ": "MOZ", + "MRI": "MRI", + "MSR": "MSR", + "MTN": "MTN", + "MTQ": "MTQ", + "MWI": "MWI", + "MYA": "MYA", + "MYT": "MYT", + "NAM": "NAM", + "NCA": "NCA", + "NCL": "NCL", + "NEP": "NEP", + "NFK": "NFK", + "NIG": "NIG", + "NIR": "NIR", + "NIU": "NIU", + "NLD": "NLD", + "NOR": "NOR", + "NRU": "NRU", + "NZL": "NZL", + "OMA": "OMA", + "PAK": "PAK", + "PAN": "PAN", + "PAR": "PAR", + "PCN": "PCN", + "PER": "PER", + "PHI": "PHI", + "PLE": "PLE", + "PLW": "PLW", + "PNG": "PNG", + "POL": "POL", + "POR": "POR", + "PRK": "PRK", + "PUR": "PUR", + "QAT": "QAT", + "REU": "REU", + "ROU": "ROU", + "RSA": "RSA", + "RUS": "RUS", + "RWA": "RWA", + "SAM": "SAM", + "SCO": "SCO", + "SDN": "SDN", + "SEN": "SEN", + "SEY": "SEY", + "SGS": "SGS", + "SHN": "SHN", + "SIN": "SIN", + "SJM": "SJM", + "SLE": "SLE", + "SLV": "SLV", + "SMR": "SMR", + "SOL": "SOL", + "SOM": "SOM", + "SPM": "SPM", + "SRB": "SRB", + "SSD": "SSD", + "STP": "STP", + "SUI": "SUI", + "SUR": "SUR", + "SWZ": "SWZ", + "SXM": "SXM", + "SYR": "SYR", + "TAH": "TAH", + "TAN": "TAN", + "TCA": "TCA", + "TGA": "TGA", + "THA": "THA", + "TJK": "TJK", + "TKL": "TKL", + "TKM": "TKM", + "TLS": "TLS", + "TOG": "TOG", + "TRI": "TRI", + "TUN": "TUN", + "TUR": "TUR", + "TUV": "TUV", + "TWN": "TWN", + "UAE": "UAE", + "UGA": "UGA", + "UKR": "UKR", + "UMI": "UMI", + "URU": "URU", + "USA": "USA", + "UZB": "UZB", + "VAN": "VAN", + "VAT": "VAT", + "VEN": "VEN", + "VGB": "VGB", + "VIE": "VIE", + "VIN": "VIN", + "VIR": "VIR", + "WAL": "WAL", + "WLF": "WLF", + "YEM": "YEM", + "ZAM": "ZAM", + "ZIM": "ZIM", + "EUR": "EUR", } for key, value in regions.items(): if f" {key} " in label: @@ -2235,20 +3170,1726 @@ def get_region(self, bdinfo, region=None): def 
get_distributor(self, distributor_in): distributor_list = [ - '01 DISTRIBUTION', '100 DESTINATIONS TRAVEL FILM', '101 FILMS', '1FILMS', '2 ENTERTAIN VIDEO', '20TH CENTURY FOX', '2L', '3D CONTENT HUB', '3D MEDIA', '3L FILM', '4DIGITAL', '4DVD', '4K ULTRA HD MOVIES', '4K UHD', '8-FILMS', '84 ENTERTAINMENT', '88 FILMS', '@ANIME', 'ANIME', 'A CONTRACORRIENTE', 'A CONTRACORRIENTE FILMS', 'A&E HOME VIDEO', 'A&E', 'A&M RECORDS', 'A+E NETWORKS', 'A+R', 'A-FILM', 'AAA', 'AB VIDÉO', 'AB VIDEO', 'ABC - (AUSTRALIAN BROADCASTING CORPORATION)', 'ABC', 'ABKCO', 'ABSOLUT MEDIEN', 'ABSOLUTE', 'ACCENT FILM ENTERTAINMENT', 'ACCENTUS', 'ACORN MEDIA', 'AD VITAM', 'ADA', 'ADITYA VIDEOS', 'ADSO FILMS', 'AFM RECORDS', 'AGFA', 'AIX RECORDS', - 'ALAMODE FILM', 'ALBA RECORDS', 'ALBANY RECORDS', 'ALBATROS', 'ALCHEMY', 'ALIVE', 'ALL ANIME', 'ALL INTERACTIVE ENTERTAINMENT', 'ALLEGRO', 'ALLIANCE', 'ALPHA MUSIC', 'ALTERDYSTRYBUCJA', 'ALTERED INNOCENCE', 'ALTITUDE FILM DISTRIBUTION', 'ALUCARD RECORDS', 'AMAZING D.C.', 'AMAZING DC', 'AMMO CONTENT', 'AMUSE SOFT ENTERTAINMENT', 'ANCONNECT', 'ANEC', 'ANIMATSU', 'ANIME HOUSE', 'ANIME LTD', 'ANIME WORKS', 'ANIMEIGO', 'ANIPLEX', 'ANOLIS ENTERTAINMENT', 'ANOTHER WORLD ENTERTAINMENT', 'AP INTERNATIONAL', 'APPLE', 'ARA MEDIA', 'ARBELOS', 'ARC ENTERTAINMENT', 'ARP SÉLECTION', 'ARP SELECTION', 'ARROW', 'ART SERVICE', 'ART VISION', 'ARTE ÉDITIONS', 'ARTE EDITIONS', 'ARTE VIDÉO', - 'ARTE VIDEO', 'ARTHAUS MUSIK', 'ARTIFICIAL EYE', 'ARTSPLOITATION FILMS', 'ARTUS FILMS', 'ASCOT ELITE HOME ENTERTAINMENT', 'ASIA VIDEO', 'ASMIK ACE', 'ASTRO RECORDS & FILMWORKS', 'ASYLUM', 'ATLANTIC FILM', 'ATLANTIC RECORDS', 'ATLAS FILM', 'AUDIO VISUAL ENTERTAINMENT', 'AURO-3D CREATIVE LABEL', 'AURUM', 'AV VISIONEN', 'AV-JET', 'AVALON', 'AVENTI', 'AVEX TRAX', 'AXIOM', 'AXIS RECORDS', 'AYNGARAN', 'BAC FILMS', 'BACH FILMS', 'BANDAI VISUAL', 'BARCLAY', 'BBC', 'BRITISH BROADCASTING CORPORATION', 'BBI FILMS', 'BBI', 'BCI HOME ENTERTAINMENT', 'BEGGARS BANQUET', 'BEL AIR CLASSIQUES', 'BELGA FILMS', 'BELVEDERE', 'BENELUX FILM DISTRIBUTORS', 'BENNETT-WATT MEDIA', 'BERLIN CLASSICS', 'BERLINER PHILHARMONIKER RECORDINGS', 'BEST ENTERTAINMENT', 'BEYOND HOME ENTERTAINMENT', 'BFI VIDEO', 'BFI', 'BRITISH FILM INSTITUTE', 'BFS ENTERTAINMENT', 'BFS', 'BHAVANI', 'BIBER RECORDS', 'BIG HOME VIDEO', 'BILDSTÖRUNG', - 'BILDSTORUNG', 'BILL ZEBUB', 'BIRNENBLATT', 'BIT WEL', 'BLACK BOX', 'BLACK HILL PICTURES', 'BLACK HILL', 'BLACK HOLE RECORDINGS', 'BLACK HOLE', 'BLAQOUT', 'BLAUFIELD MUSIC', 'BLAUFIELD', 'BLOCKBUSTER ENTERTAINMENT', 'BLOCKBUSTER', 'BLU PHASE MEDIA', 'BLU-RAY ONLY', 'BLU-RAY', 'BLURAY ONLY', 'BLURAY', 'BLUE GENTIAN RECORDS', 'BLUE KINO', 'BLUE UNDERGROUND', 'BMG/ARISTA', 'BMG', 'BMGARISTA', 'BMG ARISTA', 'ARISTA', 'ARISTA/BMG', 'ARISTABMG', 'ARISTA BMG', 'BONTON FILM', 'BONTON', 'BOOMERANG PICTURES', 'BOOMERANG', 'BQHL ÉDITIONS', 'BQHL EDITIONS', 'BQHL', 'BREAKING GLASS', 'BRIDGESTONE', 'BRINK', 'BROAD GREEN PICTURES', 'BROAD GREEN', 'BUSCH MEDIA GROUP', 'BUSCH', 'C MAJOR', 'C.B.S.', 'CAICHANG', 'CALIFÓRNIA FILMES', 'CALIFORNIA FILMES', 'CALIFORNIA', 'CAMEO', 'CAMERA OBSCURA', 'CAMERATA', 'CAMP MOTION PICTURES', 'CAMP MOTION', 'CAPELIGHT PICTURES', 'CAPELIGHT', 'CAPITOL', 'CAPITOL RECORDS', 'CAPRICCI', 'CARGO RECORDS', 'CARLOTTA FILMS', 'CARLOTTA', 'CARLOTA', 'CARMEN FILM', 'CASCADE', 'CATCHPLAY', 'CAULDRON FILMS', 'CAULDRON', 'CBS TELEVISION STUDIOS', 'CBS', 'CCTV', 'CCV ENTERTAINMENT', 'CCV', 'CD BABY', 'CD LAND', 'CECCHI GORI', 'CENTURY MEDIA', 'CHUAN XUN SHI DAI MULTIMEDIA', 'CINE-ASIA', 'CINÉART', 'CINEART', 
'CINEDIGM', 'CINEFIL IMAGICA', 'CINEMA EPOCH', 'CINEMA GUILD', 'CINEMA LIBRE STUDIOS', 'CINEMA MONDO', 'CINEMATIC VISION', 'CINEPLOIT RECORDS', 'CINESTRANGE EXTREME', 'CITEL VIDEO', 'CITEL', 'CJ ENTERTAINMENT', 'CJ', 'CLASSIC MEDIA', 'CLASSICFLIX', 'CLASSICLINE', 'CLAUDIO RECORDS', 'CLEAR VISION', 'CLEOPATRA', 'CLOSE UP', 'CMS MEDIA LIMITED', 'CMV LASERVISION', 'CN ENTERTAINMENT', 'CODE RED', 'COHEN MEDIA GROUP', 'COHEN', 'COIN DE MIRE CINÉMA', 'COIN DE MIRE CINEMA', 'COLOSSEO FILM', 'COLUMBIA', 'COLUMBIA PICTURES', 'COLUMBIA/TRI-STAR', 'TRI-STAR', 'COMMERCIAL MARKETING', 'CONCORD MUSIC GROUP', 'CONCORDE VIDEO', 'CONDOR', 'CONSTANTIN FILM', 'CONSTANTIN', 'CONSTANTINO FILMES', 'CONSTANTINO', 'CONSTRUCTIVE MEDIA SERVICE', 'CONSTRUCTIVE', 'CONTENT ZONE', 'CONTENTS GATE', 'COQUEIRO VERDE', 'CORNERSTONE MEDIA', 'CORNERSTONE', 'CP DIGITAL', 'CREST MOVIES', 'CRITERION', 'CRITERION COLLECTION', 'CC', 'CRYSTAL CLASSICS', 'CULT EPICS', 'CULT FILMS', 'CULT VIDEO', 'CURZON FILM WORLD', 'D FILMS', "D'AILLY COMPANY", 'DAILLY COMPANY', 'D AILLY COMPANY', "D'AILLY", 'DAILLY', 'D AILLY', 'DA CAPO', 'DA MUSIC', "DALL'ANGELO PICTURES", 'DALLANGELO PICTURES', "DALL'ANGELO", 'DALL ANGELO PICTURES', 'DALL ANGELO', 'DAREDO', 'DARK FORCE ENTERTAINMENT', 'DARK FORCE', 'DARK SIDE RELEASING', 'DARK SIDE', 'DAZZLER MEDIA', 'DAZZLER', 'DCM PICTURES', 'DCM', 'DEAPLANETA', 'DECCA', 'DEEPJOY', 'DEFIANT SCREEN ENTERTAINMENT', 'DEFIANT SCREEN', 'DEFIANT', 'DELOS', 'DELPHIAN RECORDS', 'DELPHIAN', 'DELTA MUSIC & ENTERTAINMENT', 'DELTA MUSIC AND ENTERTAINMENT', 'DELTA MUSIC ENTERTAINMENT', 'DELTA MUSIC', 'DELTAMAC CO. LTD.', 'DELTAMAC CO LTD', 'DELTAMAC CO', 'DELTAMAC', 'DEMAND MEDIA', 'DEMAND', 'DEP', 'DEUTSCHE GRAMMOPHON', 'DFW', 'DGM', 'DIAPHANA', 'DIGIDREAMS STUDIOS', 'DIGIDREAMS', 'DIGITAL ENVIRONMENTS', 'DIGITAL', 'DISCOTEK MEDIA', 'DISCOVERY CHANNEL', 'DISCOVERY', 'DISK KINO', 'DISNEY / BUENA VISTA', 'DISNEY', 'BUENA VISTA', 'DISNEY BUENA VISTA', 'DISTRIBUTION SELECT', 'DIVISA', 'DNC ENTERTAINMENT', 'DNC', 'DOGWOOF', 'DOLMEN HOME VIDEO', 'DOLMEN', 'DONAU FILM', 'DONAU', 'DORADO FILMS', 'DORADO', 'DRAFTHOUSE FILMS', 'DRAFTHOUSE', 'DRAGON FILM ENTERTAINMENT', 'DRAGON ENTERTAINMENT', 'DRAGON FILM', 'DRAGON', 'DREAMWORKS', 'DRIVE ON RECORDS', 'DRIVE ON', 'DRIVE-ON', 'DRIVEON', 'DS MEDIA', 'DTP ENTERTAINMENT AG', 'DTP ENTERTAINMENT', 'DTP AG', 'DTP', 'DTS ENTERTAINMENT', 'DTS', 'DUKE MARKETING', 'DUKE VIDEO DISTRIBUTION', 'DUKE', 'DUTCH FILMWORKS', 'DUTCH', 'DVD INTERNATIONAL', 'DVD', 'DYBEX', 'DYNAMIC', 'DYNIT', 'E1 ENTERTAINMENT', 'E1', 'EAGLE ENTERTAINMENT', 'EAGLE HOME ENTERTAINMENT PVT.LTD.', 'EAGLE HOME ENTERTAINMENT PVTLTD', 'EAGLE HOME ENTERTAINMENT PVT LTD', 'EAGLE HOME ENTERTAINMENT', 'EAGLE PICTURES', 'EAGLE ROCK ENTERTAINMENT', 'EAGLE ROCK', 'EAGLE VISION MEDIA', 'EAGLE VISION', 'EARMUSIC', 'EARTH ENTERTAINMENT', 'EARTH', 'ECHO BRIDGE ENTERTAINMENT', 'ECHO BRIDGE', 'EDEL GERMANY GMBH', 'EDEL GERMANY', 'EDEL RECORDS', 'EDITION TONFILM', 'EDITIONS MONTPARNASSE', 'EDKO FILMS LTD.', 'EDKO FILMS LTD', 'EDKO FILMS', - 'EDKO', "EIN'S M&M CO", 'EINS M&M CO', "EIN'S M&M", 'EINS M&M', 'ELEA-MEDIA', 'ELEA MEDIA', 'ELEA', 'ELECTRIC PICTURE', 'ELECTRIC', 'ELEPHANT FILMS', 'ELEPHANT', 'ELEVATION', 'EMI', 'EMON', 'EMS', 'EMYLIA', 'ENE MEDIA', 'ENE', 'ENTERTAINMENT IN VIDEO', 'ENTERTAINMENT IN', 'ENTERTAINMENT ONE', 'ENTERTAINMENT ONE FILMS CANADA INC.', 'ENTERTAINMENT ONE FILMS CANADA INC', 'ENTERTAINMENT ONE FILMS CANADA', 'ENTERTAINMENT ONE CANADA INC', 'ENTERTAINMENT ONE CANADA', 'ENTERTAINMENTONE', 'EONE', 'EOS', 'EPIC 
PICTURES', 'EPIC', 'EPIC RECORDS', 'ERATO', 'EROS', 'ESC EDITIONS', 'ESCAPI MEDIA BV', 'ESOTERIC RECORDINGS', 'ESPN FILMS', 'EUREKA ENTERTAINMENT', 'EUREKA', 'EURO PICTURES', 'EURO VIDEO', 'EUROARTS', 'EUROPA FILMES', 'EUROPA', 'EUROPACORP', 'EUROZOOM', 'EXCEL', 'EXPLOSIVE MEDIA', 'EXPLOSIVE', 'EXTRALUCID FILMS', 'EXTRALUCID', 'EYE SEE MOVIES', 'EYE SEE', 'EYK MEDIA', 'EYK', 'FABULOUS FILMS', 'FABULOUS', 'FACTORIS FILMS', 'FACTORIS', 'FARAO RECORDS', 'FARBFILM HOME ENTERTAINMENT', 'FARBFILM ENTERTAINMENT', 'FARBFILM HOME', 'FARBFILM', 'FEELGOOD ENTERTAINMENT', 'FEELGOOD', 'FERNSEHJUWELEN', 'FILM CHEST', 'FILM MEDIA', 'FILM MOVEMENT', 'FILM4', 'FILMART', 'FILMAURO', 'FILMAX', 'FILMCONFECT HOME ENTERTAINMENT', 'FILMCONFECT ENTERTAINMENT', 'FILMCONFECT HOME', 'FILMCONFECT', 'FILMEDIA', 'FILMJUWELEN', 'FILMOTEKA NARODAWA', 'FILMRISE', 'FINAL CUT ENTERTAINMENT', 'FINAL CUT', 'FIREHOUSE 12 RECORDS', 'FIREHOUSE 12', 'FIRST INTERNATIONAL PRODUCTION', 'FIRST INTERNATIONAL', 'FIRST LOOK STUDIOS', 'FIRST LOOK', 'FLAGMAN TRADE', 'FLASHSTAR FILMES', 'FLASHSTAR', 'FLICKER ALLEY', 'FNC ADD CULTURE', 'FOCUS FILMES', 'FOCUS', 'FOKUS MEDIA', 'FOKUSA', 'FOX PATHE EUROPA', 'FOX PATHE', 'FOX EUROPA', 'FOX/MGM', 'FOX MGM', 'MGM', 'MGM/FOX', 'FOX', 'FPE', 'FRANCE TÉLÉVISIONS DISTRIBUTION', 'FRANCE TELEVISIONS DISTRIBUTION', 'FRANCE TELEVISIONS', 'FRANCE', 'FREE DOLPHIN ENTERTAINMENT', 'FREE DOLPHIN', 'FREESTYLE DIGITAL MEDIA', 'FREESTYLE DIGITAL', 'FREESTYLE', 'FREMANTLE HOME ENTERTAINMENT', 'FREMANTLE ENTERTAINMENT', 'FREMANTLE HOME', 'FREMANTL', 'FRENETIC FILMS', 'FRENETIC', 'FRONTIER WORKS', 'FRONTIER', 'FRONTIERS MUSIC', 'FRONTIERS RECORDS', 'FS FILM OY', 'FS FILM', 'FULL MOON FEATURES', 'FULL MOON', 'FUN CITY EDITIONS', 'FUN CITY', - 'FUNIMATION ENTERTAINMENT', 'FUNIMATION', 'FUSION', 'FUTUREFILM', 'G2 PICTURES', 'G2', 'GAGA COMMUNICATIONS', 'GAGA', 'GAIAM', 'GALAPAGOS', 'GAMMA HOME ENTERTAINMENT', 'GAMMA ENTERTAINMENT', 'GAMMA HOME', 'GAMMA', 'GARAGEHOUSE PICTURES', 'GARAGEHOUSE', 'GARAGEPLAY (車庫娛樂)', '車庫娛樂', 'GARAGEPLAY (Che Ku Yu Le )', 'GARAGEPLAY', 'Che Ku Yu Le', 'GAUMONT', 'GEFFEN', 'GENEON ENTERTAINMENT', 'GENEON', 'GENEON UNIVERSAL ENTERTAINMENT', 'GENERAL VIDEO RECORDING', 'GLASS DOLL FILMS', 'GLASS DOLL', 'GLOBE MUSIC MEDIA', 'GLOBE MUSIC', 'GLOBE MEDIA', 'GLOBE', 'GO ENTERTAIN', 'GO', 'GOLDEN HARVEST', 'GOOD!MOVIES', 'GOOD! 
MOVIES', 'GOOD MOVIES', 'GRAPEVINE VIDEO', 'GRAPEVINE', 'GRASSHOPPER FILM', 'GRASSHOPPER FILMS', 'GRASSHOPPER', 'GRAVITAS VENTURES', 'GRAVITAS', 'GREAT MOVIES', 'GREAT', 'GREEN APPLE ENTERTAINMENT', 'GREEN ENTERTAINMENT', 'GREEN APPLE', 'GREEN', 'GREENNARAE MEDIA', 'GREENNARAE', 'GRINDHOUSE RELEASING', 'GRINDHOUSE', 'GRIND HOUSE', 'GRYPHON ENTERTAINMENT', 'GRYPHON', 'GUNPOWDER & SKY', 'GUNPOWDER AND SKY', 'GUNPOWDER SKY', 'GUNPOWDER + SKY', 'GUNPOWDER', 'HANABEE ENTERTAINMENT', 'HANABEE', 'HANNOVER HOUSE', 'HANNOVER', 'HANSESOUND', 'HANSE SOUND', 'HANSE', 'HAPPINET', 'HARMONIA MUNDI', 'HARMONIA', 'HBO', 'HDC', 'HEC', 'HELL & BACK RECORDINGS', 'HELL AND BACK RECORDINGS', 'HELL & BACK', 'HELL AND BACK', "HEN'S TOOTH VIDEO", 'HENS TOOTH VIDEO', "HEN'S TOOTH", 'HENS TOOTH', 'HIGH FLIERS', 'HIGHLIGHT', 'HILLSONG', 'HISTORY CHANNEL', 'HISTORY', 'HK VIDÉO', 'HK VIDEO', 'HK', 'HMH HAMBURGER MEDIEN HAUS', 'HAMBURGER MEDIEN HAUS', 'HMH HAMBURGER MEDIEN', 'HMH HAMBURGER', 'HMH', 'HOLLYWOOD CLASSIC ENTERTAINMENT', 'HOLLYWOOD CLASSIC', 'HOLLYWOOD PICTURES', 'HOLLYWOOD', 'HOPSCOTCH ENTERTAINMENT', 'HOPSCOTCH', 'HPM', 'HÄNNSLER CLASSIC', 'HANNSLER CLASSIC', 'HANNSLER', 'I-CATCHER', 'I CATCHER', 'ICATCHER', 'I-ON NEW MEDIA', 'I ON NEW MEDIA', 'ION NEW MEDIA', 'ION MEDIA', 'I-ON', 'ION', 'IAN PRODUCTIONS', 'IAN', 'ICESTORM', 'ICON FILM DISTRIBUTION', 'ICON DISTRIBUTION', 'ICON FILM', 'ICON', 'IDEALE AUDIENCE', 'IDEALE', 'IFC FILMS', 'IFC', 'IFILM', 'ILLUSIONS UNLTD.', 'ILLUSIONS UNLTD', 'ILLUSIONS', 'IMAGE ENTERTAINMENT', 'IMAGE', 'IMAGEM FILMES', 'IMAGEM', 'IMOVISION', 'IMPERIAL CINEPIX', 'IMPRINT', 'IMPULS HOME ENTERTAINMENT', 'IMPULS ENTERTAINMENT', 'IMPULS HOME', 'IMPULS', 'IN-AKUSTIK', 'IN AKUSTIK', 'INAKUSTIK', 'INCEPTION MEDIA GROUP', 'INCEPTION MEDIA', 'INCEPTION GROUP', 'INCEPTION', 'INDEPENDENT', 'INDICAN', 'INDIE RIGHTS', 'INDIE', 'INDIGO', 'INFO', 'INJOINGAN', 'INKED PICTURES', 'INKED', 'INSIDE OUT MUSIC', 'INSIDE MUSIC', 'INSIDE OUT', 'INSIDE', 'INTERCOM', 'INTERCONTINENTAL VIDEO', 'INTERCONTINENTAL', 'INTERGROOVE', 'INTERSCOPE', 'INVINCIBLE PICTURES', 'INVINCIBLE', 'ISLAND/MERCURY', 'ISLAND MERCURY', 'ISLANDMERCURY', 'ISLAND & MERCURY', 'ISLAND AND MERCURY', 'ISLAND', 'ITN', 'ITV DVD', 'ITV', 'IVC', 'IVE ENTERTAINMENT', 'IVE', 'J&R ADVENTURES', 'J&R', 'JR', 'JAKOB', 'JONU MEDIA', 'JONU', 'JRB PRODUCTIONS', 'JRB', 'JUST BRIDGE ENTERTAINMENT', 'JUST BRIDGE', 'JUST ENTERTAINMENT', 'JUST', 'KABOOM ENTERTAINMENT', 'KABOOM', 'KADOKAWA ENTERTAINMENT', 'KADOKAWA', 'KAIROS', 'KALEIDOSCOPE ENTERTAINMENT', 'KALEIDOSCOPE', 'KAM & RONSON ENTERPRISES', 'KAM & RONSON', 'KAM&RONSON ENTERPRISES', 'KAM&RONSON', 'KAM AND RONSON ENTERPRISES', 'KAM AND RONSON', 'KANA HOME VIDEO', 'KARMA FILMS', 'KARMA', 'KATZENBERGER', 'KAZE', - 'KBS MEDIA', 'KBS', 'KD MEDIA', 'KD', 'KING MEDIA', 'KING', 'KING RECORDS', 'KINO LORBER', 'KINO', 'KINO SWIAT', 'KINOKUNIYA', 'KINOWELT HOME ENTERTAINMENT/DVD', 'KINOWELT HOME ENTERTAINMENT', 'KINOWELT ENTERTAINMENT', 'KINOWELT HOME DVD', 'KINOWELT ENTERTAINMENT/DVD', 'KINOWELT DVD', 'KINOWELT', 'KIT PARKER FILMS', 'KIT PARKER', 'KITTY MEDIA', 'KNM HOME ENTERTAINMENT', 'KNM ENTERTAINMENT', 'KNM HOME', 'KNM', 'KOBA FILMS', 'KOBA', 'KOCH ENTERTAINMENT', 'KOCH MEDIA', 'KOCH', 'KRAKEN RELEASING', 'KRAKEN', 'KSCOPE', 'KSM', 'KULTUR', "L'ATELIER D'IMAGES", "LATELIER D'IMAGES", "L'ATELIER DIMAGES", 'LATELIER DIMAGES', "L ATELIER D'IMAGES", "L'ATELIER D IMAGES", - 'L ATELIER D IMAGES', "L'ATELIER", 'L ATELIER', 'LATELIER', 'LA AVENTURA AUDIOVISUAL', 'LA AVENTURA', 'LACE GROUP', 'LACE', 'LASER 
PARADISE', 'LAYONS', 'LCJ EDITIONS', 'LCJ', 'LE CHAT QUI FUME', 'LE PACTE', 'LEDICK FILMHANDEL', 'LEGEND', 'LEOMARK STUDIOS', 'LEOMARK', 'LEONINE FILMS', 'LEONINE', 'LICHTUNG MEDIA LTD', 'LICHTUNG LTD', 'LICHTUNG MEDIA LTD.', 'LICHTUNG LTD.', 'LICHTUNG MEDIA', 'LICHTUNG', 'LIGHTHOUSE HOME ENTERTAINMENT', 'LIGHTHOUSE ENTERTAINMENT', 'LIGHTHOUSE HOME', 'LIGHTHOUSE', 'LIGHTYEAR', 'LIONSGATE FILMS', 'LIONSGATE', 'LIZARD CINEMA TRADE', 'LLAMENTOL', 'LOBSTER FILMS', 'LOBSTER', 'LOGON', 'LORBER FILMS', 'LORBER', 'LOS BANDITOS FILMS', 'LOS BANDITOS', 'LOUD & PROUD RECORDS', 'LOUD AND PROUD RECORDS', 'LOUD & PROUD', 'LOUD AND PROUD', 'LSO LIVE', 'LUCASFILM', 'LUCKY RED', 'LUMIÈRE HOME ENTERTAINMENT', 'LUMIERE HOME ENTERTAINMENT', 'LUMIERE ENTERTAINMENT', 'LUMIERE HOME', 'LUMIERE', 'M6 VIDEO', 'M6', 'MAD DIMENSION', 'MADMAN ENTERTAINMENT', 'MADMAN', 'MAGIC BOX', 'MAGIC PLAY', 'MAGNA HOME ENTERTAINMENT', 'MAGNA ENTERTAINMENT', 'MAGNA HOME', 'MAGNA', 'MAGNOLIA PICTURES', 'MAGNOLIA', 'MAIDEN JAPAN', 'MAIDEN', 'MAJENG MEDIA', 'MAJENG', 'MAJESTIC HOME ENTERTAINMENT', 'MAJESTIC ENTERTAINMENT', 'MAJESTIC HOME', 'MAJESTIC', 'MANGA HOME ENTERTAINMENT', 'MANGA ENTERTAINMENT', 'MANGA HOME', 'MANGA', 'MANTA LAB', 'MAPLE STUDIOS', 'MAPLE', 'MARCO POLO PRODUCTION', 'MARCO POLO', 'MARIINSKY', 'MARVEL STUDIOS', 'MARVEL', 'MASCOT RECORDS', 'MASCOT', 'MASSACRE VIDEO', 'MASSACRE', 'MATCHBOX', 'MATRIX D', 'MAXAM', 'MAYA HOME ENTERTAINMENT', 'MAYA ENTERTAINMENT', 'MAYA HOME', 'MAYAT', 'MDG', 'MEDIA BLASTERS', 'MEDIA FACTORY', 'MEDIA TARGET DISTRIBUTION', 'MEDIA TARGET', 'MEDIAINVISION', 'MEDIATOON', 'MEDIATRES ESTUDIO', 'MEDIATRES STUDIO', 'MEDIATRES', 'MEDICI ARTS', 'MEDICI CLASSICS', 'MEDIUMRARE ENTERTAINMENT', 'MEDIUMRARE', 'MEDUSA', 'MEGASTAR', 'MEI AH', 'MELI MÉDIAS', 'MELI MEDIAS', 'MEMENTO FILMS', 'MEMENTO', 'MENEMSHA FILMS', 'MENEMSHA', 'MERCURY', 'MERCURY STUDIOS', 'MERGE SOFT PRODUCTIONS', 'MERGE PRODUCTIONS', 'MERGE SOFT', 'MERGE', 'METAL BLADE RECORDS', 'METAL BLADE', 'METEOR', 'METRO-GOLDWYN-MAYER', 'METRO GOLDWYN MAYER', 'METROGOLDWYNMAYER', 'METRODOME VIDEO', 'METRODOME', 'METROPOLITAN', 'MFA+', 'MFA', 'MIG FILMGROUP', 'MIG', 'MILESTONE', 'MILL CREEK ENTERTAINMENT', 'MILL CREEK', 'MILLENNIUM MEDIA', 'MILLENNIUM', 'MIRAGE ENTERTAINMENT', 'MIRAGE', 'MIRAMAX', 'MISTERIYA ZVUKA', 'MK2', 'MODE RECORDS', 'MODE', 'MOMENTUM PICTURES', 'MONDO HOME ENTERTAINMENT', 'MONDO ENTERTAINMENT', 'MONDO HOME', 'MONDO MACABRO', 'MONGREL MEDIA', 'MONOLIT', 'MONOLITH VIDEO', 'MONOLITH', 'MONSTER PICTURES', 'MONSTER', 'MONTEREY VIDEO', 'MONTEREY', 'MONUMENT RELEASING', 'MONUMENT', 'MORNINGSTAR', 'MORNING STAR', 'MOSERBAER', 'MOVIEMAX', 'MOVINSIDE', 'MPI MEDIA GROUP', 'MPI MEDIA', 'MPI', 'MR. 
BONGO FILMS', 'MR BONGO FILMS', 'MR BONGO', 'MRG (MERIDIAN)', 'MRG MERIDIAN', 'MRG', 'MERIDIAN', 'MUBI', 'MUG SHOT PRODUCTIONS', 'MUG SHOT', 'MULTIMUSIC', 'MULTI-MUSIC', 'MULTI MUSIC', 'MUSE', 'MUSIC BOX FILMS', 'MUSIC BOX', 'MUSICBOX', 'MUSIC BROKERS', 'MUSIC THEORIES', 'MUSIC VIDEO DISTRIBUTORS', 'MUSIC VIDEO', 'MUSTANG ENTERTAINMENT', 'MUSTANG', 'MVD VISUAL', 'MVD', 'MVD/VSC', 'MVL', 'MVM ENTERTAINMENT', 'MVM', 'MYNDFORM', 'MYSTIC NIGHT PICTURES', 'MYSTIC NIGHT', 'NAMELESS MEDIA', 'NAMELESS', 'NAPALM RECORDS', 'NAPALM', 'NATIONAL ENTERTAINMENT MEDIA', 'NATIONAL ENTERTAINMENT', 'NATIONAL MEDIA', 'NATIONAL FILM ARCHIVE', 'NATIONAL ARCHIVE', 'NATIONAL FILM', 'NATIONAL GEOGRAPHIC', 'NAT GEO TV', 'NAT GEO', 'NGO', 'NAXOS', 'NBCUNIVERSAL ENTERTAINMENT JAPAN', 'NBC UNIVERSAL ENTERTAINMENT JAPAN', 'NBCUNIVERSAL JAPAN', 'NBC UNIVERSAL JAPAN', 'NBC JAPAN', 'NBO ENTERTAINMENT', 'NBO', 'NEOS', 'NETFLIX', 'NETWORK', 'NEW BLOOD', 'NEW DISC', 'NEW KSM', 'NEW LINE CINEMA', 'NEW LINE', 'NEW MOVIE TRADING CO. LTD', 'NEW MOVIE TRADING CO LTD', 'NEW MOVIE TRADING CO', 'NEW MOVIE TRADING', 'NEW WAVE FILMS', 'NEW WAVE', 'NFI', 'NHK', 'NIPPONART', 'NIS AMERICA', 'NJUTAFILMS', 'NOBLE ENTERTAINMENT', 'NOBLE', 'NORDISK FILM', 'NORDISK', 'NORSK FILM', 'NORSK', 'NORTH AMERICAN MOTION PICTURES', 'NOS AUDIOVISUAIS', 'NOTORIOUS PICTURES', 'NOTORIOUS', 'NOVA MEDIA', 'NOVA', 'NOVA SALES AND DISTRIBUTION', 'NOVA SALES & DISTRIBUTION', 'NSM', 'NSM RECORDS', 'NUCLEAR BLAST', 'NUCLEUS FILMS', 'NUCLEUS', 'OBERLIN MUSIC', 'OBERLIN', 'OBRAS-PRIMAS DO CINEMA', 'OBRAS PRIMAS DO CINEMA', 'OBRASPRIMAS DO CINEMA', 'OBRAS-PRIMAS CINEMA', 'OBRAS PRIMAS CINEMA', 'OBRASPRIMAS CINEMA', 'OBRAS-PRIMAS', 'OBRAS PRIMAS', 'OBRASPRIMAS', 'ODEON', 'OFDB FILMWORKS', 'OFDB', 'OLIVE FILMS', 'OLIVE', 'ONDINE', 'ONSCREEN FILMS', 'ONSCREEN', 'OPENING DISTRIBUTION', 'OPERA AUSTRALIA', 'OPTIMUM HOME ENTERTAINMENT', 'OPTIMUM ENTERTAINMENT', 'OPTIMUM HOME', 'OPTIMUM', 'OPUS ARTE', 'ORANGE STUDIO', 'ORANGE', 'ORLANDO EASTWOOD FILMS', 'ORLANDO FILMS', 'ORLANDO EASTWOOD', 'ORLANDO', 'ORUSTAK PICTURES', 'ORUSTAK', 'OSCILLOSCOPE PICTURES', 'OSCILLOSCOPE', 'OUTPLAY', 'PALISADES TARTAN', 'PAN VISION', 'PANVISION', 'PANAMINT CINEMA', 'PANAMINT', 'PANDASTORM ENTERTAINMENT', 'PANDA STORM ENTERTAINMENT', 'PANDASTORM', 'PANDA STORM', 'PANDORA FILM', 'PANDORA', 'PANEGYRIC', 'PANORAMA', 'PARADE DECK FILMS', 'PARADE DECK', 'PARADISE', 'PARADISO FILMS', 'PARADOX', 'PARAMOUNT PICTURES', 'PARAMOUNT', 'PARIS FILMES', 'PARIS FILMS', 'PARIS', 'PARK CIRCUS', 'PARLOPHONE', 'PASSION RIVER', 'PATHE DISTRIBUTION', 'PATHE', 'PBS', 'PEACE ARCH TRINITY', 'PECCADILLO PICTURES', 'PEPPERMINT', 'PHASE 4 FILMS', 'PHASE 4', 'PHILHARMONIA BAROQUE', 'PICTURE HOUSE ENTERTAINMENT', 'PICTURE ENTERTAINMENT', 'PICTURE HOUSE', 'PICTURE', 'PIDAX', - 'PINK FLOYD RECORDS', 'PINK FLOYD', 'PINNACLE FILMS', 'PINNACLE', 'PLAIN', 'PLATFORM ENTERTAINMENT LIMITED', 'PLATFORM ENTERTAINMENT LTD', 'PLATFORM ENTERTAINMENT LTD.', 'PLATFORM ENTERTAINMENT', 'PLATFORM', 'PLAYARTE', 'PLG UK CLASSICS', 'PLG UK', 'PLG', 'POLYBAND & TOPPIC VIDEO/WVG', 'POLYBAND AND TOPPIC VIDEO/WVG', 'POLYBAND & TOPPIC VIDEO WVG', 'POLYBAND & TOPPIC VIDEO AND WVG', 'POLYBAND & TOPPIC VIDEO & WVG', 'POLYBAND AND TOPPIC VIDEO WVG', 'POLYBAND AND TOPPIC VIDEO AND WVG', 'POLYBAND AND TOPPIC VIDEO & WVG', 'POLYBAND & TOPPIC VIDEO', 'POLYBAND AND TOPPIC VIDEO', 'POLYBAND & TOPPIC', 'POLYBAND AND TOPPIC', 'POLYBAND', 'WVG', 'POLYDOR', 'PONY', 'PONY CANYON', 'POTEMKINE', 'POWERHOUSE FILMS', 'POWERHOUSE', 'POWERSTATIOM', 'PRIDE & JOY', 
'PRIDE AND JOY', 'PRINZ MEDIA', 'PRINZ', 'PRIS AUDIOVISUAIS', 'PRO VIDEO', 'PRO-VIDEO', 'PRO-MOTION', 'PRO MOTION', 'PROD. JRB', 'PROD JRB', 'PRODISC', 'PROKINO', 'PROVOGUE RECORDS', 'PROVOGUE', 'PROWARE', 'PULP VIDEO', 'PULP', 'PULSE VIDEO', 'PULSE', 'PURE AUDIO RECORDINGS', 'PURE AUDIO', 'PURE FLIX ENTERTAINMENT', 'PURE FLIX', 'PURE ENTERTAINMENT', 'PYRAMIDE VIDEO', 'PYRAMIDE', 'QUALITY FILMS', 'QUALITY', 'QUARTO VALLEY RECORDS', 'QUARTO VALLEY', 'QUESTAR', 'R SQUARED FILMS', 'R SQUARED', 'RAPID EYE MOVIES', 'RAPID EYE', 'RARO VIDEO', 'RARO', 'RAROVIDEO U.S.', 'RAROVIDEO US', 'RARO VIDEO US', 'RARO VIDEO U.S.', 'RARO U.S.', 'RARO US', 'RAVEN BANNER RELEASING', 'RAVEN BANNER', 'RAVEN', 'RAZOR DIGITAL ENTERTAINMENT', 'RAZOR DIGITAL', 'RCA', 'RCO LIVE', 'RCO', 'RCV', 'REAL GONE MUSIC', 'REAL GONE', 'REANIMEDIA', 'REANI MEDIA', 'REDEMPTION', 'REEL', 'RELIANCE HOME VIDEO & GAMES', 'RELIANCE HOME VIDEO AND GAMES', 'RELIANCE HOME VIDEO', 'RELIANCE VIDEO', 'RELIANCE HOME', 'RELIANCE', 'REM CULTURE', 'REMAIN IN LIGHT', 'REPRISE', 'RESEN', 'RETROMEDIA', 'REVELATION FILMS LTD.', 'REVELATION FILMS LTD', 'REVELATION FILMS', 'REVELATION LTD.', 'REVELATION LTD', 'REVELATION', 'REVOLVER ENTERTAINMENT', 'REVOLVER', 'RHINO MUSIC', 'RHINO', 'RHV', 'RIGHT STUF', 'RIMINI EDITIONS', 'RISING SUN MEDIA', 'RLJ ENTERTAINMENT', 'RLJ', 'ROADRUNNER RECORDS', 'ROADSHOW ENTERTAINMENT', 'ROADSHOW', 'RONE', 'RONIN FLIX', 'ROTANA HOME ENTERTAINMENT', 'ROTANA ENTERTAINMENT', 'ROTANA HOME', 'ROTANA', 'ROUGH TRADE', - 'ROUNDER', 'SAFFRON HILL FILMS', 'SAFFRON HILL', 'SAFFRON', 'SAMUEL GOLDWYN FILMS', 'SAMUEL GOLDWYN', 'SAN FRANCISCO SYMPHONY', 'SANDREW METRONOME', 'SAPHRANE', 'SAVOR', 'SCANBOX ENTERTAINMENT', 'SCANBOX', 'SCENIC LABS', 'SCHRÖDERMEDIA', 'SCHRODERMEDIA', 'SCHRODER MEDIA', 'SCORPION RELEASING', 'SCORPION', 'SCREAM TEAM RELEASING', 'SCREAM TEAM', 'SCREEN MEDIA', 'SCREEN', 'SCREENBOUND PICTURES', 'SCREENBOUND', 'SCREENWAVE MEDIA', 'SCREENWAVE', 'SECOND RUN', 'SECOND SIGHT', 'SEEDSMAN GROUP', 'SELECT VIDEO', 'SELECTA VISION', 'SENATOR', 'SENTAI FILMWORKS', 'SENTAI', 'SEVEN7', 'SEVERIN FILMS', 'SEVERIN', 'SEVILLE', 'SEYONS ENTERTAINMENT', 'SEYONS', 'SF STUDIOS', 'SGL ENTERTAINMENT', 'SGL', 'SHAMELESS', 'SHAMROCK MEDIA', 'SHAMROCK', 'SHANGHAI EPIC MUSIC ENTERTAINMENT', 'SHANGHAI EPIC ENTERTAINMENT', 'SHANGHAI EPIC MUSIC', 'SHANGHAI MUSIC ENTERTAINMENT', 'SHANGHAI ENTERTAINMENT', 'SHANGHAI MUSIC', 'SHANGHAI', 'SHEMAROO', 'SHOCHIKU', 'SHOCK', 'SHOGAKU KAN', 'SHOUT FACTORY', 'SHOUT! 
FACTORY', 'SHOUT', 'SHOUT!', 'SHOWBOX', 'SHOWTIME ENTERTAINMENT', 'SHOWTIME', 'SHRIEK SHOW', 'SHUDDER', 'SIDONIS', 'SIDONIS CALYSTA', 'SIGNAL ONE ENTERTAINMENT', 'SIGNAL ONE', 'SIGNATURE ENTERTAINMENT', 'SIGNATURE', 'SILVER VISION', 'SINISTER FILM', 'SINISTER', 'SIREN VISUAL ENTERTAINMENT', 'SIREN VISUAL', 'SIREN ENTERTAINMENT', 'SIREN', 'SKANI', 'SKY DIGI', - 'SLASHER // VIDEO', 'SLASHER / VIDEO', 'SLASHER VIDEO', 'SLASHER', 'SLOVAK FILM INSTITUTE', 'SLOVAK FILM', 'SFI', 'SM LIFE DESIGN GROUP', 'SMOOTH PICTURES', 'SMOOTH', 'SNAPPER MUSIC', 'SNAPPER', 'SODA PICTURES', 'SODA', 'SONO LUMINUS', 'SONY MUSIC', 'SONY PICTURES', 'SONY', 'SONY PICTURES CLASSICS', 'SONY CLASSICS', 'SOUL MEDIA', 'SOUL', 'SOULFOOD MUSIC DISTRIBUTION', 'SOULFOOD DISTRIBUTION', 'SOULFOOD MUSIC', 'SOULFOOD', 'SOYUZ', 'SPECTRUM', 'SPENTZOS FILM', 'SPENTZOS', 'SPIRIT ENTERTAINMENT', 'SPIRIT', 'SPIRIT MEDIA GMBH', 'SPIRIT MEDIA', 'SPLENDID ENTERTAINMENT', 'SPLENDID FILM', 'SPO', 'SQUARE ENIX', 'SRI BALAJI VIDEO', 'SRI BALAJI', 'SRI', 'SRI VIDEO', 'SRS CINEMA', 'SRS', 'SSO RECORDINGS', 'SSO', 'ST2 MUSIC', 'ST2', 'STAR MEDIA ENTERTAINMENT', 'STAR ENTERTAINMENT', 'STAR MEDIA', 'STAR', 'STARLIGHT', 'STARZ / ANCHOR BAY', 'STARZ ANCHOR BAY', 'STARZ', 'ANCHOR BAY', 'STER KINEKOR', 'STERLING ENTERTAINMENT', 'STERLING', 'STINGRAY', 'STOCKFISCH RECORDS', 'STOCKFISCH', 'STRAND RELEASING', 'STRAND', 'STUDIO 4K', 'STUDIO CANAL', 'STUDIO GHIBLI', 'GHIBLI', 'STUDIO HAMBURG ENTERPRISES', 'HAMBURG ENTERPRISES', 'STUDIO HAMBURG', 'HAMBURG', 'STUDIO S', 'SUBKULTUR ENTERTAINMENT', 'SUBKULTUR', 'SUEVIA FILMS', 'SUEVIA', 'SUMMIT ENTERTAINMENT', 'SUMMIT', 'SUNFILM ENTERTAINMENT', 'SUNFILM', 'SURROUND RECORDS', 'SURROUND', 'SVENSK FILMINDUSTRI', 'SVENSK', 'SWEN FILMES', 'SWEN FILMS', 'SWEN', 'SYNAPSE FILMS', 'SYNAPSE', 'SYNDICADO', 'SYNERGETIC', 'T- SERIES', 'T-SERIES', 'T SERIES', 'TSERIES', 'T.V.P.', 'TVP', 'TACET RECORDS', 'TACET', 'TAI SENG', 'TAI SHENG', 'TAKEONE', 'TAKESHOBO', 'TAMASA DIFFUSION', 'TC ENTERTAINMENT', 'TC', 'TDK', 'TEAM MARKETING', 'TEATRO REAL', 'TEMA DISTRIBUCIONES', 'TEMPE DIGITAL', 'TF1 VIDÉO', 'TF1 VIDEO', 'TF1', 'THE BLU', 'BLU', 'THE ECSTASY OF FILMS', 'THE FILM DETECTIVE', 'FILM DETECTIVE', 'THE JOKERS', 'JOKERS', 'THE ON', 'ON', 'THIMFILM', 'THIM FILM', 'THIM', 'THIRD WINDOW FILMS', 'THIRD WINDOW', '3RD WINDOW FILMS', '3RD WINDOW', 'THUNDERBEAN ANIMATION', 'THUNDERBEAN', 'THUNDERBIRD RELEASING', 'THUNDERBIRD', 'TIBERIUS FILM', 'TIME LIFE', 'TIMELESS MEDIA GROUP', 'TIMELESS MEDIA', 'TIMELESS GROUP', 'TIMELESS', 'TLA RELEASING', 'TLA', 'TOBIS FILM', 'TOBIS', 'TOEI', 'TOHO', 'TOKYO SHOCK', 'TOKYO', 'TONPOOL MEDIEN GMBH', 'TONPOOL MEDIEN', 'TOPICS ENTERTAINMENT', 'TOPICS', 'TOUCHSTONE PICTURES', 'TOUCHSTONE', 'TRANSMISSION FILMS', 'TRANSMISSION', 'TRAVEL VIDEO STORE', 'TRIART', 'TRIGON FILM', 'TRIGON', 'TRINITY HOME ENTERTAINMENT', 'TRINITY ENTERTAINMENT', 'TRINITY HOME', 'TRINITY', 'TRIPICTURES', 'TRI-PICTURES', 'TRI PICTURES', 'TROMA', 'TURBINE MEDIEN', 'TURTLE RECORDS', 'TURTLE', 'TVA FILMS', 'TVA', 'TWILIGHT TIME', 'TWILIGHT', 'TT', 'TWIN CO., LTD.', 'TWIN CO, LTD.', 'TWIN CO., LTD', 'TWIN CO, LTD', 'TWIN CO LTD', 'TWIN LTD', 'TWIN CO.', 'TWIN CO', 'TWIN', 'UCA', 'UDR', 'UEK', 'UFA/DVD', 'UFA DVD', 'UFADVD', 'UGC PH', 'ULTIMATE3DHEAVEN', 'ULTRA', 'UMBRELLA ENTERTAINMENT', 'UMBRELLA', 'UMC', "UNCORK'D ENTERTAINMENT", 'UNCORKD ENTERTAINMENT', 'UNCORK D ENTERTAINMENT', "UNCORK'D", 'UNCORK D', 'UNCORKD', 'UNEARTHED FILMS', 'UNEARTHED', 'UNI DISC', 'UNIMUNDOS', 'UNITEL', 'UNIVERSAL MUSIC', 'UNIVERSAL SONY PICTURES HOME 
ENTERTAINMENT', 'UNIVERSAL SONY PICTURES ENTERTAINMENT', 'UNIVERSAL SONY PICTURES HOME', 'UNIVERSAL SONY PICTURES', 'UNIVERSAL HOME ENTERTAINMENT', 'UNIVERSAL ENTERTAINMENT', - 'UNIVERSAL HOME', 'UNIVERSAL STUDIOS', 'UNIVERSAL', 'UNIVERSE LASER & VIDEO CO.', 'UNIVERSE LASER AND VIDEO CO.', 'UNIVERSE LASER & VIDEO CO', 'UNIVERSE LASER AND VIDEO CO', 'UNIVERSE LASER CO.', 'UNIVERSE LASER CO', 'UNIVERSE LASER', 'UNIVERSUM FILM', 'UNIVERSUM', 'UTV', 'VAP', 'VCI', 'VENDETTA FILMS', 'VENDETTA', 'VERSÁTIL HOME VIDEO', 'VERSÁTIL VIDEO', 'VERSÁTIL HOME', 'VERSÁTIL', 'VERSATIL HOME VIDEO', 'VERSATIL VIDEO', 'VERSATIL HOME', 'VERSATIL', 'VERTICAL ENTERTAINMENT', 'VERTICAL', 'VÉRTICE 360º', 'VÉRTICE 360', 'VERTICE 360o', 'VERTICE 360', 'VERTIGO BERLIN', 'VÉRTIGO FILMS', 'VÉRTIGO', 'VERTIGO FILMS', 'VERTIGO', 'VERVE PICTURES', 'VIA VISION ENTERTAINMENT', 'VIA VISION', 'VICOL ENTERTAINMENT', 'VICOL', 'VICOM', 'VICTOR ENTERTAINMENT', 'VICTOR', 'VIDEA CDE', 'VIDEO FILM EXPRESS', 'VIDEO FILM', 'VIDEO EXPRESS', 'VIDEO MUSIC, INC.', 'VIDEO MUSIC, INC', 'VIDEO MUSIC INC.', 'VIDEO MUSIC INC', 'VIDEO MUSIC', 'VIDEO SERVICE CORP.', 'VIDEO SERVICE CORP', 'VIDEO SERVICE', 'VIDEO TRAVEL', 'VIDEOMAX', 'VIDEO MAX', 'VII PILLARS ENTERTAINMENT', 'VII PILLARS', 'VILLAGE FILMS', 'VINEGAR SYNDROME', 'VINEGAR', 'VS', 'VINNY MOVIES', 'VINNY', 'VIRGIL FILMS & ENTERTAINMENT', 'VIRGIL FILMS AND ENTERTAINMENT', 'VIRGIL ENTERTAINMENT', 'VIRGIL FILMS', 'VIRGIL', 'VIRGIN RECORDS', 'VIRGIN', 'VISION FILMS', 'VISION', 'VISUAL ENTERTAINMENT GROUP', - 'VISUAL GROUP', 'VISUAL ENTERTAINMENT', 'VISUAL', 'VIVENDI VISUAL ENTERTAINMENT', 'VIVENDI VISUAL', 'VIVENDI', 'VIZ PICTURES', 'VIZ', 'VLMEDIA', 'VL MEDIA', 'VL', 'VOLGA', 'VVS FILMS', 'VVS', 'VZ HANDELS GMBH', 'VZ HANDELS', 'WARD RECORDS', 'WARD', 'WARNER BROS.', 'WARNER BROS', 'WARNER ARCHIVE', 'WARNER ARCHIVE COLLECTION', 'WAC', 'WARNER', 'WARNER MUSIC', 'WEA', 'WEINSTEIN COMPANY', 'WEINSTEIN', 'WELL GO USA', 'WELL GO', 'WELTKINO FILMVERLEIH', 'WEST VIDEO', 'WEST', 'WHITE PEARL MOVIES', 'WHITE PEARL', 'WICKED-VISION MEDIA', 'WICKED VISION MEDIA', 'WICKEDVISION MEDIA', 'WICKED-VISION', 'WICKED VISION', 'WICKEDVISION', 'WIENERWORLD', 'WILD BUNCH', 'WILD EYE RELEASING', 'WILD EYE', 'WILD SIDE VIDEO', 'WILD SIDE', 'WME', 'WOLFE VIDEO', 'WOLFE', 'WORD ON FIRE', 'WORKS FILM GROUP', 'WORLD WRESTLING', 'WVG MEDIEN', 'WWE STUDIOS', 'WWE', 'X RATED KULT', 'X-RATED KULT', 'X RATED CULT', 'X-RATED CULT', 'X RATED', 'X-RATED', 'XCESS', 'XLRATOR', 'XT VIDEO', 'XT', 'YAMATO VIDEO', 'YAMATO', 'YASH RAJ FILMS', 'YASH RAJS', 'ZEITGEIST FILMS', 'ZEITGEIST', 'ZENITH PICTURES', 'ZENITH', 'ZIMA', 'ZYLO', 'ZYX MUSIC', 'ZYX', - 'MASTERS OF CINEMA', 'MOC' + "01 DISTRIBUTION", + "100 DESTINATIONS TRAVEL FILM", + "101 FILMS", + "1FILMS", + "2 ENTERTAIN VIDEO", + "20TH CENTURY FOX", + "2L", + "3D CONTENT HUB", + "3D MEDIA", + "3L FILM", + "4DIGITAL", + "4DVD", + "4K ULTRA HD MOVIES", + "4K UHD", + "8-FILMS", + "84 ENTERTAINMENT", + "88 FILMS", + "@ANIME", + "ANIME", + "A CONTRACORRIENTE", + "A CONTRACORRIENTE FILMS", + "A&E HOME VIDEO", + "A&E", + "A&M RECORDS", + "A+E NETWORKS", + "A+R", + "A-FILM", + "AAA", + "AB VIDÉO", + "AB VIDEO", + "ABC - (AUSTRALIAN BROADCASTING CORPORATION)", + "ABC", + "ABKCO", + "ABSOLUT MEDIEN", + "ABSOLUTE", + "ACCENT FILM ENTERTAINMENT", + "ACCENTUS", + "ACORN MEDIA", + "AD VITAM", + "ADA", + "ADITYA VIDEOS", + "ADSO FILMS", + "AFM RECORDS", + "AGFA", + "AIX RECORDS", + "ALAMODE FILM", + "ALBA RECORDS", + "ALBANY RECORDS", + "ALBATROS", + "ALCHEMY", + "ALIVE", + "ALL ANIME", + "ALL 
INTERACTIVE ENTERTAINMENT", + "ALLEGRO", + "ALLIANCE", + "ALPHA MUSIC", + "ALTERDYSTRYBUCJA", + "ALTERED INNOCENCE", + "ALTITUDE FILM DISTRIBUTION", + "ALUCARD RECORDS", + "AMAZING D.C.", + "AMAZING DC", + "AMMO CONTENT", + "AMUSE SOFT ENTERTAINMENT", + "ANCONNECT", + "ANEC", + "ANIMATSU", + "ANIME HOUSE", + "ANIME LTD", + "ANIME WORKS", + "ANIMEIGO", + "ANIPLEX", + "ANOLIS ENTERTAINMENT", + "ANOTHER WORLD ENTERTAINMENT", + "AP INTERNATIONAL", + "APPLE", + "ARA MEDIA", + "ARBELOS", + "ARC ENTERTAINMENT", + "ARP SÉLECTION", + "ARP SELECTION", + "ARROW", + "ART SERVICE", + "ART VISION", + "ARTE ÉDITIONS", + "ARTE EDITIONS", + "ARTE VIDÉO", + "ARTE VIDEO", + "ARTHAUS MUSIK", + "ARTIFICIAL EYE", + "ARTSPLOITATION FILMS", + "ARTUS FILMS", + "ASCOT ELITE HOME ENTERTAINMENT", + "ASIA VIDEO", + "ASMIK ACE", + "ASTRO RECORDS & FILMWORKS", + "ASYLUM", + "ATLANTIC FILM", + "ATLANTIC RECORDS", + "ATLAS FILM", + "AUDIO VISUAL ENTERTAINMENT", + "AURO-3D CREATIVE LABEL", + "AURUM", + "AV VISIONEN", + "AV-JET", + "AVALON", + "AVENTI", + "AVEX TRAX", + "AXIOM", + "AXIS RECORDS", + "AYNGARAN", + "BAC FILMS", + "BACH FILMS", + "BANDAI VISUAL", + "BARCLAY", + "BBC", + "BRITISH BROADCASTING CORPORATION", + "BBI FILMS", + "BBI", + "BCI HOME ENTERTAINMENT", + "BEGGARS BANQUET", + "BEL AIR CLASSIQUES", + "BELGA FILMS", + "BELVEDERE", + "BENELUX FILM DISTRIBUTORS", + "BENNETT-WATT MEDIA", + "BERLIN CLASSICS", + "BERLINER PHILHARMONIKER RECORDINGS", + "BEST ENTERTAINMENT", + "BEYOND HOME ENTERTAINMENT", + "BFI VIDEO", + "BFI", + "BRITISH FILM INSTITUTE", + "BFS ENTERTAINMENT", + "BFS", + "BHAVANI", + "BIBER RECORDS", + "BIG HOME VIDEO", + "BILDSTÖRUNG", + "BILDSTORUNG", + "BILL ZEBUB", + "BIRNENBLATT", + "BIT WEL", + "BLACK BOX", + "BLACK HILL PICTURES", + "BLACK HILL", + "BLACK HOLE RECORDINGS", + "BLACK HOLE", + "BLAQOUT", + "BLAUFIELD MUSIC", + "BLAUFIELD", + "BLOCKBUSTER ENTERTAINMENT", + "BLOCKBUSTER", + "BLU PHASE MEDIA", + "BLU-RAY ONLY", + "BLU-RAY", + "BLURAY ONLY", + "BLURAY", + "BLUE GENTIAN RECORDS", + "BLUE KINO", + "BLUE UNDERGROUND", + "BMG/ARISTA", + "BMG", + "BMGARISTA", + "BMG ARISTA", + "ARISTA", + "ARISTA/BMG", + "ARISTABMG", + "ARISTA BMG", + "BONTON FILM", + "BONTON", + "BOOMERANG PICTURES", + "BOOMERANG", + "BQHL ÉDITIONS", + "BQHL EDITIONS", + "BQHL", + "BREAKING GLASS", + "BRIDGESTONE", + "BRINK", + "BROAD GREEN PICTURES", + "BROAD GREEN", + "BUSCH MEDIA GROUP", + "BUSCH", + "C MAJOR", + "C.B.S.", + "CAICHANG", + "CALIFÓRNIA FILMES", + "CALIFORNIA FILMES", + "CALIFORNIA", + "CAMEO", + "CAMERA OBSCURA", + "CAMERATA", + "CAMP MOTION PICTURES", + "CAMP MOTION", + "CAPELIGHT PICTURES", + "CAPELIGHT", + "CAPITOL", + "CAPITOL RECORDS", + "CAPRICCI", + "CARGO RECORDS", + "CARLOTTA FILMS", + "CARLOTTA", + "CARLOTA", + "CARMEN FILM", + "CASCADE", + "CATCHPLAY", + "CAULDRON FILMS", + "CAULDRON", + "CBS TELEVISION STUDIOS", + "CBS", + "CCTV", + "CCV ENTERTAINMENT", + "CCV", + "CD BABY", + "CD LAND", + "CECCHI GORI", + "CENTURY MEDIA", + "CHUAN XUN SHI DAI MULTIMEDIA", + "CINE-ASIA", + "CINÉART", + "CINEART", + "CINEDIGM", + "CINEFIL IMAGICA", + "CINEMA EPOCH", + "CINEMA GUILD", + "CINEMA LIBRE STUDIOS", + "CINEMA MONDO", + "CINEMATIC VISION", + "CINEPLOIT RECORDS", + "CINESTRANGE EXTREME", + "CITEL VIDEO", + "CITEL", + "CJ ENTERTAINMENT", + "CJ", + "CLASSIC MEDIA", + "CLASSICFLIX", + "CLASSICLINE", + "CLAUDIO RECORDS", + "CLEAR VISION", + "CLEOPATRA", + "CLOSE UP", + "CMS MEDIA LIMITED", + "CMV LASERVISION", + "CN ENTERTAINMENT", + "CODE RED", + "COHEN MEDIA GROUP", + "COHEN", + "COIN DE MIRE CINÉMA", 
+ "COIN DE MIRE CINEMA", + "COLOSSEO FILM", + "COLUMBIA", + "COLUMBIA PICTURES", + "COLUMBIA/TRI-STAR", + "TRI-STAR", + "COMMERCIAL MARKETING", + "CONCORD MUSIC GROUP", + "CONCORDE VIDEO", + "CONDOR", + "CONSTANTIN FILM", + "CONSTANTIN", + "CONSTANTINO FILMES", + "CONSTANTINO", + "CONSTRUCTIVE MEDIA SERVICE", + "CONSTRUCTIVE", + "CONTENT ZONE", + "CONTENTS GATE", + "COQUEIRO VERDE", + "CORNERSTONE MEDIA", + "CORNERSTONE", + "CP DIGITAL", + "CREST MOVIES", + "CRITERION", + "CRITERION COLLECTION", + "CC", + "CRYSTAL CLASSICS", + "CULT EPICS", + "CULT FILMS", + "CULT VIDEO", + "CURZON FILM WORLD", + "D FILMS", + "D'AILLY COMPANY", + "DAILLY COMPANY", + "D AILLY COMPANY", + "D'AILLY", + "DAILLY", + "D AILLY", + "DA CAPO", + "DA MUSIC", + "DALL'ANGELO PICTURES", + "DALLANGELO PICTURES", + "DALL'ANGELO", + "DALL ANGELO PICTURES", + "DALL ANGELO", + "DAREDO", + "DARK FORCE ENTERTAINMENT", + "DARK FORCE", + "DARK SIDE RELEASING", + "DARK SIDE", + "DAZZLER MEDIA", + "DAZZLER", + "DCM PICTURES", + "DCM", + "DEAPLANETA", + "DECCA", + "DEEPJOY", + "DEFIANT SCREEN ENTERTAINMENT", + "DEFIANT SCREEN", + "DEFIANT", + "DELOS", + "DELPHIAN RECORDS", + "DELPHIAN", + "DELTA MUSIC & ENTERTAINMENT", + "DELTA MUSIC AND ENTERTAINMENT", + "DELTA MUSIC ENTERTAINMENT", + "DELTA MUSIC", + "DELTAMAC CO. LTD.", + "DELTAMAC CO LTD", + "DELTAMAC CO", + "DELTAMAC", + "DEMAND MEDIA", + "DEMAND", + "DEP", + "DEUTSCHE GRAMMOPHON", + "DFW", + "DGM", + "DIAPHANA", + "DIGIDREAMS STUDIOS", + "DIGIDREAMS", + "DIGITAL ENVIRONMENTS", + "DIGITAL", + "DISCOTEK MEDIA", + "DISCOVERY CHANNEL", + "DISCOVERY", + "DISK KINO", + "DISNEY / BUENA VISTA", + "DISNEY", + "BUENA VISTA", + "DISNEY BUENA VISTA", + "DISTRIBUTION SELECT", + "DIVISA", + "DNC ENTERTAINMENT", + "DNC", + "DOGWOOF", + "DOLMEN HOME VIDEO", + "DOLMEN", + "DONAU FILM", + "DONAU", + "DORADO FILMS", + "DORADO", + "DRAFTHOUSE FILMS", + "DRAFTHOUSE", + "DRAGON FILM ENTERTAINMENT", + "DRAGON ENTERTAINMENT", + "DRAGON FILM", + "DRAGON", + "DREAMWORKS", + "DRIVE ON RECORDS", + "DRIVE ON", + "DRIVE-ON", + "DRIVEON", + "DS MEDIA", + "DTP ENTERTAINMENT AG", + "DTP ENTERTAINMENT", + "DTP AG", + "DTP", + "DTS ENTERTAINMENT", + "DTS", + "DUKE MARKETING", + "DUKE VIDEO DISTRIBUTION", + "DUKE", + "DUTCH FILMWORKS", + "DUTCH", + "DVD INTERNATIONAL", + "DVD", + "DYBEX", + "DYNAMIC", + "DYNIT", + "E1 ENTERTAINMENT", + "E1", + "EAGLE ENTERTAINMENT", + "EAGLE HOME ENTERTAINMENT PVT.LTD.", + "EAGLE HOME ENTERTAINMENT PVTLTD", + "EAGLE HOME ENTERTAINMENT PVT LTD", + "EAGLE HOME ENTERTAINMENT", + "EAGLE PICTURES", + "EAGLE ROCK ENTERTAINMENT", + "EAGLE ROCK", + "EAGLE VISION MEDIA", + "EAGLE VISION", + "EARMUSIC", + "EARTH ENTERTAINMENT", + "EARTH", + "ECHO BRIDGE ENTERTAINMENT", + "ECHO BRIDGE", + "EDEL GERMANY GMBH", + "EDEL GERMANY", + "EDEL RECORDS", + "EDITION TONFILM", + "EDITIONS MONTPARNASSE", + "EDKO FILMS LTD.", + "EDKO FILMS LTD", + "EDKO FILMS", + "EDKO", + "EIN'S M&M CO", + "EINS M&M CO", + "EIN'S M&M", + "EINS M&M", + "ELEA-MEDIA", + "ELEA MEDIA", + "ELEA", + "ELECTRIC PICTURE", + "ELECTRIC", + "ELEPHANT FILMS", + "ELEPHANT", + "ELEVATION", + "EMI", + "EMON", + "EMS", + "EMYLIA", + "ENE MEDIA", + "ENE", + "ENTERTAINMENT IN VIDEO", + "ENTERTAINMENT IN", + "ENTERTAINMENT ONE", + "ENTERTAINMENT ONE FILMS CANADA INC.", + "ENTERTAINMENT ONE FILMS CANADA INC", + "ENTERTAINMENT ONE FILMS CANADA", + "ENTERTAINMENT ONE CANADA INC", + "ENTERTAINMENT ONE CANADA", + "ENTERTAINMENTONE", + "EONE", + "EOS", + "EPIC PICTURES", + "EPIC", + "EPIC RECORDS", + "ERATO", + "EROS", + "ESC EDITIONS", + 
"ESCAPI MEDIA BV", + "ESOTERIC RECORDINGS", + "ESPN FILMS", + "EUREKA ENTERTAINMENT", + "EUREKA", + "EURO PICTURES", + "EURO VIDEO", + "EUROARTS", + "EUROPA FILMES", + "EUROPA", + "EUROPACORP", + "EUROZOOM", + "EXCEL", + "EXPLOSIVE MEDIA", + "EXPLOSIVE", + "EXTRALUCID FILMS", + "EXTRALUCID", + "EYE SEE MOVIES", + "EYE SEE", + "EYK MEDIA", + "EYK", + "FABULOUS FILMS", + "FABULOUS", + "FACTORIS FILMS", + "FACTORIS", + "FARAO RECORDS", + "FARBFILM HOME ENTERTAINMENT", + "FARBFILM ENTERTAINMENT", + "FARBFILM HOME", + "FARBFILM", + "FEELGOOD ENTERTAINMENT", + "FEELGOOD", + "FERNSEHJUWELEN", + "FILM CHEST", + "FILM MEDIA", + "FILM MOVEMENT", + "FILM4", + "FILMART", + "FILMAURO", + "FILMAX", + "FILMCONFECT HOME ENTERTAINMENT", + "FILMCONFECT ENTERTAINMENT", + "FILMCONFECT HOME", + "FILMCONFECT", + "FILMEDIA", + "FILMJUWELEN", + "FILMOTEKA NARODAWA", + "FILMRISE", + "FINAL CUT ENTERTAINMENT", + "FINAL CUT", + "FIREHOUSE 12 RECORDS", + "FIREHOUSE 12", + "FIRST INTERNATIONAL PRODUCTION", + "FIRST INTERNATIONAL", + "FIRST LOOK STUDIOS", + "FIRST LOOK", + "FLAGMAN TRADE", + "FLASHSTAR FILMES", + "FLASHSTAR", + "FLICKER ALLEY", + "FNC ADD CULTURE", + "FOCUS FILMES", + "FOCUS", + "FOKUS MEDIA", + "FOKUSA", + "FOX PATHE EUROPA", + "FOX PATHE", + "FOX EUROPA", + "FOX/MGM", + "FOX MGM", + "MGM", + "MGM/FOX", + "FOX", + "FPE", + "FRANCE TÉLÉVISIONS DISTRIBUTION", + "FRANCE TELEVISIONS DISTRIBUTION", + "FRANCE TELEVISIONS", + "FRANCE", + "FREE DOLPHIN ENTERTAINMENT", + "FREE DOLPHIN", + "FREESTYLE DIGITAL MEDIA", + "FREESTYLE DIGITAL", + "FREESTYLE", + "FREMANTLE HOME ENTERTAINMENT", + "FREMANTLE ENTERTAINMENT", + "FREMANTLE HOME", + "FREMANTL", + "FRENETIC FILMS", + "FRENETIC", + "FRONTIER WORKS", + "FRONTIER", + "FRONTIERS MUSIC", + "FRONTIERS RECORDS", + "FS FILM OY", + "FS FILM", + "FULL MOON FEATURES", + "FULL MOON", + "FUN CITY EDITIONS", + "FUN CITY", + "FUNIMATION ENTERTAINMENT", + "FUNIMATION", + "FUSION", + "FUTUREFILM", + "G2 PICTURES", + "G2", + "GAGA COMMUNICATIONS", + "GAGA", + "GAIAM", + "GALAPAGOS", + "GAMMA HOME ENTERTAINMENT", + "GAMMA ENTERTAINMENT", + "GAMMA HOME", + "GAMMA", + "GARAGEHOUSE PICTURES", + "GARAGEHOUSE", + "GARAGEPLAY (車庫娛樂)", + "車庫娛樂", + "GARAGEPLAY (Che Ku Yu Le )", + "GARAGEPLAY", + "Che Ku Yu Le", + "GAUMONT", + "GEFFEN", + "GENEON ENTERTAINMENT", + "GENEON", + "GENEON UNIVERSAL ENTERTAINMENT", + "GENERAL VIDEO RECORDING", + "GLASS DOLL FILMS", + "GLASS DOLL", + "GLOBE MUSIC MEDIA", + "GLOBE MUSIC", + "GLOBE MEDIA", + "GLOBE", + "GO ENTERTAIN", + "GO", + "GOLDEN HARVEST", + "GOOD!MOVIES", + "GOOD! 
MOVIES", + "GOOD MOVIES", + "GRAPEVINE VIDEO", + "GRAPEVINE", + "GRASSHOPPER FILM", + "GRASSHOPPER FILMS", + "GRASSHOPPER", + "GRAVITAS VENTURES", + "GRAVITAS", + "GREAT MOVIES", + "GREAT", + "GREEN APPLE ENTERTAINMENT", + "GREEN ENTERTAINMENT", + "GREEN APPLE", + "GREEN", + "GREENNARAE MEDIA", + "GREENNARAE", + "GRINDHOUSE RELEASING", + "GRINDHOUSE", + "GRIND HOUSE", + "GRYPHON ENTERTAINMENT", + "GRYPHON", + "GUNPOWDER & SKY", + "GUNPOWDER AND SKY", + "GUNPOWDER SKY", + "GUNPOWDER + SKY", + "GUNPOWDER", + "HANABEE ENTERTAINMENT", + "HANABEE", + "HANNOVER HOUSE", + "HANNOVER", + "HANSESOUND", + "HANSE SOUND", + "HANSE", + "HAPPINET", + "HARMONIA MUNDI", + "HARMONIA", + "HBO", + "HDC", + "HEC", + "HELL & BACK RECORDINGS", + "HELL AND BACK RECORDINGS", + "HELL & BACK", + "HELL AND BACK", + "HEN'S TOOTH VIDEO", + "HENS TOOTH VIDEO", + "HEN'S TOOTH", + "HENS TOOTH", + "HIGH FLIERS", + "HIGHLIGHT", + "HILLSONG", + "HISTORY CHANNEL", + "HISTORY", + "HK VIDÉO", + "HK VIDEO", + "HK", + "HMH HAMBURGER MEDIEN HAUS", + "HAMBURGER MEDIEN HAUS", + "HMH HAMBURGER MEDIEN", + "HMH HAMBURGER", + "HMH", + "HOLLYWOOD CLASSIC ENTERTAINMENT", + "HOLLYWOOD CLASSIC", + "HOLLYWOOD PICTURES", + "HOLLYWOOD", + "HOPSCOTCH ENTERTAINMENT", + "HOPSCOTCH", + "HPM", + "HÄNNSLER CLASSIC", + "HANNSLER CLASSIC", + "HANNSLER", + "I-CATCHER", + "I CATCHER", + "ICATCHER", + "I-ON NEW MEDIA", + "I ON NEW MEDIA", + "ION NEW MEDIA", + "ION MEDIA", + "I-ON", + "ION", + "IAN PRODUCTIONS", + "IAN", + "ICESTORM", + "ICON FILM DISTRIBUTION", + "ICON DISTRIBUTION", + "ICON FILM", + "ICON", + "IDEALE AUDIENCE", + "IDEALE", + "IFC FILMS", + "IFC", + "IFILM", + "ILLUSIONS UNLTD.", + "ILLUSIONS UNLTD", + "ILLUSIONS", + "IMAGE ENTERTAINMENT", + "IMAGE", + "IMAGEM FILMES", + "IMAGEM", + "IMOVISION", + "IMPERIAL CINEPIX", + "IMPRINT", + "IMPULS HOME ENTERTAINMENT", + "IMPULS ENTERTAINMENT", + "IMPULS HOME", + "IMPULS", + "IN-AKUSTIK", + "IN AKUSTIK", + "INAKUSTIK", + "INCEPTION MEDIA GROUP", + "INCEPTION MEDIA", + "INCEPTION GROUP", + "INCEPTION", + "INDEPENDENT", + "INDICAN", + "INDIE RIGHTS", + "INDIE", + "INDIGO", + "INFO", + "INJOINGAN", + "INKED PICTURES", + "INKED", + "INSIDE OUT MUSIC", + "INSIDE MUSIC", + "INSIDE OUT", + "INSIDE", + "INTERCOM", + "INTERCONTINENTAL VIDEO", + "INTERCONTINENTAL", + "INTERGROOVE", + "INTERSCOPE", + "INVINCIBLE PICTURES", + "INVINCIBLE", + "ISLAND/MERCURY", + "ISLAND MERCURY", + "ISLANDMERCURY", + "ISLAND & MERCURY", + "ISLAND AND MERCURY", + "ISLAND", + "ITN", + "ITV DVD", + "ITV", + "IVC", + "IVE ENTERTAINMENT", + "IVE", + "J&R ADVENTURES", + "J&R", + "JR", + "JAKOB", + "JONU MEDIA", + "JONU", + "JRB PRODUCTIONS", + "JRB", + "JUST BRIDGE ENTERTAINMENT", + "JUST BRIDGE", + "JUST ENTERTAINMENT", + "JUST", + "KABOOM ENTERTAINMENT", + "KABOOM", + "KADOKAWA ENTERTAINMENT", + "KADOKAWA", + "KAIROS", + "KALEIDOSCOPE ENTERTAINMENT", + "KALEIDOSCOPE", + "KAM & RONSON ENTERPRISES", + "KAM & RONSON", + "KAM&RONSON ENTERPRISES", + "KAM&RONSON", + "KAM AND RONSON ENTERPRISES", + "KAM AND RONSON", + "KANA HOME VIDEO", + "KARMA FILMS", + "KARMA", + "KATZENBERGER", + "KAZE", + "KBS MEDIA", + "KBS", + "KD MEDIA", + "KD", + "KING MEDIA", + "KING", + "KING RECORDS", + "KINO LORBER", + "KINO", + "KINO SWIAT", + "KINOKUNIYA", + "KINOWELT HOME ENTERTAINMENT/DVD", + "KINOWELT HOME ENTERTAINMENT", + "KINOWELT ENTERTAINMENT", + "KINOWELT HOME DVD", + "KINOWELT ENTERTAINMENT/DVD", + "KINOWELT DVD", + "KINOWELT", + "KIT PARKER FILMS", + "KIT PARKER", + "KITTY MEDIA", + "KNM HOME ENTERTAINMENT", + "KNM ENTERTAINMENT", + "KNM HOME", 
+ "KNM", + "KOBA FILMS", + "KOBA", + "KOCH ENTERTAINMENT", + "KOCH MEDIA", + "KOCH", + "KRAKEN RELEASING", + "KRAKEN", + "KSCOPE", + "KSM", + "KULTUR", + "L'ATELIER D'IMAGES", + "LATELIER D'IMAGES", + "L'ATELIER DIMAGES", + "LATELIER DIMAGES", + "L ATELIER D'IMAGES", + "L'ATELIER D IMAGES", + "L ATELIER D IMAGES", + "L'ATELIER", + "L ATELIER", + "LATELIER", + "LA AVENTURA AUDIOVISUAL", + "LA AVENTURA", + "LACE GROUP", + "LACE", + "LASER PARADISE", + "LAYONS", + "LCJ EDITIONS", + "LCJ", + "LE CHAT QUI FUME", + "LE PACTE", + "LEDICK FILMHANDEL", + "LEGEND", + "LEOMARK STUDIOS", + "LEOMARK", + "LEONINE FILMS", + "LEONINE", + "LICHTUNG MEDIA LTD", + "LICHTUNG LTD", + "LICHTUNG MEDIA LTD.", + "LICHTUNG LTD.", + "LICHTUNG MEDIA", + "LICHTUNG", + "LIGHTHOUSE HOME ENTERTAINMENT", + "LIGHTHOUSE ENTERTAINMENT", + "LIGHTHOUSE HOME", + "LIGHTHOUSE", + "LIGHTYEAR", + "LIONSGATE FILMS", + "LIONSGATE", + "LIZARD CINEMA TRADE", + "LLAMENTOL", + "LOBSTER FILMS", + "LOBSTER", + "LOGON", + "LORBER FILMS", + "LORBER", + "LOS BANDITOS FILMS", + "LOS BANDITOS", + "LOUD & PROUD RECORDS", + "LOUD AND PROUD RECORDS", + "LOUD & PROUD", + "LOUD AND PROUD", + "LSO LIVE", + "LUCASFILM", + "LUCKY RED", + "LUMIÈRE HOME ENTERTAINMENT", + "LUMIERE HOME ENTERTAINMENT", + "LUMIERE ENTERTAINMENT", + "LUMIERE HOME", + "LUMIERE", + "M6 VIDEO", + "M6", + "MAD DIMENSION", + "MADMAN ENTERTAINMENT", + "MADMAN", + "MAGIC BOX", + "MAGIC PLAY", + "MAGNA HOME ENTERTAINMENT", + "MAGNA ENTERTAINMENT", + "MAGNA HOME", + "MAGNA", + "MAGNOLIA PICTURES", + "MAGNOLIA", + "MAIDEN JAPAN", + "MAIDEN", + "MAJENG MEDIA", + "MAJENG", + "MAJESTIC HOME ENTERTAINMENT", + "MAJESTIC ENTERTAINMENT", + "MAJESTIC HOME", + "MAJESTIC", + "MANGA HOME ENTERTAINMENT", + "MANGA ENTERTAINMENT", + "MANGA HOME", + "MANGA", + "MANTA LAB", + "MAPLE STUDIOS", + "MAPLE", + "MARCO POLO PRODUCTION", + "MARCO POLO", + "MARIINSKY", + "MARVEL STUDIOS", + "MARVEL", + "MASCOT RECORDS", + "MASCOT", + "MASSACRE VIDEO", + "MASSACRE", + "MATCHBOX", + "MATRIX D", + "MAXAM", + "MAYA HOME ENTERTAINMENT", + "MAYA ENTERTAINMENT", + "MAYA HOME", + "MAYAT", + "MDG", + "MEDIA BLASTERS", + "MEDIA FACTORY", + "MEDIA TARGET DISTRIBUTION", + "MEDIA TARGET", + "MEDIAINVISION", + "MEDIATOON", + "MEDIATRES ESTUDIO", + "MEDIATRES STUDIO", + "MEDIATRES", + "MEDICI ARTS", + "MEDICI CLASSICS", + "MEDIUMRARE ENTERTAINMENT", + "MEDIUMRARE", + "MEDUSA", + "MEGASTAR", + "MEI AH", + "MELI MÉDIAS", + "MELI MEDIAS", + "MEMENTO FILMS", + "MEMENTO", + "MENEMSHA FILMS", + "MENEMSHA", + "MERCURY", + "MERCURY STUDIOS", + "MERGE SOFT PRODUCTIONS", + "MERGE PRODUCTIONS", + "MERGE SOFT", + "MERGE", + "METAL BLADE RECORDS", + "METAL BLADE", + "METEOR", + "METRO-GOLDWYN-MAYER", + "METRO GOLDWYN MAYER", + "METROGOLDWYNMAYER", + "METRODOME VIDEO", + "METRODOME", + "METROPOLITAN", + "MFA+", + "MFA", + "MIG FILMGROUP", + "MIG", + "MILESTONE", + "MILL CREEK ENTERTAINMENT", + "MILL CREEK", + "MILLENNIUM MEDIA", + "MILLENNIUM", + "MIRAGE ENTERTAINMENT", + "MIRAGE", + "MIRAMAX", + "MISTERIYA ZVUKA", + "MK2", + "MODE RECORDS", + "MODE", + "MOMENTUM PICTURES", + "MONDO HOME ENTERTAINMENT", + "MONDO ENTERTAINMENT", + "MONDO HOME", + "MONDO MACABRO", + "MONGREL MEDIA", + "MONOLIT", + "MONOLITH VIDEO", + "MONOLITH", + "MONSTER PICTURES", + "MONSTER", + "MONTEREY VIDEO", + "MONTEREY", + "MONUMENT RELEASING", + "MONUMENT", + "MORNINGSTAR", + "MORNING STAR", + "MOSERBAER", + "MOVIEMAX", + "MOVINSIDE", + "MPI MEDIA GROUP", + "MPI MEDIA", + "MPI", + "MR. 
BONGO FILMS", + "MR BONGO FILMS", + "MR BONGO", + "MRG (MERIDIAN)", + "MRG MERIDIAN", + "MRG", + "MERIDIAN", + "MUBI", + "MUG SHOT PRODUCTIONS", + "MUG SHOT", + "MULTIMUSIC", + "MULTI-MUSIC", + "MULTI MUSIC", + "MUSE", + "MUSIC BOX FILMS", + "MUSIC BOX", + "MUSICBOX", + "MUSIC BROKERS", + "MUSIC THEORIES", + "MUSIC VIDEO DISTRIBUTORS", + "MUSIC VIDEO", + "MUSTANG ENTERTAINMENT", + "MUSTANG", + "MVD VISUAL", + "MVD", + "MVD/VSC", + "MVL", + "MVM ENTERTAINMENT", + "MVM", + "MYNDFORM", + "MYSTIC NIGHT PICTURES", + "MYSTIC NIGHT", + "NAMELESS MEDIA", + "NAMELESS", + "NAPALM RECORDS", + "NAPALM", + "NATIONAL ENTERTAINMENT MEDIA", + "NATIONAL ENTERTAINMENT", + "NATIONAL MEDIA", + "NATIONAL FILM ARCHIVE", + "NATIONAL ARCHIVE", + "NATIONAL FILM", + "NATIONAL GEOGRAPHIC", + "NAT GEO TV", + "NAT GEO", + "NGO", + "NAXOS", + "NBCUNIVERSAL ENTERTAINMENT JAPAN", + "NBC UNIVERSAL ENTERTAINMENT JAPAN", + "NBCUNIVERSAL JAPAN", + "NBC UNIVERSAL JAPAN", + "NBC JAPAN", + "NBO ENTERTAINMENT", + "NBO", + "NEOS", + "NETFLIX", + "NETWORK", + "NEW BLOOD", + "NEW DISC", + "NEW KSM", + "NEW LINE CINEMA", + "NEW LINE", + "NEW MOVIE TRADING CO. LTD", + "NEW MOVIE TRADING CO LTD", + "NEW MOVIE TRADING CO", + "NEW MOVIE TRADING", + "NEW WAVE FILMS", + "NEW WAVE", + "NFI", + "NHK", + "NIPPONART", + "NIS AMERICA", + "NJUTAFILMS", + "NOBLE ENTERTAINMENT", + "NOBLE", + "NORDISK FILM", + "NORDISK", + "NORSK FILM", + "NORSK", + "NORTH AMERICAN MOTION PICTURES", + "NOS AUDIOVISUAIS", + "NOTORIOUS PICTURES", + "NOTORIOUS", + "NOVA MEDIA", + "NOVA", + "NOVA SALES AND DISTRIBUTION", + "NOVA SALES & DISTRIBUTION", + "NSM", + "NSM RECORDS", + "NUCLEAR BLAST", + "NUCLEUS FILMS", + "NUCLEUS", + "OBERLIN MUSIC", + "OBERLIN", + "OBRAS-PRIMAS DO CINEMA", + "OBRAS PRIMAS DO CINEMA", + "OBRASPRIMAS DO CINEMA", + "OBRAS-PRIMAS CINEMA", + "OBRAS PRIMAS CINEMA", + "OBRASPRIMAS CINEMA", + "OBRAS-PRIMAS", + "OBRAS PRIMAS", + "OBRASPRIMAS", + "ODEON", + "OFDB FILMWORKS", + "OFDB", + "OLIVE FILMS", + "OLIVE", + "ONDINE", + "ONSCREEN FILMS", + "ONSCREEN", + "OPENING DISTRIBUTION", + "OPERA AUSTRALIA", + "OPTIMUM HOME ENTERTAINMENT", + "OPTIMUM ENTERTAINMENT", + "OPTIMUM HOME", + "OPTIMUM", + "OPUS ARTE", + "ORANGE STUDIO", + "ORANGE", + "ORLANDO EASTWOOD FILMS", + "ORLANDO FILMS", + "ORLANDO EASTWOOD", + "ORLANDO", + "ORUSTAK PICTURES", + "ORUSTAK", + "OSCILLOSCOPE PICTURES", + "OSCILLOSCOPE", + "OUTPLAY", + "PALISADES TARTAN", + "PAN VISION", + "PANVISION", + "PANAMINT CINEMA", + "PANAMINT", + "PANDASTORM ENTERTAINMENT", + "PANDA STORM ENTERTAINMENT", + "PANDASTORM", + "PANDA STORM", + "PANDORA FILM", + "PANDORA", + "PANEGYRIC", + "PANORAMA", + "PARADE DECK FILMS", + "PARADE DECK", + "PARADISE", + "PARADISO FILMS", + "PARADOX", + "PARAMOUNT PICTURES", + "PARAMOUNT", + "PARIS FILMES", + "PARIS FILMS", + "PARIS", + "PARK CIRCUS", + "PARLOPHONE", + "PASSION RIVER", + "PATHE DISTRIBUTION", + "PATHE", + "PBS", + "PEACE ARCH TRINITY", + "PECCADILLO PICTURES", + "PEPPERMINT", + "PHASE 4 FILMS", + "PHASE 4", + "PHILHARMONIA BAROQUE", + "PICTURE HOUSE ENTERTAINMENT", + "PICTURE ENTERTAINMENT", + "PICTURE HOUSE", + "PICTURE", + "PIDAX", + "PINK FLOYD RECORDS", + "PINK FLOYD", + "PINNACLE FILMS", + "PINNACLE", + "PLAIN", + "PLATFORM ENTERTAINMENT LIMITED", + "PLATFORM ENTERTAINMENT LTD", + "PLATFORM ENTERTAINMENT LTD.", + "PLATFORM ENTERTAINMENT", + "PLATFORM", + "PLAYARTE", + "PLG UK CLASSICS", + "PLG UK", + "PLG", + "POLYBAND & TOPPIC VIDEO/WVG", + "POLYBAND AND TOPPIC VIDEO/WVG", + "POLYBAND & TOPPIC VIDEO WVG", + "POLYBAND & TOPPIC VIDEO AND WVG", + 
"POLYBAND & TOPPIC VIDEO & WVG", + "POLYBAND AND TOPPIC VIDEO WVG", + "POLYBAND AND TOPPIC VIDEO AND WVG", + "POLYBAND AND TOPPIC VIDEO & WVG", + "POLYBAND & TOPPIC VIDEO", + "POLYBAND AND TOPPIC VIDEO", + "POLYBAND & TOPPIC", + "POLYBAND AND TOPPIC", + "POLYBAND", + "WVG", + "POLYDOR", + "PONY", + "PONY CANYON", + "POTEMKINE", + "POWERHOUSE FILMS", + "POWERHOUSE", + "POWERSTATIOM", + "PRIDE & JOY", + "PRIDE AND JOY", + "PRINZ MEDIA", + "PRINZ", + "PRIS AUDIOVISUAIS", + "PRO VIDEO", + "PRO-VIDEO", + "PRO-MOTION", + "PRO MOTION", + "PROD. JRB", + "PROD JRB", + "PRODISC", + "PROKINO", + "PROVOGUE RECORDS", + "PROVOGUE", + "PROWARE", + "PULP VIDEO", + "PULP", + "PULSE VIDEO", + "PULSE", + "PURE AUDIO RECORDINGS", + "PURE AUDIO", + "PURE FLIX ENTERTAINMENT", + "PURE FLIX", + "PURE ENTERTAINMENT", + "PYRAMIDE VIDEO", + "PYRAMIDE", + "QUALITY FILMS", + "QUALITY", + "QUARTO VALLEY RECORDS", + "QUARTO VALLEY", + "QUESTAR", + "R SQUARED FILMS", + "R SQUARED", + "RAPID EYE MOVIES", + "RAPID EYE", + "RARO VIDEO", + "RARO", + "RAROVIDEO U.S.", + "RAROVIDEO US", + "RARO VIDEO US", + "RARO VIDEO U.S.", + "RARO U.S.", + "RARO US", + "RAVEN BANNER RELEASING", + "RAVEN BANNER", + "RAVEN", + "RAZOR DIGITAL ENTERTAINMENT", + "RAZOR DIGITAL", + "RCA", + "RCO LIVE", + "RCO", + "RCV", + "REAL GONE MUSIC", + "REAL GONE", + "REANIMEDIA", + "REANI MEDIA", + "REDEMPTION", + "REEL", + "RELIANCE HOME VIDEO & GAMES", + "RELIANCE HOME VIDEO AND GAMES", + "RELIANCE HOME VIDEO", + "RELIANCE VIDEO", + "RELIANCE HOME", + "RELIANCE", + "REM CULTURE", + "REMAIN IN LIGHT", + "REPRISE", + "RESEN", + "RETROMEDIA", + "REVELATION FILMS LTD.", + "REVELATION FILMS LTD", + "REVELATION FILMS", + "REVELATION LTD.", + "REVELATION LTD", + "REVELATION", + "REVOLVER ENTERTAINMENT", + "REVOLVER", + "RHINO MUSIC", + "RHINO", + "RHV", + "RIGHT STUF", + "RIMINI EDITIONS", + "RISING SUN MEDIA", + "RLJ ENTERTAINMENT", + "RLJ", + "ROADRUNNER RECORDS", + "ROADSHOW ENTERTAINMENT", + "ROADSHOW", + "RONE", + "RONIN FLIX", + "ROTANA HOME ENTERTAINMENT", + "ROTANA ENTERTAINMENT", + "ROTANA HOME", + "ROTANA", + "ROUGH TRADE", + "ROUNDER", + "SAFFRON HILL FILMS", + "SAFFRON HILL", + "SAFFRON", + "SAMUEL GOLDWYN FILMS", + "SAMUEL GOLDWYN", + "SAN FRANCISCO SYMPHONY", + "SANDREW METRONOME", + "SAPHRANE", + "SAVOR", + "SCANBOX ENTERTAINMENT", + "SCANBOX", + "SCENIC LABS", + "SCHRÖDERMEDIA", + "SCHRODERMEDIA", + "SCHRODER MEDIA", + "SCORPION RELEASING", + "SCORPION", + "SCREAM TEAM RELEASING", + "SCREAM TEAM", + "SCREEN MEDIA", + "SCREEN", + "SCREENBOUND PICTURES", + "SCREENBOUND", + "SCREENWAVE MEDIA", + "SCREENWAVE", + "SECOND RUN", + "SECOND SIGHT", + "SEEDSMAN GROUP", + "SELECT VIDEO", + "SELECTA VISION", + "SENATOR", + "SENTAI FILMWORKS", + "SENTAI", + "SEVEN7", + "SEVERIN FILMS", + "SEVERIN", + "SEVILLE", + "SEYONS ENTERTAINMENT", + "SEYONS", + "SF STUDIOS", + "SGL ENTERTAINMENT", + "SGL", + "SHAMELESS", + "SHAMROCK MEDIA", + "SHAMROCK", + "SHANGHAI EPIC MUSIC ENTERTAINMENT", + "SHANGHAI EPIC ENTERTAINMENT", + "SHANGHAI EPIC MUSIC", + "SHANGHAI MUSIC ENTERTAINMENT", + "SHANGHAI ENTERTAINMENT", + "SHANGHAI MUSIC", + "SHANGHAI", + "SHEMAROO", + "SHOCHIKU", + "SHOCK", + "SHOGAKU KAN", + "SHOUT FACTORY", + "SHOUT! 
FACTORY", + "SHOUT", + "SHOUT!", + "SHOWBOX", + "SHOWTIME ENTERTAINMENT", + "SHOWTIME", + "SHRIEK SHOW", + "SHUDDER", + "SIDONIS", + "SIDONIS CALYSTA", + "SIGNAL ONE ENTERTAINMENT", + "SIGNAL ONE", + "SIGNATURE ENTERTAINMENT", + "SIGNATURE", + "SILVER VISION", + "SINISTER FILM", + "SINISTER", + "SIREN VISUAL ENTERTAINMENT", + "SIREN VISUAL", + "SIREN ENTERTAINMENT", + "SIREN", + "SKANI", + "SKY DIGI", + "SLASHER // VIDEO", + "SLASHER / VIDEO", + "SLASHER VIDEO", + "SLASHER", + "SLOVAK FILM INSTITUTE", + "SLOVAK FILM", + "SFI", + "SM LIFE DESIGN GROUP", + "SMOOTH PICTURES", + "SMOOTH", + "SNAPPER MUSIC", + "SNAPPER", + "SODA PICTURES", + "SODA", + "SONO LUMINUS", + "SONY MUSIC", + "SONY PICTURES", + "SONY", + "SONY PICTURES CLASSICS", + "SONY CLASSICS", + "SOUL MEDIA", + "SOUL", + "SOULFOOD MUSIC DISTRIBUTION", + "SOULFOOD DISTRIBUTION", + "SOULFOOD MUSIC", + "SOULFOOD", + "SOYUZ", + "SPECTRUM", + "SPENTZOS FILM", + "SPENTZOS", + "SPIRIT ENTERTAINMENT", + "SPIRIT", + "SPIRIT MEDIA GMBH", + "SPIRIT MEDIA", + "SPLENDID ENTERTAINMENT", + "SPLENDID FILM", + "SPO", + "SQUARE ENIX", + "SRI BALAJI VIDEO", + "SRI BALAJI", + "SRI", + "SRI VIDEO", + "SRS CINEMA", + "SRS", + "SSO RECORDINGS", + "SSO", + "ST2 MUSIC", + "ST2", + "STAR MEDIA ENTERTAINMENT", + "STAR ENTERTAINMENT", + "STAR MEDIA", + "STAR", + "STARLIGHT", + "STARZ / ANCHOR BAY", + "STARZ ANCHOR BAY", + "STARZ", + "ANCHOR BAY", + "STER KINEKOR", + "STERLING ENTERTAINMENT", + "STERLING", + "STINGRAY", + "STOCKFISCH RECORDS", + "STOCKFISCH", + "STRAND RELEASING", + "STRAND", + "STUDIO 4K", + "STUDIO CANAL", + "STUDIO GHIBLI", + "GHIBLI", + "STUDIO HAMBURG ENTERPRISES", + "HAMBURG ENTERPRISES", + "STUDIO HAMBURG", + "HAMBURG", + "STUDIO S", + "SUBKULTUR ENTERTAINMENT", + "SUBKULTUR", + "SUEVIA FILMS", + "SUEVIA", + "SUMMIT ENTERTAINMENT", + "SUMMIT", + "SUNFILM ENTERTAINMENT", + "SUNFILM", + "SURROUND RECORDS", + "SURROUND", + "SVENSK FILMINDUSTRI", + "SVENSK", + "SWEN FILMES", + "SWEN FILMS", + "SWEN", + "SYNAPSE FILMS", + "SYNAPSE", + "SYNDICADO", + "SYNERGETIC", + "T- SERIES", + "T-SERIES", + "T SERIES", + "TSERIES", + "T.V.P.", + "TVP", + "TACET RECORDS", + "TACET", + "TAI SENG", + "TAI SHENG", + "TAKEONE", + "TAKESHOBO", + "TAMASA DIFFUSION", + "TC ENTERTAINMENT", + "TC", + "TDK", + "TEAM MARKETING", + "TEATRO REAL", + "TEMA DISTRIBUCIONES", + "TEMPE DIGITAL", + "TF1 VIDÉO", + "TF1 VIDEO", + "TF1", + "THE BLU", + "BLU", + "THE ECSTASY OF FILMS", + "THE FILM DETECTIVE", + "FILM DETECTIVE", + "THE JOKERS", + "JOKERS", + "THE ON", + "ON", + "THIMFILM", + "THIM FILM", + "THIM", + "THIRD WINDOW FILMS", + "THIRD WINDOW", + "3RD WINDOW FILMS", + "3RD WINDOW", + "THUNDERBEAN ANIMATION", + "THUNDERBEAN", + "THUNDERBIRD RELEASING", + "THUNDERBIRD", + "TIBERIUS FILM", + "TIME LIFE", + "TIMELESS MEDIA GROUP", + "TIMELESS MEDIA", + "TIMELESS GROUP", + "TIMELESS", + "TLA RELEASING", + "TLA", + "TOBIS FILM", + "TOBIS", + "TOEI", + "TOHO", + "TOKYO SHOCK", + "TOKYO", + "TONPOOL MEDIEN GMBH", + "TONPOOL MEDIEN", + "TOPICS ENTERTAINMENT", + "TOPICS", + "TOUCHSTONE PICTURES", + "TOUCHSTONE", + "TRANSMISSION FILMS", + "TRANSMISSION", + "TRAVEL VIDEO STORE", + "TRIART", + "TRIGON FILM", + "TRIGON", + "TRINITY HOME ENTERTAINMENT", + "TRINITY ENTERTAINMENT", + "TRINITY HOME", + "TRINITY", + "TRIPICTURES", + "TRI-PICTURES", + "TRI PICTURES", + "TROMA", + "TURBINE MEDIEN", + "TURTLE RECORDS", + "TURTLE", + "TVA FILMS", + "TVA", + "TWILIGHT TIME", + "TWILIGHT", + "TT", + "TWIN CO., LTD.", + "TWIN CO, LTD.", + "TWIN CO., LTD", + "TWIN CO, LTD", + "TWIN CO LTD", + 
"TWIN LTD", + "TWIN CO.", + "TWIN CO", + "TWIN", + "UCA", + "UDR", + "UEK", + "UFA/DVD", + "UFA DVD", + "UFADVD", + "UGC PH", + "ULTIMATE3DHEAVEN", + "ULTRA", + "UMBRELLA ENTERTAINMENT", + "UMBRELLA", + "UMC", + "UNCORK'D ENTERTAINMENT", + "UNCORKD ENTERTAINMENT", + "UNCORK D ENTERTAINMENT", + "UNCORK'D", + "UNCORK D", + "UNCORKD", + "UNEARTHED FILMS", + "UNEARTHED", + "UNI DISC", + "UNIMUNDOS", + "UNITEL", + "UNIVERSAL MUSIC", + "UNIVERSAL SONY PICTURES HOME ENTERTAINMENT", + "UNIVERSAL SONY PICTURES ENTERTAINMENT", + "UNIVERSAL SONY PICTURES HOME", + "UNIVERSAL SONY PICTURES", + "UNIVERSAL HOME ENTERTAINMENT", + "UNIVERSAL ENTERTAINMENT", + "UNIVERSAL HOME", + "UNIVERSAL STUDIOS", + "UNIVERSAL", + "UNIVERSE LASER & VIDEO CO.", + "UNIVERSE LASER AND VIDEO CO.", + "UNIVERSE LASER & VIDEO CO", + "UNIVERSE LASER AND VIDEO CO", + "UNIVERSE LASER CO.", + "UNIVERSE LASER CO", + "UNIVERSE LASER", + "UNIVERSUM FILM", + "UNIVERSUM", + "UTV", + "VAP", + "VCI", + "VENDETTA FILMS", + "VENDETTA", + "VERSÁTIL HOME VIDEO", + "VERSÁTIL VIDEO", + "VERSÁTIL HOME", + "VERSÁTIL", + "VERSATIL HOME VIDEO", + "VERSATIL VIDEO", + "VERSATIL HOME", + "VERSATIL", + "VERTICAL ENTERTAINMENT", + "VERTICAL", + "VÉRTICE 360º", + "VÉRTICE 360", + "VERTICE 360o", + "VERTICE 360", + "VERTIGO BERLIN", + "VÉRTIGO FILMS", + "VÉRTIGO", + "VERTIGO FILMS", + "VERTIGO", + "VERVE PICTURES", + "VIA VISION ENTERTAINMENT", + "VIA VISION", + "VICOL ENTERTAINMENT", + "VICOL", + "VICOM", + "VICTOR ENTERTAINMENT", + "VICTOR", + "VIDEA CDE", + "VIDEO FILM EXPRESS", + "VIDEO FILM", + "VIDEO EXPRESS", + "VIDEO MUSIC, INC.", + "VIDEO MUSIC, INC", + "VIDEO MUSIC INC.", + "VIDEO MUSIC INC", + "VIDEO MUSIC", + "VIDEO SERVICE CORP.", + "VIDEO SERVICE CORP", + "VIDEO SERVICE", + "VIDEO TRAVEL", + "VIDEOMAX", + "VIDEO MAX", + "VII PILLARS ENTERTAINMENT", + "VII PILLARS", + "VILLAGE FILMS", + "VINEGAR SYNDROME", + "VINEGAR", + "VS", + "VINNY MOVIES", + "VINNY", + "VIRGIL FILMS & ENTERTAINMENT", + "VIRGIL FILMS AND ENTERTAINMENT", + "VIRGIL ENTERTAINMENT", + "VIRGIL FILMS", + "VIRGIL", + "VIRGIN RECORDS", + "VIRGIN", + "VISION FILMS", + "VISION", + "VISUAL ENTERTAINMENT GROUP", + "VISUAL GROUP", + "VISUAL ENTERTAINMENT", + "VISUAL", + "VIVENDI VISUAL ENTERTAINMENT", + "VIVENDI VISUAL", + "VIVENDI", + "VIZ PICTURES", + "VIZ", + "VLMEDIA", + "VL MEDIA", + "VL", + "VOLGA", + "VVS FILMS", + "VVS", + "VZ HANDELS GMBH", + "VZ HANDELS", + "WARD RECORDS", + "WARD", + "WARNER BROS.", + "WARNER BROS", + "WARNER ARCHIVE", + "WARNER ARCHIVE COLLECTION", + "WAC", + "WARNER", + "WARNER MUSIC", + "WEA", + "WEINSTEIN COMPANY", + "WEINSTEIN", + "WELL GO USA", + "WELL GO", + "WELTKINO FILMVERLEIH", + "WEST VIDEO", + "WEST", + "WHITE PEARL MOVIES", + "WHITE PEARL", + "WICKED-VISION MEDIA", + "WICKED VISION MEDIA", + "WICKEDVISION MEDIA", + "WICKED-VISION", + "WICKED VISION", + "WICKEDVISION", + "WIENERWORLD", + "WILD BUNCH", + "WILD EYE RELEASING", + "WILD EYE", + "WILD SIDE VIDEO", + "WILD SIDE", + "WME", + "WOLFE VIDEO", + "WOLFE", + "WORD ON FIRE", + "WORKS FILM GROUP", + "WORLD WRESTLING", + "WVG MEDIEN", + "WWE STUDIOS", + "WWE", + "X RATED KULT", + "X-RATED KULT", + "X RATED CULT", + "X-RATED CULT", + "X RATED", + "X-RATED", + "XCESS", + "XLRATOR", + "XT VIDEO", + "XT", + "YAMATO VIDEO", + "YAMATO", + "YASH RAJ FILMS", + "YASH RAJS", + "ZEITGEIST FILMS", + "ZEITGEIST", + "ZENITH PICTURES", + "ZENITH", + "ZIMA", + "ZYLO", + "ZYX MUSIC", + "ZYX", + "MASTERS OF CINEMA", + "MOC", ] distributor_out = "" if distributor_in not in [None, "None", ""]: @@ -2262,47 
+4903,47 @@ def get_video_codec(self, bdinfo): "MPEG-2 Video": "MPEG-2", "MPEG-4 AVC Video": "AVC", "MPEG-H HEVC Video": "HEVC", - "VC-1 Video": "VC-1" + "VC-1 Video": "VC-1", } - codec = codecs.get(bdinfo['video'][0]['codec'], "") + codec = codecs.get(bdinfo["video"][0]["codec"], "") return codec def get_video_encode(self, mi, type, bdinfo): video_encode = "" codec = "" - bit_depth = '0' + bit_depth = "0" has_encode_settings = False try: - format = mi['media']['track'][1]['Format'] - format_profile = mi['media']['track'][1].get('Format_Profile', format) - if mi['media']['track'][1].get('Encoded_Library_Settings', None): + format = mi["media"]["track"][1]["Format"] + format_profile = mi["media"]["track"][1].get("Format_Profile", format) + if mi["media"]["track"][1].get("Encoded_Library_Settings", None): has_encode_settings = True - bit_depth = mi['media']['track'][1].get('BitDepth', '0') + bit_depth = mi["media"]["track"][1].get("BitDepth", "0") except Exception: - format = bdinfo['video'][0]['codec'] - format_profile = bdinfo['video'][0]['profile'] + format = bdinfo["video"][0]["codec"] + format_profile = bdinfo["video"][0]["profile"] if type in ("ENCODE", "WEBRIP"): # ENCODE or WEBRIP - if format == 'AVC': - codec = 'x264' - elif format == 'HEVC': - codec = 'x265' - elif format == 'AV1': - codec = 'AV1' - elif type in ('WEBDL', 'HDTV'): # WEB-DL - if format == 'AVC': - codec = 'H.264' - elif format == 'HEVC': - codec = 'H.265' - elif format == 'AV1': - codec = 'AV1' - - if type == 'HDTV' and has_encode_settings is True: - codec = codec.replace('H.', 'x') + if format == "AVC": + codec = "x264" + elif format == "HEVC": + codec = "x265" + elif format == "AV1": + codec = "AV1" + elif type in ("WEBDL", "HDTV"): # WEB-DL + if format == "AVC": + codec = "H.264" + elif format == "HEVC": + codec = "H.265" + elif format == "AV1": + codec = "AV1" + + if type == "HDTV" and has_encode_settings is True: + codec = codec.replace("H.", "x") elif format == "VP9": codec = "VP9" elif format == "VC-1": codec = "VC-1" - if format_profile == 'High 10': + if format_profile == "High 10": profile = "Hi10P" else: profile = "" @@ -2313,23 +4954,23 @@ def get_video_encode(self, mi, type, bdinfo): return video_encode, video_codec, has_encode_settings, bit_depth def get_edition(self, video, bdinfo, filelist, manual_edition): - if video.lower().startswith('dc'): - video = video.replace('dc', '', 1) + if video.lower().startswith("dc"): + video = video.replace("dc", "", 1) guess = guessit(video) - tag = guess.get('release_group', 'NOGROUP') + tag = guess.get("release_group", "NOGROUP") repack = "" edition = "" if bdinfo is not None: try: - edition = guessit(bdinfo['label'])['edition'] + edition = guessit(bdinfo["label"])["edition"] except Exception as e: print(f"BDInfo Edition Guess Error: {e}") edition = "" else: try: - edition = guess.get('edition', "") + edition = guess.get("edition", "") except Exception as e: print(f"Video Edition Guess Error: {e}") edition = "" @@ -2340,7 +4981,9 @@ def get_edition(self, video, bdinfo, filelist, manual_edition): if len(filelist) == 1: video = os.path.basename(video) - video = video.upper().replace('.', ' ').replace(tag.upper(), '').replace('-', '') + video = ( + video.upper().replace(".", " ").replace(tag.upper(), "").replace("-", "") + ) if "OPEN MATTE" in video: edition = edition + " Open Matte" @@ -2364,8 +5007,10 @@ def get_edition(self, video, bdinfo, filelist, manual_edition): print(f"Repack after Checks: {repack}") # Only remove REPACK, RERIP, or PROPER from edition if 
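
The get_video_encode hunk above encodes one naming rule worth spelling out: re-encoded sources (ENCODE, WEBRIP) are labelled by encoder (x264/x265), untouched streams (WEBDL, HDTV) by stream codec (H.264/H.265), and an HDTV capture that carries encoder settings is treated as a re-encode. A condensed, runnable restatement; the function name is illustrative and the VP9/VC-1 cases are omitted:

def label_codec(fmt, release_type, has_encode_settings=False):
    encoder_names = {"AVC": "x264", "HEVC": "x265", "AV1": "AV1"}
    stream_names = {"AVC": "H.264", "HEVC": "H.265", "AV1": "AV1"}
    if release_type in ("ENCODE", "WEBRIP"):
        return encoder_names.get(fmt, "")
    if release_type in ("WEBDL", "HDTV"):
        codec = stream_names.get(fmt, "")
        # An HDTV capture carrying x264/x265 encoder settings was re-encoded,
        # so it is named after the encoder rather than the stream codec.
        if release_type == "HDTV" and has_encode_settings:
            codec = codec.replace("H.", "x")
        return codec
    return ""

assert label_codec("AVC", "ENCODE") == "x264"
assert label_codec("HEVC", "WEBDL") == "H.265"
assert label_codec("AVC", "HDTV", has_encode_settings=True) == "x264"
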
they're not part of manual_edition - edition = re.sub(r"(\bREPACK\d?\b|\bRERIP\b|\bPROPER\b)", "", edition, flags=re.IGNORECASE).strip() - bad = ['internal', 'limited', 'retail'] + edition = re.sub( + r"(\bREPACK\d?\b|\bRERIP\b|\bPROPER\b)", "", edition, flags=re.IGNORECASE + ).strip() + bad = ["internal", "limited", "retail"] if edition.lower() in bad: edition = "" @@ -2375,6 +5020,7 @@ def get_edition(self, video, bdinfo, filelist, manual_edition): """ Create Torrent """ + class CustomTorrent(torf.Torrent): # Default piece size limits torf.Torrent.piece_size_min = 16384 # 16 KiB @@ -2384,18 +5030,26 @@ def __init__(self, meta, *args, **kwargs): super().__init__(*args, **kwargs) # Override piece_size_max if meta['max_piece_size'] is specified - if 'max_piece_size' in meta and meta['max_piece_size']: + if "max_piece_size" in meta and meta["max_piece_size"]: try: - max_piece_size_mib = int(meta['max_piece_size']) * 1024 * 1024 # Convert MiB to bytes - self.piece_size_max = min(max_piece_size_mib, torf.Torrent.piece_size_max) + max_piece_size_mib = ( + int(meta["max_piece_size"]) * 1024 * 1024 + ) # Convert MiB to bytes + self.piece_size_max = min( + max_piece_size_mib, torf.Torrent.piece_size_max + ) except ValueError: - self.piece_size_max = torf.Torrent.piece_size_max # Fallback to default if conversion fails + self.piece_size_max = ( + torf.Torrent.piece_size_max + ) # Fallback to default if conversion fails else: self.piece_size_max = torf.Torrent.piece_size_max # Calculate and set the piece size total_size = self._calculate_total_size() - piece_size = self.calculate_piece_size(total_size, self.piece_size_min, self.piece_size_max, self.files) + piece_size = self.calculate_piece_size( + total_size, self.piece_size_min, self.piece_size_max, self.files + ) self.piece_size = piece_size @property @@ -2406,20 +5060,30 @@ def piece_size(self): def piece_size(self, value): if value is None: total_size = self._calculate_total_size() - value = self.calculate_piece_size(total_size, self.piece_size_min, self.piece_size_max, self.files) + value = self.calculate_piece_size( + total_size, self.piece_size_min, self.piece_size_max, self.files + ) self._piece_size = value - self.metainfo['info']['piece length'] = value # Ensure 'piece length' is set + self.metainfo["info"][ + "piece length" + ] = value # Ensure 'piece length' is set @classmethod def calculate_piece_size(cls, total_size, min_size, max_size, files): our_min_size = 16384 - our_max_size = max_size if max_size else 67108864 # Default to 64 MiB if max_size is None + our_max_size = ( + max_size if max_size else 67108864 + ) # Default to 64 MiB if max_size is None piece_size = 8388608 # Start with 8 MiB num_pieces = math.ceil(total_size / piece_size) - torrent_file_size = 20 + (num_pieces * 20) + cls._calculate_pathname_bytes(files) # Approximate .torrent size + torrent_file_size = ( + 20 + (num_pieces * 20) + cls._calculate_pathname_bytes(files) + ) # Approximate .torrent size # Adjust the piece size to fit within the constraints - while not (1000 <= num_pieces <= 2000 and torrent_file_size <= 102400): # 100 KiB .torrent size limit + while not ( + 1000 <= num_pieces <= 2000 and torrent_file_size <= 102400 + ): # 100 KiB .torrent size limit if num_pieces < 1000: piece_size //= 2 if piece_size < our_min_size: @@ -2428,17 +5092,21 @@ def calculate_piece_size(cls, total_size, min_size, max_size, files): elif num_pieces > 2000: piece_size *= 2 if piece_size > our_max_size: - cli_ui.warning(f"Warning: Piece size exceeded 2000 pieces and .torrent will 
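
CustomTorrent.__init__ above clamps a user-configured cap, given in MiB, against torf's class-level maximum. A standalone sketch of just that clamp, assuming the 64 MiB default (67108864 bytes) used elsewhere in this hunk:

TORF_PIECE_SIZE_MAX = 67108864  # assumed 64 MiB default, per the hunk above

def resolve_piece_size_max(meta):
    raw = meta.get("max_piece_size")
    if not raw:
        return TORF_PIECE_SIZE_MAX
    try:
        configured = int(raw) * 1024 * 1024  # config value is given in MiB
    except ValueError:
        return TORF_PIECE_SIZE_MAX  # non-numeric value: keep the default
    return min(configured, TORF_PIECE_SIZE_MAX)

print(resolve_piece_size_max({"max_piece_size": "8"}))  # -> 8388608 (8 MiB)
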
be approximately {torrent_file_size / 1024:.2f} KiB! Using ({num_pieces}) pieces.") + cli_ui.warning( + f"Warning: Piece size exceeded 2000 pieces and .torrent will be approximately {torrent_file_size / 1024:.2f} KiB! Using ({num_pieces}) pieces." + ) piece_size = our_max_size break elif torrent_file_size > 102400: - cli_ui.warning('WARNING: .torrent size will exceed 100 KiB!') + cli_ui.warning("WARNING: .torrent size will exceed 100 KiB!") piece_size *= 2 if piece_size > our_max_size: piece_size = our_max_size break num_pieces = math.ceil(total_size / piece_size) - torrent_file_size = 20 + (num_pieces * 20) + cls._calculate_pathname_bytes(files) + torrent_file_size = ( + 20 + (num_pieces * 20) + cls._calculate_pathname_bytes(files) + ) return piece_size @@ -2447,29 +5115,45 @@ def _calculate_total_size(self): @classmethod def _calculate_pathname_bytes(cls, files): - total_pathname_bytes = sum(len(str(file).encode('utf-8')) for file in files) + total_pathname_bytes = sum(len(str(file).encode("utf-8")) for file in files) return total_pathname_bytes def validate_piece_size(self): - if not hasattr(self, '_piece_size') or self._piece_size is None: - self.piece_size = self.calculate_piece_size(self._calculate_total_size(), self.piece_size_min, self.piece_size_max, self.files) - self.metainfo['info']['piece length'] = self.piece_size # Ensure 'piece length' is set + if not hasattr(self, "_piece_size") or self._piece_size is None: + self.piece_size = self.calculate_piece_size( + self._calculate_total_size(), + self.piece_size_min, + self.piece_size_max, + self.files, + ) + self.metainfo["info"][ + "piece length" + ] = self.piece_size # Ensure 'piece length' is set def create_torrent(self, meta, path, output_filename): # Handle directories and file inclusion logic - if meta['isdir']: - if meta['keep_folder']: - cli_ui.info('--keep-folder was specified. Using complete folder for torrent creation.') + if meta["isdir"]: + if meta["keep_folder"]: + cli_ui.info( + "--keep-folder was specified. Using complete folder for torrent creation." 
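
The piece-size loop above targets 1000 to 2000 pieces while respecting the min/max bounds; the real method also estimates the resulting .torrent file size from the piece count and pathname bytes, which this condensed sketch omits:

import math

def pick_piece_size(total_size, min_size=16384, max_size=67108864):
    piece_size = 8 * 1024 * 1024  # start at 8 MiB, as the code above does
    while not 1000 <= math.ceil(total_size / piece_size) <= 2000:
        if math.ceil(total_size / piece_size) > 2000:
            piece_size *= 2   # fewer, larger pieces
        else:
            piece_size //= 2  # more, smaller pieces
        if not min_size <= piece_size <= max_size:
            # give up at the bounds rather than looping forever
            piece_size = max(min(piece_size, max_size), min_size)
            break
    return piece_size

print(pick_piece_size(30 * 1024**3) // 1024**2, "MiB")  # -> 16 MiB

Doubling the piece size roughly halves the piece count, so the loop converges quickly: a 30 GiB payload starts at 3840 pieces with 8 MiB pieces and settles at 1920 pieces with 16 MiB pieces.
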
+ ) else: os.chdir(path) - globs = glob.glob1(path, "*.mkv") + glob.glob1(path, "*.mp4") + glob.glob1(path, "*.ts") + globs = ( + glob.glob1(path, "*.mkv") + + glob.glob1(path, "*.mp4") + + glob.glob1(path, "*.ts") + ) no_sample_globs = [] for file in globs: - if not file.lower().endswith('sample.mkv') or "!sample" in file.lower(): + if ( + not file.lower().endswith("sample.mkv") + or "!sample" in file.lower() + ): no_sample_globs.append(os.path.abspath(f"{path}{os.sep}{file}")) if len(no_sample_globs) == 1: - path = meta['filelist'][0] - if meta['is_disc']: + path = meta["filelist"][0] + if meta["is_disc"]: include, exclude = "", "" else: exclude = ["*.*", "*sample.mkv", "!sample*.*"] @@ -2486,7 +5170,7 @@ def create_torrent(self, meta, path, output_filename): include_globs=include or [], creation_date=datetime.now(), comment="Created by L4G's Upload Assistant", - created_by="L4G's Upload Assistant" + created_by="L4G's Upload Assistant", ) # Ensure piece size is validated before writing @@ -2494,7 +5178,10 @@ def create_torrent(self, meta, path, output_filename): # Generate and write the new torrent torrent.generate(callback=self.torf_cb, interval=5) - torrent.write(f"{meta['base_dir']}/tmp/{meta['uuid']}/{output_filename}.torrent", overwrite=True) + torrent.write( + f"{meta['base_dir']}/tmp/{meta['uuid']}/{output_filename}.torrent", + overwrite=True, + ) torrent.verify_filesize(path) console.print("[bold green].torrent created", end="\r") @@ -2509,35 +5196,66 @@ def create_random_torrents(self, base_dir, uuid, num, path): base_torrent = Torrent.read(f"{base_dir}/tmp/{uuid}/BASE.torrent") for i in range(1, int(num) + 1): new_torrent = base_torrent - new_torrent.metainfo['info']['entropy'] = random.randint(1, 999999) - Torrent.copy(new_torrent).write(f"{base_dir}/tmp/{uuid}/[RAND-{i}]{manual_name}.torrent", overwrite=True) + new_torrent.metainfo["info"]["entropy"] = random.randint(1, 999999) + Torrent.copy(new_torrent).write( + f"{base_dir}/tmp/{uuid}/[RAND-{i}]{manual_name}.torrent", overwrite=True + ) def create_base_from_existing_torrent(self, torrentpath, base_dir, uuid): if os.path.exists(torrentpath): base_torrent = Torrent.read(torrentpath) - base_torrent.trackers = ['https://fake.tracker'] + base_torrent.trackers = ["https://fake.tracker"] base_torrent.comment = "Created by L4G's Upload Assistant" base_torrent.created_by = "Created by L4G's Upload Assistant" # Remove Un-whitelisted info from torrent - for each in list(base_torrent.metainfo['info']): - if each not in ('files', 'length', 'name', 'piece length', 'pieces', 'private', 'source'): - base_torrent.metainfo['info'].pop(each, None) + for each in list(base_torrent.metainfo["info"]): + if each not in ( + "files", + "length", + "name", + "piece length", + "pieces", + "private", + "source", + ): + base_torrent.metainfo["info"].pop(each, None) for each in list(base_torrent.metainfo): - if each not in ('announce', 'comment', 'creation date', 'created by', 'encoding', 'info'): + if each not in ( + "announce", + "comment", + "creation date", + "created by", + "encoding", + "info", + ): base_torrent.metainfo.pop(each, None) - base_torrent.source = 'L4G' + base_torrent.source = "L4G" base_torrent.private = True - Torrent.copy(base_torrent).write(f"{base_dir}/tmp/{uuid}/BASE.torrent", overwrite=True) + Torrent.copy(base_torrent).write( + f"{base_dir}/tmp/{uuid}/BASE.torrent", overwrite=True + ) """ Upload Screenshots """ - def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_img_list, return_dict, 
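
Two metainfo tricks appear in the hunk above and are easy to miss: create_random_torrents salts info['entropy'] so each copy gets a fresh infohash without re-hashing the payload, and create_base_from_existing_torrent strips everything except a whitelist of hash-relevant keys before reuse. A sketch of both, using torf as the surrounding code does; the function names are illustrative:

import random
from torf import Torrent  # the library the surrounding code uses

INFO_WHITELIST = ("files", "length", "name", "piece length",
                  "pieces", "private", "source")

def strip_info_dict(torrent):
    # Drop non-whitelisted keys so only hash-relevant fields remain.
    for key in list(torrent.metainfo["info"]):
        if key not in INFO_WHITELIST:
            torrent.metainfo["info"].pop(key, None)
    return torrent

def salt_infohash(torrent):
    # A random info['entropy'] value yields a distinct infohash per copy.
    torrent.metainfo["info"]["entropy"] = random.randint(1, 999999)
    return Torrent.copy(torrent)
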
retry_mode=False): + + def upload_screens( + self, + meta, + screens, + img_host_num, + i, + total_screens, + custom_img_list, + return_dict, + retry_mode=False, + ): import nest_asyncio + nest_asyncio.apply() os.chdir(f"{meta['base_dir']}/tmp/{meta['uuid']}") - initial_img_host = self.config['DEFAULT'][f'img_host_{img_host_num}'] - img_host = meta['imghost'] # Use the correctly updated image host from meta + initial_img_host = self.config["DEFAULT"][f"img_host_{img_host_num}"] + img_host = meta["imghost"] # Use the correctly updated image host from meta image_list = [] @@ -2546,24 +5264,34 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i existing_images = [] else: image_glob = glob.glob("*.png") - if 'POSTER.png' in image_glob: - image_glob.remove('POSTER.png') - existing_images = meta.get('image_list', []) - - if len(existing_images) >= total_screens and not retry_mode and img_host == initial_img_host: - console.print(f"[yellow]Skipping upload because images are already uploaded to {img_host}. Existing images: {len(existing_images)}, Required: {total_screens}") + if "POSTER.png" in image_glob: + image_glob.remove("POSTER.png") + existing_images = meta.get("image_list", []) + + if ( + len(existing_images) >= total_screens + and not retry_mode + and img_host == initial_img_host + ): + console.print( + f"[yellow]Skipping upload because images are already uploaded to {img_host}. Existing images: {len(existing_images)}, Required: {total_screens}" + ) return existing_images, total_screens if img_host == "imgbox": # Handle Imgbox uploads without the main progress bar console.print("[green]Uploading Screens to Imgbox...") - image_list = asyncio.run(self.imgbox_upload(f"{meta['base_dir']}/tmp/{meta['uuid']}", image_glob)) + image_list = asyncio.run( + self.imgbox_upload(f"{meta['base_dir']}/tmp/{meta['uuid']}", image_glob) + ) if not image_list: console.print("[yellow]Imgbox failed, trying next image host") img_host_num += 1 - img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num}') + img_host = self.config["DEFAULT"].get(f"img_host_{img_host_num}") if not img_host: - console.print("[red]All image hosts failed. Unable to complete uploads.") + console.print( + "[red]All image hosts failed. Unable to complete uploads." 
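
The per-host branches below differ mainly in endpoint and credential placement (imgbb passes the API key in the form data; ptscreens and lensdump send an X-API-Key header), but they share one request shape. A generic helper showing that shared shape; the helper itself is illustrative, and only the requests and base64 usage mirror the code:

import base64
import requests

def upload_base64_host(url, image_path, data_extra=None, headers=None, timeout=60):
    # POST a base64-encoded image body, as the imgbb/ptscreens/lensdump
    # branches below do; auth goes in data_extra or headers per host.
    with open(image_path, "rb") as fh:
        data = {"image": base64.b64encode(fh.read()).decode("utf8")}
    data.update(data_extra or {})
    response = requests.post(url, data=data, headers=headers or {}, timeout=timeout)
    return response.json()

# imgbb style: upload_base64_host("https://api.imgbb.com/1/upload", "shot.png",
#                                 data_extra={"key": "<api key>"})
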
+ ) return image_list, i else: return image_list, i # Return after successful Imgbox upload @@ -2572,117 +5300,168 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i TextColumn("[bold green]Uploading Screens..."), BarColumn(), "[cyan]{task.completed}/{task.total}", - TimeRemainingColumn() + TimeRemainingColumn(), ) as progress: while True: - upload_task = progress.add_task(f"[green]Uploading Screens to {img_host}...", total=len(image_glob[-screens:])) + upload_task = progress.add_task( + f"[green]Uploading Screens to {img_host}...", + total=len(image_glob[-screens:]), + ) for image in image_glob[-screens:]: try: timeout = 60 if img_host == "ptpimg": payload = { - 'format': 'json', - 'api_key': self.config['DEFAULT']['ptpimg_api'] + "format": "json", + "api_key": self.config["DEFAULT"]["ptpimg_api"], } - files = [('file-upload[0]', open(image, 'rb'))] - headers = {'referer': 'https://ptpimg.me/index.php'} - response = requests.post("https://ptpimg.me/upload.php", headers=headers, data=payload, files=files) + files = [("file-upload[0]", open(image, "rb"))] + headers = {"referer": "https://ptpimg.me/index.php"} + response = requests.post( + "https://ptpimg.me/upload.php", + headers=headers, + data=payload, + files=files, + ) response = response.json() - ptpimg_code = response[0]['code'] - ptpimg_ext = response[0]['ext'] - img_url = f"https://ptpimg.me/{ptpimg_code}.{ptpimg_ext}" + ptpimg_code = response[0]["code"] + ptpimg_ext = response[0]["ext"] + img_url = ( + f"https://ptpimg.me/{ptpimg_code}.{ptpimg_ext}" + ) raw_url = img_url web_url = img_url elif img_host == "imgbb": url = "https://api.imgbb.com/1/upload" data = { - 'key': self.config['DEFAULT']['imgbb_api'], - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + "key": self.config["DEFAULT"]["imgbb_api"], + "image": base64.b64encode( + open(image, "rb").read() + ).decode("utf8"), } - response = requests.post(url, data=data, timeout=timeout) + response = requests.post( + url, data=data, timeout=timeout + ) response = response.json() - img_url = response['data']['image']['url'] + img_url = response["data"]["image"]["url"] raw_url = img_url web_url = img_url elif img_host == "ptscreens": url = "https://ptscreens.com/api/1/upload" data = { - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + "image": base64.b64encode( + open(image, "rb").read() + ).decode("utf8") } headers = { - 'X-API-Key': self.config['DEFAULT']['ptscreens_api'], + "X-API-Key": self.config["DEFAULT"][ + "ptscreens_api" + ], } - response = requests.post(url, data=data, headers=headers, timeout=timeout) + response = requests.post( + url, data=data, headers=headers, timeout=timeout + ) response = response.json() - if response.get('status_code') != 200: - console.print("[yellow]PT Screens failed, trying next image host") + if response.get("status_code") != 200: + console.print( + "[yellow]PT Screens failed, trying next image host" + ) break - img_url = response['data']['image']['url'] + img_url = response["data"]["image"]["url"] raw_url = img_url web_url = img_url elif img_host == "pixhost": url = "https://api.pixhost.to/images" data = { - 'content_type': '0', - 'max_th_size': 350, + "content_type": "0", + "max_th_size": 350, } files = { - 'img': ('file-upload[0]', open(image, 'rb')), + "img": ("file-upload[0]", open(image, "rb")), } - response = requests.post(url, data=data, files=files, timeout=timeout) + response = requests.post( + url, data=data, files=files, timeout=timeout + ) if response.status_code 
!= 200: - console.print("[yellow]Pixhost failed, trying next image host") + console.print( + "[yellow]Pixhost failed, trying next image host" + ) break response = response.json() - raw_url = response['th_url'].replace('https://t', 'https://img').replace('/thumbs/', '/images/') - img_url = response['th_url'] - web_url = response['show_url'] + raw_url = ( + response["th_url"] + .replace("https://t", "https://img") + .replace("/thumbs/", "/images/") + ) + img_url = response["th_url"] + web_url = response["show_url"] elif img_host == "lensdump": url = "https://lensdump.com/api/1/upload" data = { - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + "image": base64.b64encode( + open(image, "rb").read() + ).decode("utf8") } headers = { - 'X-API-Key': self.config['DEFAULT']['lensdump_api'], + "X-API-Key": self.config["DEFAULT"]["lensdump_api"], } - response = requests.post(url, data=data, headers=headers, timeout=timeout) + response = requests.post( + url, data=data, headers=headers, timeout=timeout + ) response = response.json() - if response.get('status_code') != 200: - console.print("[yellow]Lensdump failed, trying next image host") + if response.get("status_code") != 200: + console.print( + "[yellow]Lensdump failed, trying next image host" + ) break - img_url = response['data']['image']['url'] + img_url = response["data"]["image"]["url"] raw_url = img_url - web_url = response['data']['url_viewer'] + web_url = response["data"]["url_viewer"] else: - console.print(f"[red]Unsupported image host: {img_host}") + console.print( + f"[red]Unsupported image host: {img_host}" + ) break # Update progress bar and print the result on the same line - progress.console.print(f"[cyan]Uploaded image {i + 1}/{total_screens}: {raw_url}", end='\r') + progress.console.print( + f"[cyan]Uploaded image {i + 1}/{total_screens}: {raw_url}", + end="\r", + ) # Add the image details to the list - image_dict = {'img_url': img_url, 'raw_url': raw_url, 'web_url': web_url} + image_dict = { + "img_url": img_url, + "raw_url": raw_url, + "web_url": web_url, + } image_list.append(image_dict) progress.advance(upload_task) i += 1 except Exception as e: - console.print(f"[yellow]Failed to upload {image} to {img_host}. Exception: {str(e)}") + console.print( + f"[yellow]Failed to upload {image} to {img_host}. Exception: {str(e)}" + ) break time.sleep(0.5) if i >= total_screens: - return_dict['image_list'] = image_list - console.print(f"\n[cyan]Completed uploading images. Total uploaded: {len(image_list)}") + return_dict["image_list"] = image_list + console.print( + f"\n[cyan]Completed uploading images. Total uploaded: {len(image_list)}" + ) return image_list, i # If we broke out of the loop due to a failure, switch to the next host and retry img_host_num += 1 - img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num}') + img_host = self.config["DEFAULT"].get(f"img_host_{img_host_num}") if not img_host: - console.print("[red]All image hosts failed. Unable to complete uploads.") + console.print( + "[red]All image hosts failed. Unable to complete uploads." 
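
The failure path above is a failover walk: on any per-host error the loop increments img_host_num, re-reads img_host_{n} from config, and gives up only when no further host is configured. A minimal sketch with hypothetical per-host uploader callables standing in for the host-specific branches:

def upload_with_failover(config_default, image, uploaders, start_num=1):
    # Walk img_host_1, img_host_2, ... until one uploader succeeds.
    host_num = start_num
    while True:
        host = config_default.get(f"img_host_{host_num}")
        if not host:
            return None  # every configured host failed
        try:
            return uploaders[host](image)  # hypothetical per-host callable
        except Exception:
            host_num += 1  # fall through to the next configured host

def _broken(image):
    raise RuntimeError("host down")

result = upload_with_failover(
    {"img_host_1": "ptpimg", "img_host_2": "imgbb"},
    "shot.png",
    {"ptpimg": _broken, "imgbb": lambda image: f"uploaded {image} to imgbb"},
)
print(result)  # -> uploaded shot.png to imgbb
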
+ ) return image_list, i # Ensure that if all attempts fail, a valid tuple is returned @@ -2697,20 +5476,24 @@ async def imgbox_upload(self, chdir, image_glob): TextColumn("[bold green]Uploading Screens to Imgbox..."), BarColumn(), "[cyan]{task.completed}/{task.total}", - TimeRemainingColumn() + TimeRemainingColumn(), ) as progress: upload_task = progress.add_task("Uploading...", total=len(image_glob)) - async with pyimgbox.Gallery(thumb_width=350, square_thumbs=False) as gallery: + async with pyimgbox.Gallery( + thumb_width=350, square_thumbs=False + ) as gallery: async for submission in gallery.add(image_glob): - if not submission['success']: - console.print(f"[red]There was an error uploading to imgbox: [yellow]{submission['error']}[/yellow][/red]") + if not submission["success"]: + console.print( + f"[red]There was an error uploading to imgbox: [yellow]{submission['error']}[/yellow][/red]" + ) return [] else: image_dict = {} - image_dict['web_url'] = submission['web_url'] - image_dict['img_url'] = submission['thumbnail_url'] - image_dict['raw_url'] = submission['image_url'] + image_dict["web_url"] = submission["web_url"] + image_dict["img_url"] = submission["thumbnail_url"] + image_dict["raw_url"] = submission["image_url"] image_list.append(image_dict) # Update the progress bar @@ -2719,48 +5502,48 @@ async def imgbox_upload(self, chdir, image_glob): return image_list async def get_name(self, meta): - type = meta.get('type', "") - title = meta.get('title', "") - alt_title = meta.get('aka', "") - year = meta.get('year', "") - resolution = meta.get('resolution', "") + type = meta.get("type", "") + title = meta.get("title", "") + alt_title = meta.get("aka", "") + year = meta.get("year", "") + resolution = meta.get("resolution", "") if resolution == "OTHER": resolution = "" - audio = meta.get('audio', "") - service = meta.get('service', "") - season = meta.get('season', "") - episode = meta.get('episode', "") - part = meta.get('part', "") - repack = meta.get('repack', "") - three_d = meta.get('3D', "") - tag = meta.get('tag', "") - source = meta.get('source', "") - uhd = meta.get('uhd', "") - hdr = meta.get('hdr', "") - episode_title = meta.get('episode_title', '') - if meta.get('is_disc', "") == "BDMV": # Disk - video_codec = meta.get('video_codec', "") - region = meta.get('region', "") - elif meta.get('is_disc', "") == "DVD": - region = meta.get('region', "") - dvd_size = meta.get('dvd_size', "") + audio = meta.get("audio", "") + service = meta.get("service", "") + season = meta.get("season", "") + episode = meta.get("episode", "") + part = meta.get("part", "") + repack = meta.get("repack", "") + three_d = meta.get("3D", "") + tag = meta.get("tag", "") + source = meta.get("source", "") + uhd = meta.get("uhd", "") + hdr = meta.get("hdr", "") + episode_title = meta.get("episode_title", "") + if meta.get("is_disc", "") == "BDMV": # Disk + video_codec = meta.get("video_codec", "") + region = meta.get("region", "") + elif meta.get("is_disc", "") == "DVD": + region = meta.get("region", "") + dvd_size = meta.get("dvd_size", "") else: - video_codec = meta.get('video_codec', "") - video_encode = meta.get('video_encode', "") - edition = meta.get('edition', "") + video_codec = meta.get("video_codec", "") + video_encode = meta.get("video_encode", "") + edition = meta.get("edition", "") - if meta['category'] == "TV": - if meta['search_year'] != "": - year = meta['year'] + if meta["category"] == "TV": + if meta["search_year"] != "": + year = meta["year"] else: year = "" - if meta.get('no_season', 
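
The imgbox_upload coroutine above shows the whole pyimgbox surface this code relies on: Gallery as an async context manager, and gallery.add() as an async generator of per-file submission dicts. A trimmed usage sketch with the same keys the code reads:

import asyncio
import pyimgbox

async def upload_to_imgbox(paths):
    urls = []
    async with pyimgbox.Gallery(thumb_width=350, square_thumbs=False) as gallery:
        async for submission in gallery.add(paths):
            if not submission["success"]:
                raise RuntimeError(submission["error"])
            urls.append({"web_url": submission["web_url"],
                         "img_url": submission["thumbnail_url"],
                         "raw_url": submission["image_url"]})
    return urls

# asyncio.run(upload_to_imgbox(["screen1.png", "screen2.png"]))
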
False) is True: - season = '' - if meta.get('no_year', False) is True: - year = '' - if meta.get('no_aka', False) is True: - alt_title = '' - if meta['debug']: + if meta.get("no_season", False) is True: + season = "" + if meta.get("no_year", False) is True: + year = "" + if meta.get("no_aka", False) is True: + alt_title = "" + if meta["debug"]: console.log("[cyan]get_name cat/type") console.log(f"CATEGORY: {meta['category']}") console.log(f"TYPE: {meta['type']}") @@ -2768,69 +5551,78 @@ async def get_name(self, meta): console.log(meta) # YAY NAMING FUN - if meta['category'] == "MOVIE": # MOVIE SPECIFIC + if meta["category"] == "MOVIE": # MOVIE SPECIFIC if type == "DISC": # Disk - if meta['is_disc'] == 'BDMV': + if meta["is_disc"] == "BDMV": name = f"{title} {alt_title} {year} {three_d} {edition} {repack} {resolution} {region} {uhd} {source} {hdr} {video_codec} {audio}" - potential_missing = ['edition', 'region', 'distributor'] - elif meta['is_disc'] == 'DVD': + potential_missing = ["edition", "region", "distributor"] + elif meta["is_disc"] == "DVD": name = f"{title} {alt_title} {year} {edition} {repack} {source} {dvd_size} {audio}" - potential_missing = ['edition', 'distributor'] - elif meta['is_disc'] == 'HDDVD': + potential_missing = ["edition", "distributor"] + elif meta["is_disc"] == "HDDVD": name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {source} {video_codec} {audio}" - potential_missing = ['edition', 'region', 'distributor'] - elif type == "REMUX" and source in ("BluRay", "HDDVD"): # BluRay/HDDVD Remux + potential_missing = ["edition", "region", "distributor"] + elif type == "REMUX" and source in ( + "BluRay", + "HDDVD", + ): # BluRay/HDDVD Remux name = f"{title} {alt_title} {year} {three_d} {edition} {repack} {resolution} {uhd} {source} REMUX {hdr} {video_codec} {audio}" - potential_missing = ['edition', 'description'] - elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD"): # DVD Remux + potential_missing = ["edition", "description"] + elif type == "REMUX" and source in ( + "PAL DVD", + "NTSC DVD", + "DVD", + ): # DVD Remux name = f"{title} {alt_title} {year} {edition} {repack} {source} REMUX {audio}" - potential_missing = ['edition', 'description'] + potential_missing = ["edition", "description"] elif type == "ENCODE": # Encode name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {uhd} {source} {audio} {hdr} {video_encode}" - potential_missing = ['edition', 'description'] + potential_missing = ["edition", "description"] elif type == "WEBDL": # WEB-DL name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {uhd} {service} WEB-DL {audio} {hdr} {video_encode}" - potential_missing = ['edition', 'service'] + potential_missing = ["edition", "service"] elif type == "WEBRIP": # WEBRip name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {uhd} {service} WEBRip {audio} {hdr} {video_encode}" - potential_missing = ['edition', 'service'] + potential_missing = ["edition", "service"] elif type == "HDTV": # HDTV name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {source} {audio} {video_encode}" potential_missing = [] - elif meta['category'] == "TV": # TV SPECIFIC + elif meta["category"] == "TV": # TV SPECIFIC if type == "DISC": # Disk - if meta['is_disc'] == 'BDMV': + if meta["is_disc"] == "BDMV": name = f"{title} {year} {alt_title} {season}{episode} {three_d} {edition} {repack} {resolution} {region} {uhd} {source} {hdr} {video_codec} {audio}" - potential_missing = ['edition', 'region', 'distributor'] - 
if meta['is_disc'] == 'DVD': + potential_missing = ["edition", "region", "distributor"] + if meta["is_disc"] == "DVD": name = f"{title} {alt_title} {season}{episode}{three_d} {edition} {repack} {source} {dvd_size} {audio}" - potential_missing = ['edition', 'distributor'] - elif meta['is_disc'] == 'HDDVD': + potential_missing = ["edition", "distributor"] + elif meta["is_disc"] == "HDDVD": name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {source} {video_codec} {audio}" - potential_missing = ['edition', 'region', 'distributor'] + potential_missing = ["edition", "region", "distributor"] elif type == "REMUX" and source in ("BluRay", "HDDVD"): # BluRay Remux name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {three_d} {edition} {repack} {resolution} {uhd} {source} REMUX {hdr} {video_codec} {audio}" # SOURCE - potential_missing = ['edition', 'description'] + potential_missing = ["edition", "description"] elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD"): # DVD Remux name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {source} REMUX {audio}" # SOURCE - potential_missing = ['edition', 'description'] + potential_missing = ["edition", "description"] elif type == "ENCODE": # Encode name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {resolution} {uhd} {source} {audio} {hdr} {video_encode}" # SOURCE - potential_missing = ['edition', 'description'] + potential_missing = ["edition", "description"] elif type == "WEBDL": # WEB-DL name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {resolution} {uhd} {service} WEB-DL {audio} {hdr} {video_encode}" - potential_missing = ['edition', 'service'] + potential_missing = ["edition", "service"] elif type == "WEBRIP": # WEBRip name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {resolution} {uhd} {service} WEBRip {audio} {hdr} {video_encode}" - potential_missing = ['edition', 'service'] + potential_missing = ["edition", "service"] elif type == "HDTV": # HDTV name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {resolution} {source} {audio} {video_encode}" potential_missing = [] try: - name = ' '.join(name.split()) + name = " ".join(name.split()) except Exception: - console.print("[bold red]Unable to generate name. Please re-run and correct any of the following args if needed.") + console.print( + "[bold red]Unable to generate name. Please re-run and correct any of the following args if needed." 
+ ) console.print(f"--category [yellow]{meta['category']}") console.print(f"--type [yellow]{meta['type']}") console.print(f"--source [yellow]{meta['source']}") @@ -2842,16 +5634,16 @@ async def get_name(self, meta): return name_notag, name, clean_name, potential_missing async def get_season_episode(self, video, meta): - if meta['category'] == 'TV': - filelist = meta['filelist'] - meta['tv_pack'] = 0 + if meta["category"] == "TV": + filelist = meta["filelist"] + meta["tv_pack"] = 0 is_daily = False - if meta['anime'] is False: + if meta["anime"] is False: try: - if meta.get('manual_date'): + if meta.get("manual_date"): raise ManualDateException # noqa: F405 try: - guess_year = guessit(video)['year'] + guess_year = guessit(video)["year"] except Exception: guess_year = "" if guessit(video)["season"] == guess_year: @@ -2867,8 +5659,14 @@ async def get_season_episode(self, video, meta): except Exception: try: - guess_date = meta.get('manual_date', guessit(video)['date']) if meta.get('manual_date') else guessit(video)['date'] - season_int, episode_int = self.daily_to_tmdb_season_episode(meta.get('tmdb'), guess_date) + guess_date = ( + meta.get("manual_date", guessit(video)["date"]) + if meta.get("manual_date") + else guessit(video)["date"] + ) + season_int, episode_int = self.daily_to_tmdb_season_episode( + meta.get("tmdb"), guess_date + ) # season = f"S{season_int.zfill(2)}" # episode = f"E{episode_int.zfill(2)}" season = str(guess_date) @@ -2882,11 +5680,11 @@ async def get_season_episode(self, video, meta): if is_daily is not True: episodes = "" if len(filelist) == 1: - episodes = guessit(video)['episode'] + episodes = guessit(video)["episode"] if isinstance(episodes, list): episode = "" for item in guessit(video)["episode"]: - ep = (str(item).zfill(2)) + ep = str(item).zfill(2) episode += f"E{ep}" episode_int = episodes[0] else: @@ -2895,204 +5693,550 @@ async def get_season_episode(self, video, meta): else: episode = "" episode_int = "0" - meta['tv_pack'] = 1 + meta["tv_pack"] = 1 except Exception: episode = "" episode_int = "0" - meta['tv_pack'] = 1 + meta["tv_pack"] = 1 else: # If Anime parsed = anitopy.parse(Path(video).name) - romaji, mal_id, eng_title, seasonYear, anilist_episodes = self.get_romaji(parsed['anime_title'], meta.get('mal', None)) + romaji, mal_id, eng_title, seasonYear, anilist_episodes = ( + self.get_romaji(parsed["anime_title"], meta.get("mal", None)) + ) if mal_id: - meta['mal_id'] = mal_id - if meta.get('tmdb_manual', None) is None: - year = parsed.get('anime_year', str(seasonYear)) - meta = await self.get_tmdb_id(guessit(parsed['anime_title'], {"excludes": ["country", "language"]})['title'], year, meta, meta['category']) + meta["mal_id"] = mal_id + if meta.get("tmdb_manual", None) is None: + year = parsed.get("anime_year", str(seasonYear)) + meta = await self.get_tmdb_id( + guessit( + parsed["anime_title"], {"excludes": ["country", "language"]} + )["title"], + year, + meta, + meta["category"], + ) meta = await self.tmdb_other_meta(meta) - if meta['category'] != "TV": + if meta["category"] != "TV": return meta - tag = parsed.get('release_group', "") + tag = parsed.get("release_group", "") if tag != "": - meta['tag'] = f"-{tag}" + meta["tag"] = f"-{tag}" if len(filelist) == 1: try: - episodes = parsed.get('episode_number', guessit(video).get('episode', '1')) + episodes = parsed.get( + "episode_number", guessit(video).get("episode", "1") + ) if not isinstance(episodes, list) and not episodes.isnumeric(): - episodes = guessit(video)['episode'] + episodes = 
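
A small point about the naming templates in get_name above: they interpolate many possibly-empty fields, so the final " ".join(name.split()) is load-bearing; it collapses the doubled spaces that empty fields leave behind. A toy illustration:

fields = {"title": "Example Movie", "aka": "", "year": "2020",
          "repack": "", "resolution": "1080p", "video_encode": "x264"}
name = (f"{fields['title']} {fields['aka']} {fields['year']} "
        f"{fields['repack']} {fields['resolution']} {fields['video_encode']}")
print(repr(name))              # 'Example Movie  2020  1080p x264'
print(" ".join(name.split()))  # Example Movie 2020 1080p x264
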
guessit(video)["episode"] if isinstance(episodes, list): episode_int = int(episodes[0]) # Always convert to integer - episode = "".join([f"E{str(int(item)).zfill(2)}" for item in episodes]) + episode = "".join( + [f"E{str(int(item)).zfill(2)}" for item in episodes] + ) else: episode_int = int(episodes) # Convert to integer episode = f"E{str(episode_int).zfill(2)}" except Exception: episode = "E01" episode_int = 1 # Ensure it's an integer - console.print('[bold yellow]There was an error guessing the episode number. Guessing E01. Use [bold green]--episode #[/bold green] to correct if needed') + console.print( + "[bold yellow]There was an error guessing the episode number. Guessing E01. Use [bold green]--episode #[/bold green] to correct if needed" + ) await asyncio.sleep(1.5) else: episode = "" episode_int = 0 # Ensure it's an integer - meta['tv_pack'] = 1 + meta["tv_pack"] = 1 try: - if meta.get('season_int'): - season_int = int(meta.get('season_int')) # Convert to integer + if meta.get("season_int"): + season_int = int(meta.get("season_int")) # Convert to integer else: - season = parsed.get('anime_season', guessit(video).get('season', '1')) + season = parsed.get( + "anime_season", guessit(video).get("season", "1") + ) season_int = int(season) # Convert to integer season = f"S{str(season_int).zfill(2)}" except Exception: try: if episode_int >= anilist_episodes: params = { - 'id': str(meta['tvdb_id']), - 'origin': 'tvdb', - 'absolute': str(episode_int), + "id": str(meta["tvdb_id"]), + "origin": "tvdb", + "absolute": str(episode_int), } url = "https://thexem.info/map/single" response = requests.post(url, params=params).json() - if response['result'] == "failure": + if response["result"] == "failure": raise XEMNotFound # noqa: F405 - if meta['debug']: - console.log(f"[cyan]TheXEM Absolute -> Standard[/cyan]\n{response}") - season_int = int(response['data']['scene']['season']) # Convert to integer + if meta["debug"]: + console.log( + f"[cyan]TheXEM Absolute -> Standard[/cyan]\n{response}" + ) + season_int = int( + response["data"]["scene"]["season"] + ) # Convert to integer season = f"S{str(season_int).zfill(2)}" if len(filelist) == 1: - episode_int = int(response['data']['scene']['episode']) # Convert to integer + episode_int = int( + response["data"]["scene"]["episode"] + ) # Convert to integer episode = f"E{str(episode_int).zfill(2)}" else: season_int = 1 # Default to 1 if error occurs season = "S01" names_url = f"https://thexem.info/map/names?origin=tvdb&id={str(meta['tvdb_id'])}" names_response = requests.get(names_url).json() - if meta['debug']: - console.log(f'[cyan]Matching Season Number from TheXEM\n{names_response}') + if meta["debug"]: + console.log( + f"[cyan]Matching Season Number from TheXEM\n{names_response}" + ) difference = 0 - if names_response['result'] == "success": - for season_num, values in names_response['data'].items(): + if names_response["result"] == "success": + for season_num, values in names_response[ + "data" + ].items(): for lang, names in values.items(): if lang == "jp": for name in names: - romaji_check = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", romaji.lower().replace(' ', '')) - name_check = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", name.lower().replace(' ', '')) - diff = SequenceMatcher(None, romaji_check, name_check).ratio() - if romaji_check in name_check and diff >= difference: - season_int = int(season_num) if season_num != "all" else 1 # Convert to integer - season = f"S{str(season_int).zfill(2)}" + romaji_check = re.sub( + r"[^0-9a-zA-Z\[\\]]+", + "", + 
romaji.lower().replace(" ", ""), + ) + name_check = re.sub( + r"[^0-9a-zA-Z\[\\]]+", + "", + name.lower().replace(" ", ""), + ) + diff = SequenceMatcher( + None, romaji_check, name_check + ).ratio() + if ( + romaji_check in name_check + and diff >= difference + ): + season_int = ( + int(season_num) + if season_num != "all" + else 1 + ) # Convert to integer + season = ( + f"S{str(season_int).zfill(2)}" + ) difference = diff if lang == "us": for name in names: - eng_check = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", eng_title.lower().replace(' ', '')) - name_check = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", name.lower().replace(' ', '')) - diff = SequenceMatcher(None, eng_check, name_check).ratio() - if eng_check in name_check and diff >= difference: - season_int = int(season_num) if season_num != "all" else 1 # Convert to integer - season = f"S{str(season_int).zfill(2)}" + eng_check = re.sub( + r"[^0-9a-zA-Z\[\\]]+", + "", + eng_title.lower().replace(" ", ""), + ) + name_check = re.sub( + r"[^0-9a-zA-Z\[\\]]+", + "", + name.lower().replace(" ", ""), + ) + diff = SequenceMatcher( + None, eng_check, name_check + ).ratio() + if ( + eng_check in name_check + and diff >= difference + ): + season_int = ( + int(season_num) + if season_num != "all" + else 1 + ) # Convert to integer + season = ( + f"S{str(season_int).zfill(2)}" + ) difference = diff else: raise XEMNotFound # noqa: F405 except Exception: - if meta['debug']: + if meta["debug"]: console.print_exception() try: - season = guessit(video).get('season', '1') + season = guessit(video).get("season", "1") season_int = int(season) # Convert to integer except Exception: season_int = 1 # Default to 1 if error occurs season = "S01" - console.print(f"[bold yellow]{meta['title']} does not exist on thexem, guessing {season}") - console.print(f"[bold yellow]If [green]{season}[/green] is incorrect, use --season to correct") + console.print( + f"[bold yellow]{meta['title']} does not exist on thexem, guessing {season}" + ) + console.print( + f"[bold yellow]If [green]{season}[/green] is incorrect, use --season to correct" + ) await asyncio.sleep(3) - if meta.get('manual_season', None) is None: - meta['season'] = season + if meta.get("manual_season", None) is None: + meta["season"] = season else: - season_int = meta['manual_season'].lower().replace('s', '') - meta['season'] = f"S{meta['manual_season'].lower().replace('s', '').zfill(2)}" - if meta.get('manual_episode', None) is None: - meta['episode'] = episode + season_int = meta["manual_season"].lower().replace("s", "") + meta["season"] = ( + f"S{meta['manual_season'].lower().replace('s', '').zfill(2)}" + ) + if meta.get("manual_episode", None) is None: + meta["episode"] = episode else: - episode_int = meta['manual_episode'].lower().replace('e', '') - meta['episode'] = f"E{meta['manual_episode'].lower().replace('e', '').zfill(2)}" - meta['tv_pack'] = 0 + episode_int = meta["manual_episode"].lower().replace("e", "") + meta["episode"] = ( + f"E{meta['manual_episode'].lower().replace('e', '').zfill(2)}" + ) + meta["tv_pack"] = 0 # if " COMPLETE " in Path(video).name.replace('.', ' '): # meta['season'] = "COMPLETE" - meta['season_int'] = season_int - meta['episode_int'] = episode_int + meta["season_int"] = season_int + meta["episode_int"] = episode_int - meta['episode_title_storage'] = guessit(video, {"excludes": "part"}).get('episode_title', '') - if meta['season'] == "S00" or meta['episode'] == "E00": - meta['episode_title'] = meta['episode_title_storage'] + meta["episode_title_storage"] = guessit(video, {"excludes": 
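
The TheXEM matching above normalizes both titles (lowercase, separators stripped), requires substring containment, and keeps the season whose candidate name scores the highest SequenceMatcher ratio, defaulting to season 1. A condensed sketch; the character class in the real regex differs slightly, and the season_names shape is illustrative:

import re
from difflib import SequenceMatcher

def best_season(query, season_names):
    # season_names: {season_number: [candidate titles]} (illustrative shape).
    def normalize(s):
        return re.sub(r"[^0-9a-zA-Z]+", "", s.lower())
    q = normalize(query)
    best, best_ratio = 1, 0.0  # default to season 1, as the code above does
    for season_num, names in season_names.items():
        for name in names:
            n = normalize(name)
            ratio = SequenceMatcher(None, q, n).ratio()
            if q in n and ratio >= best_ratio:
                best, best_ratio = int(season_num), ratio
    return best

print(best_season("Shingeki no Kyojin",
                  {"1": ["Shingeki no Kyojin"],
                   "3": ["Shingeki no Kyojin Season 3"]}))  # -> 1
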
"part"}).get( + "episode_title", "" + ) + if meta["season"] == "S00" or meta["episode"] == "E00": + meta["episode_title"] = meta["episode_title_storage"] # Guess the part of the episode (if available) - meta['part'] = "" - if meta['tv_pack'] == 1: - part = guessit(os.path.dirname(video)).get('part') - meta['part'] = f"Part {part}" if part else "" + meta["part"] = "" + if meta["tv_pack"] == 1: + part = guessit(os.path.dirname(video)).get("part") + meta["part"] = f"Part {part}" if part else "" return meta - def get_service(self, video=None, tag=None, audio=None, guess_title=None, get_services_only=False): + def get_service( + self, + video=None, + tag=None, + audio=None, + guess_title=None, + get_services_only=False, + ): services = { - '9NOW': '9NOW', '9Now': '9NOW', 'AE': 'AE', 'A&E': 'AE', 'AJAZ': 'AJAZ', 'Al Jazeera English': 'AJAZ', - 'ALL4': 'ALL4', 'Channel 4': 'ALL4', 'AMBC': 'AMBC', 'ABC': 'AMBC', 'AMC': 'AMC', 'AMZN': 'AMZN', - 'Amazon Prime': 'AMZN', 'ANLB': 'ANLB', 'AnimeLab': 'ANLB', 'ANPL': 'ANPL', 'Animal Planet': 'ANPL', - 'AOL': 'AOL', 'ARD': 'ARD', 'AS': 'AS', 'Adult Swim': 'AS', 'ATK': 'ATK', "America's Test Kitchen": 'ATK', - 'ATVP': 'ATVP', 'AppleTV': 'ATVP', 'AUBC': 'AUBC', 'ABC Australia': 'AUBC', 'BCORE': 'BCORE', 'BKPL': 'BKPL', - 'Blackpills': 'BKPL', 'BluTV': 'BLU', 'Binge': 'BNGE', 'BOOM': 'BOOM', 'Boomerang': 'BOOM', 'BRAV': 'BRAV', - 'BravoTV': 'BRAV', 'CBC': 'CBC', 'CBS': 'CBS', 'CC': 'CC', 'Comedy Central': 'CC', 'CCGC': 'CCGC', - 'Comedians in Cars Getting Coffee': 'CCGC', 'CHGD': 'CHGD', 'CHRGD': 'CHGD', 'CMAX': 'CMAX', 'Cinemax': 'CMAX', - 'CMOR': 'CMOR', 'CMT': 'CMT', 'Country Music Television': 'CMT', 'CN': 'CN', 'Cartoon Network': 'CN', 'CNBC': 'CNBC', - 'CNLP': 'CNLP', 'Canal+': 'CNLP', 'COOK': 'COOK', 'CORE': 'CORE', 'CR': 'CR', 'Crunchy Roll': 'CR', 'Crave': 'CRAV', - 'CRIT': 'CRIT', 'Criterion': 'CRIT', 'CRKL': 'CRKL', 'Crackle': 'CRKL', 'CSPN': 'CSPN', 'CSpan': 'CSPN', 'CTV': 'CTV', 'CUR': 'CUR', - 'CuriosityStream': 'CUR', 'CW': 'CW', 'The CW': 'CW', 'CWS': 'CWS', 'CWSeed': 'CWS', 'DAZN': 'DAZN', 'DCU': 'DCU', - 'DC Universe': 'DCU', 'DDY': 'DDY', 'Digiturk Diledigin Yerde': 'DDY', 'DEST': 'DEST', 'DramaFever': 'DF', 'DHF': 'DHF', - 'Deadhouse Films': 'DHF', 'DISC': 'DISC', 'Discovery': 'DISC', 'DIY': 'DIY', 'DIY Network': 'DIY', 'DOCC': 'DOCC', - 'Doc Club': 'DOCC', 'DPLY': 'DPLY', 'DPlay': 'DPLY', 'DRPO': 'DRPO', 'Discovery Plus': 'DSCP', 'DSKI': 'DSKI', - 'Daisuki': 'DSKI', 'DSNP': 'DSNP', 'Disney+': 'DSNP', 'DSNY': 'DSNY', 'Disney': 'DSNY', 'DTV': 'DTV', - 'EPIX': 'EPIX', 'ePix': 'EPIX', 'ESPN': 'ESPN', 'ESQ': 'ESQ', 'Esquire': 'ESQ', 'ETTV': 'ETTV', 'El Trece': 'ETTV', - 'ETV': 'ETV', 'E!': 'ETV', 'FAM': 'FAM', 'Fandor': 'FANDOR', 'Facebook Watch': 'FBWatch', 'FJR': 'FJR', - 'Family Jr': 'FJR', 'FOOD': 'FOOD', 'Food Network': 'FOOD', 'FOX': 'FOX', 'Fox': 'FOX', 'Fox Premium': 'FOXP', - 'UFC Fight Pass': 'FP', 'FPT': 'FPT', 'FREE': 'FREE', 'Freeform': 'FREE', 'FTV': 'FTV', 'FUNI': 'FUNI', 'FUNi': 'FUNI', - 'Foxtel': 'FXTL', 'FYI': 'FYI', 'FYI Network': 'FYI', 'GC': 'GC', 'NHL GameCenter': 'GC', 'GLBL': 'GLBL', - 'Global': 'GLBL', 'GLOB': 'GLOB', 'GloboSat Play': 'GLOB', 'GO90': 'GO90', 'GagaOOLala': 'Gaga', 'HBO': 'HBO', - 'HBO Go': 'HBO', 'HGTV': 'HGTV', 'HIDI': 'HIDI', 'HIST': 'HIST', 'History': 'HIST', 'HLMK': 'HLMK', 'Hallmark': 'HLMK', - 'HMAX': 'HMAX', 'HBO Max': 'HMAX', 'HS': 'HTSR', 'HTSR': 'HTSR', 'HSTR': 'Hotstar', 'HULU': 'HULU', 'Hulu': 'HULU', 'hoichoi': 'HoiChoi', 'ID': 'ID', - 'Investigation Discovery': 'ID', 'IFC': 'IFC', 
'iflix': 'IFX', 'National Audiovisual Institute': 'INA', 'ITV': 'ITV', - 'KAYO': 'KAYO', 'KNOW': 'KNOW', 'Knowledge Network': 'KNOW', 'KNPY': 'KNPY', 'Kanopy': 'KNPY', 'LIFE': 'LIFE', 'Lifetime': 'LIFE', 'LN': 'LN', - 'MA': 'MA', 'Movies Anywhere': 'MA', 'MAX': 'MAX', 'MBC': 'MBC', 'MNBC': 'MNBC', 'MSNBC': 'MNBC', 'MTOD': 'MTOD', 'Motor Trend OnDemand': 'MTOD', 'MTV': 'MTV', 'MUBI': 'MUBI', - 'NATG': 'NATG', 'National Geographic': 'NATG', 'NBA': 'NBA', 'NBA TV': 'NBA', 'NBC': 'NBC', 'NF': 'NF', 'Netflix': 'NF', - 'National Film Board': 'NFB', 'NFL': 'NFL', 'NFLN': 'NFLN', 'NFL Now': 'NFLN', 'NICK': 'NICK', 'Nickelodeon': 'NICK', 'NRK': 'NRK', - 'Norsk Rikskringkasting': 'NRK', 'OnDemandKorea': 'ODK', 'Opto': 'OPTO', 'Oprah Winfrey Network': 'OWN', 'PA': 'PA', 'PBS': 'PBS', - 'PBSK': 'PBSK', 'PBS Kids': 'PBSK', 'PCOK': 'PCOK', 'Peacock': 'PCOK', 'PLAY': 'PLAY', 'PLUZ': 'PLUZ', 'Pluzz': 'PLUZ', 'PMNP': 'PMNP', - 'PMNT': 'PMNT', 'PMTP': 'PMTP', 'POGO': 'POGO', 'PokerGO': 'POGO', 'PSN': 'PSN', 'Playstation Network': 'PSN', 'PUHU': 'PUHU', 'QIBI': 'QIBI', - 'RED': 'RED', 'YouTube Red': 'RED', 'RKTN': 'RKTN', 'Rakuten TV': 'RKTN', 'The Roku Channel': 'ROKU', 'RSTR': 'RSTR', 'RTE': 'RTE', - 'RTE One': 'RTE', 'RUUTU': 'RUUTU', 'SBS': 'SBS', 'Science Channel': 'SCI', 'SESO': 'SESO', 'SeeSo': 'SESO', 'SHMI': 'SHMI', 'Shomi': 'SHMI', 'SKST': 'SKST', 'SkyShowtime': 'SKST', - 'SHO': 'SHO', 'Showtime': 'SHO', 'SNET': 'SNET', 'Sportsnet': 'SNET', 'Sony': 'SONY', 'SPIK': 'SPIK', 'Spike': 'SPIK', 'Spike TV': 'SPKE', - 'SPRT': 'SPRT', 'Sprout': 'SPRT', 'STAN': 'STAN', 'Stan': 'STAN', 'STARZ': 'STARZ', 'STRP': 'STRP', 'Star+': 'STRP', 'STZ': 'STZ', 'Starz': 'STZ', 'SVT': 'SVT', - 'Sveriges Television': 'SVT', 'SWER': 'SWER', 'SwearNet': 'SWER', 'SYFY': 'SYFY', 'Syfy': 'SYFY', 'TBS': 'TBS', 'TEN': 'TEN', - 'TFOU': 'TFOU', 'TFou': 'TFOU', 'TIMV': 'TIMV', 'TLC': 'TLC', 'TOU': 'TOU', 'TRVL': 'TRVL', 'TUBI': 'TUBI', 'TubiTV': 'TUBI', - 'TV3': 'TV3', 'TV3 Ireland': 'TV3', 'TV4': 'TV4', 'TV4 Sweeden': 'TV4', 'TVING': 'TVING', 'TVL': 'TVL', 'TV Land': 'TVL', - 'TVNZ': 'TVNZ', 'UFC': 'UFC', 'UKTV': 'UKTV', 'UNIV': 'UNIV', 'Univision': 'UNIV', 'USAN': 'USAN', 'USA Network': 'USAN', - 'VH1': 'VH1', 'VIAP': 'VIAP', 'VICE': 'VICE', 'Viceland': 'VICE', 'Viki': 'VIKI', 'VIMEO': 'VIMEO', 'VLCT': 'VLCT', - 'Velocity': 'VLCT', 'VMEO': 'VMEO', 'Vimeo': 'VMEO', 'VRV': 'VRV', 'VUDU': 'VUDU', 'WME': 'WME', 'WatchMe': 'WME', 'WNET': 'WNET', - 'W Network': 'WNET', 'WWEN': 'WWEN', 'WWE Network': 'WWEN', 'XBOX': 'XBOX', 'Xbox Video': 'XBOX', 'YHOO': 'YHOO', 'Yahoo': 'YHOO', - 'YT': 'YT', 'ZDF': 'ZDF', 'iP': 'iP', 'BBC iPlayer': 'iP', 'iQIYI': 'iQIYI', 'iT': 'iT', 'iTunes': 'iT' + "9NOW": "9NOW", + "9Now": "9NOW", + "AE": "AE", + "A&E": "AE", + "AJAZ": "AJAZ", + "Al Jazeera English": "AJAZ", + "ALL4": "ALL4", + "Channel 4": "ALL4", + "AMBC": "AMBC", + "ABC": "AMBC", + "AMC": "AMC", + "AMZN": "AMZN", + "Amazon Prime": "AMZN", + "ANLB": "ANLB", + "AnimeLab": "ANLB", + "ANPL": "ANPL", + "Animal Planet": "ANPL", + "AOL": "AOL", + "ARD": "ARD", + "AS": "AS", + "Adult Swim": "AS", + "ATK": "ATK", + "America's Test Kitchen": "ATK", + "ATVP": "ATVP", + "AppleTV": "ATVP", + "AUBC": "AUBC", + "ABC Australia": "AUBC", + "BCORE": "BCORE", + "BKPL": "BKPL", + "Blackpills": "BKPL", + "BluTV": "BLU", + "Binge": "BNGE", + "BOOM": "BOOM", + "Boomerang": "BOOM", + "BRAV": "BRAV", + "BravoTV": "BRAV", + "CBC": "CBC", + "CBS": "CBS", + "CC": "CC", + "Comedy Central": "CC", + "CCGC": "CCGC", + "Comedians in Cars Getting Coffee": "CCGC", + "CHGD": "CHGD", 
+ "CHRGD": "CHGD", + "CMAX": "CMAX", + "Cinemax": "CMAX", + "CMOR": "CMOR", + "CMT": "CMT", + "Country Music Television": "CMT", + "CN": "CN", + "Cartoon Network": "CN", + "CNBC": "CNBC", + "CNLP": "CNLP", + "Canal+": "CNLP", + "COOK": "COOK", + "CORE": "CORE", + "CR": "CR", + "Crunchy Roll": "CR", + "Crave": "CRAV", + "CRIT": "CRIT", + "Criterion": "CRIT", + "CRKL": "CRKL", + "Crackle": "CRKL", + "CSPN": "CSPN", + "CSpan": "CSPN", + "CTV": "CTV", + "CUR": "CUR", + "CuriosityStream": "CUR", + "CW": "CW", + "The CW": "CW", + "CWS": "CWS", + "CWSeed": "CWS", + "DAZN": "DAZN", + "DCU": "DCU", + "DC Universe": "DCU", + "DDY": "DDY", + "Digiturk Diledigin Yerde": "DDY", + "DEST": "DEST", + "DramaFever": "DF", + "DHF": "DHF", + "Deadhouse Films": "DHF", + "DISC": "DISC", + "Discovery": "DISC", + "DIY": "DIY", + "DIY Network": "DIY", + "DOCC": "DOCC", + "Doc Club": "DOCC", + "DPLY": "DPLY", + "DPlay": "DPLY", + "DRPO": "DRPO", + "Discovery Plus": "DSCP", + "DSKI": "DSKI", + "Daisuki": "DSKI", + "DSNP": "DSNP", + "Disney+": "DSNP", + "DSNY": "DSNY", + "Disney": "DSNY", + "DTV": "DTV", + "EPIX": "EPIX", + "ePix": "EPIX", + "ESPN": "ESPN", + "ESQ": "ESQ", + "Esquire": "ESQ", + "ETTV": "ETTV", + "El Trece": "ETTV", + "ETV": "ETV", + "E!": "ETV", + "FAM": "FAM", + "Fandor": "FANDOR", + "Facebook Watch": "FBWatch", + "FJR": "FJR", + "Family Jr": "FJR", + "FOOD": "FOOD", + "Food Network": "FOOD", + "FOX": "FOX", + "Fox": "FOX", + "Fox Premium": "FOXP", + "UFC Fight Pass": "FP", + "FPT": "FPT", + "FREE": "FREE", + "Freeform": "FREE", + "FTV": "FTV", + "FUNI": "FUNI", + "FUNi": "FUNI", + "Foxtel": "FXTL", + "FYI": "FYI", + "FYI Network": "FYI", + "GC": "GC", + "NHL GameCenter": "GC", + "GLBL": "GLBL", + "Global": "GLBL", + "GLOB": "GLOB", + "GloboSat Play": "GLOB", + "GO90": "GO90", + "GagaOOLala": "Gaga", + "HBO": "HBO", + "HBO Go": "HBO", + "HGTV": "HGTV", + "HIDI": "HIDI", + "HIST": "HIST", + "History": "HIST", + "HLMK": "HLMK", + "Hallmark": "HLMK", + "HMAX": "HMAX", + "HBO Max": "HMAX", + "HS": "HTSR", + "HTSR": "HTSR", + "HSTR": "Hotstar", + "HULU": "HULU", + "Hulu": "HULU", + "hoichoi": "HoiChoi", + "ID": "ID", + "Investigation Discovery": "ID", + "IFC": "IFC", + "iflix": "IFX", + "National Audiovisual Institute": "INA", + "ITV": "ITV", + "KAYO": "KAYO", + "KNOW": "KNOW", + "Knowledge Network": "KNOW", + "KNPY": "KNPY", + "Kanopy": "KNPY", + "LIFE": "LIFE", + "Lifetime": "LIFE", + "LN": "LN", + "MA": "MA", + "Movies Anywhere": "MA", + "MAX": "MAX", + "MBC": "MBC", + "MNBC": "MNBC", + "MSNBC": "MNBC", + "MTOD": "MTOD", + "Motor Trend OnDemand": "MTOD", + "MTV": "MTV", + "MUBI": "MUBI", + "NATG": "NATG", + "National Geographic": "NATG", + "NBA": "NBA", + "NBA TV": "NBA", + "NBC": "NBC", + "NF": "NF", + "Netflix": "NF", + "National Film Board": "NFB", + "NFL": "NFL", + "NFLN": "NFLN", + "NFL Now": "NFLN", + "NICK": "NICK", + "Nickelodeon": "NICK", + "NRK": "NRK", + "Norsk Rikskringkasting": "NRK", + "OnDemandKorea": "ODK", + "Opto": "OPTO", + "Oprah Winfrey Network": "OWN", + "PA": "PA", + "PBS": "PBS", + "PBSK": "PBSK", + "PBS Kids": "PBSK", + "PCOK": "PCOK", + "Peacock": "PCOK", + "PLAY": "PLAY", + "PLUZ": "PLUZ", + "Pluzz": "PLUZ", + "PMNP": "PMNP", + "PMNT": "PMNT", + "PMTP": "PMTP", + "POGO": "POGO", + "PokerGO": "POGO", + "PSN": "PSN", + "Playstation Network": "PSN", + "PUHU": "PUHU", + "QIBI": "QIBI", + "RED": "RED", + "YouTube Red": "RED", + "RKTN": "RKTN", + "Rakuten TV": "RKTN", + "The Roku Channel": "ROKU", + "RSTR": "RSTR", + "RTE": "RTE", + "RTE One": "RTE", + "RUUTU": "RUUTU", + "SBS": 
"SBS", + "Science Channel": "SCI", + "SESO": "SESO", + "SeeSo": "SESO", + "SHMI": "SHMI", + "Shomi": "SHMI", + "SKST": "SKST", + "SkyShowtime": "SKST", + "SHO": "SHO", + "Showtime": "SHO", + "SNET": "SNET", + "Sportsnet": "SNET", + "Sony": "SONY", + "SPIK": "SPIK", + "Spike": "SPIK", + "Spike TV": "SPKE", + "SPRT": "SPRT", + "Sprout": "SPRT", + "STAN": "STAN", + "Stan": "STAN", + "STARZ": "STARZ", + "STRP": "STRP", + "Star+": "STRP", + "STZ": "STZ", + "Starz": "STZ", + "SVT": "SVT", + "Sveriges Television": "SVT", + "SWER": "SWER", + "SwearNet": "SWER", + "SYFY": "SYFY", + "Syfy": "SYFY", + "TBS": "TBS", + "TEN": "TEN", + "TFOU": "TFOU", + "TFou": "TFOU", + "TIMV": "TIMV", + "TLC": "TLC", + "TOU": "TOU", + "TRVL": "TRVL", + "TUBI": "TUBI", + "TubiTV": "TUBI", + "TV3": "TV3", + "TV3 Ireland": "TV3", + "TV4": "TV4", + "TV4 Sweeden": "TV4", + "TVING": "TVING", + "TVL": "TVL", + "TV Land": "TVL", + "TVNZ": "TVNZ", + "UFC": "UFC", + "UKTV": "UKTV", + "UNIV": "UNIV", + "Univision": "UNIV", + "USAN": "USAN", + "USA Network": "USAN", + "VH1": "VH1", + "VIAP": "VIAP", + "VICE": "VICE", + "Viceland": "VICE", + "Viki": "VIKI", + "VIMEO": "VIMEO", + "VLCT": "VLCT", + "Velocity": "VLCT", + "VMEO": "VMEO", + "Vimeo": "VMEO", + "VRV": "VRV", + "VUDU": "VUDU", + "WME": "WME", + "WatchMe": "WME", + "WNET": "WNET", + "W Network": "WNET", + "WWEN": "WWEN", + "WWE Network": "WWEN", + "XBOX": "XBOX", + "Xbox Video": "XBOX", + "YHOO": "YHOO", + "Yahoo": "YHOO", + "YT": "YT", + "ZDF": "ZDF", + "iP": "iP", + "BBC iPlayer": "iP", + "iQIYI": "iQIYI", + "iT": "iT", + "iTunes": "iT", } if get_services_only: return services - service = guessit(video).get('streaming_service', "") + service = guessit(video).get("streaming_service", "") - video_name = re.sub(r"[.()]", " ", video.replace(tag, '').replace(guess_title, '')) + video_name = re.sub( + r"[.()]", " ", video.replace(tag, "").replace(guess_title, "") + ) if "DTS-HD MA" in audio: video_name = video_name.replace("DTS-HD.MA.", "").replace("DTS-HD MA ", "") for key, value in services.items(): - if (' ' + key + ' ') in video_name and key not in guessit(video, {"excludes": ["country", "language"]}).get('title', ''): + if (" " + key + " ") in video_name and key not in guessit( + video, {"excludes": ["country", "language"]} + ).get("title", ""): service = value elif key == service: service = value @@ -3112,9 +6256,11 @@ def stream_optimized(self, stream_opt): return stream def is_anon(self, anon_in): - anon = self.config['DEFAULT'].get("Anon", "False") + anon = self.config["DEFAULT"].get("Anon", "False") if anon.lower() == "true": - console.print("[bold red]Global ANON has been removed in favor of per-tracker settings. Please update your config accordingly.") + console.print( + "[bold red]Global ANON has been removed in favor of per-tracker settings. Please update your config accordingly." 
+ ) time.sleep(10) if anon_in is True: anon_out = 1 @@ -3136,110 +6282,162 @@ async def upload_image(self, session, url, data, headers, files): response = await resp.json() return response else: - async with session.post(url=url, data=data, headers=headers, files=files) as resp: + async with session.post( + url=url, data=data, headers=headers, files=files + ) as resp: response = await resp.json() return response def clean_filename(self, name): invalid = '<>:"/\\|?*' for char in invalid: - name = name.replace(char, '-') + name = name.replace(char, "-") return name async def gen_desc(self, meta): def clean_text(text): - return text.replace('\r\n', '').replace('\n', '').strip() + return text.replace("\r\n", "").replace("\n", "").strip() - desclink = meta.get('desclink') - descfile = meta.get('descfile') + desclink = meta.get("desclink") + descfile = meta.get("descfile") ptp_desc = "" imagelist = [] - desc_sources = ['ptp', 'blu', 'aither', 'lst', 'oe', 'tik'] + desc_sources = ["ptp", "blu", "aither", "lst", "oe", "tik"] desc_source = [source.upper() for source in desc_sources if meta.get(source)] desc_source = desc_source[0] if len(desc_source) == 1 else None - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: + with open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", + "w", + newline="", + encoding="utf8", + ) as description: description.seek(0) - if (desclink, descfile, meta['desc']) == (None, None, None): - if meta.get('ptp') and str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true" and desc_source in ['PTP', None]: - if meta.get('skip_gen_desc', False): - console.print("[cyan]Something went wrong with PTP description.") + if (desclink, descfile, meta["desc"]) == (None, None, None): + if ( + meta.get("ptp") + and str( + self.config["TRACKERS"].get("PTP", {}).get("useAPI") + ).lower() + == "true" + and desc_source in ["PTP", None] + ): + if meta.get("skip_gen_desc", False): + console.print( + "[cyan]Something went wrong with PTP description." 
+ ) return meta ptp = PTP(config=self.config) - ptp_desc, imagelist = await ptp.get_ptp_description(meta['ptp'], meta['is_disc']) + ptp_desc, imagelist = await ptp.get_ptp_description( + meta["ptp"], meta["is_disc"] + ) if clean_text(ptp_desc): description.write(ptp_desc + "\n") - meta['description'] = 'PTP' - meta['imagelist'] = imagelist - - if not ptp_desc and clean_text(meta.get('blu_desc', '')) and desc_source in ['BLU', None]: - description.write(meta['blu_desc'] + "\n") - meta['description'] = 'BLU' - - if not ptp_desc and clean_text(meta.get('lst_desc', '')) and desc_source in ['LST', None]: - description.write(meta['lst_desc'] + "\n") - meta['description'] = 'LST' - - if not ptp_desc and clean_text(meta.get('aither_desc', '')) and desc_source in ['AITHER', None]: - description.write(meta['aither_desc'] + "\n") - meta['description'] = 'AITHER' - - if not ptp_desc and clean_text(meta.get('oe_desc', '')) and desc_source in ['OE', None]: - description.write(meta['oe_desc'] + "\n") - meta['description'] = 'OE' - - if not ptp_desc and clean_text(meta.get('tike_desc', '')) and desc_source in ['TIK', None]: - description.write(meta['tik_desc'] + "\n") - meta['description'] = 'TIK' - - if meta.get('desc_template'): + meta["description"] = "PTP" + meta["imagelist"] = imagelist + + if ( + not ptp_desc + and clean_text(meta.get("blu_desc", "")) + and desc_source in ["BLU", None] + ): + description.write(meta["blu_desc"] + "\n") + meta["description"] = "BLU" + + if ( + not ptp_desc + and clean_text(meta.get("lst_desc", "")) + and desc_source in ["LST", None] + ): + description.write(meta["lst_desc"] + "\n") + meta["description"] = "LST" + + if ( + not ptp_desc + and clean_text(meta.get("aither_desc", "")) + and desc_source in ["AITHER", None] + ): + description.write(meta["aither_desc"] + "\n") + meta["description"] = "AITHER" + + if ( + not ptp_desc + and clean_text(meta.get("oe_desc", "")) + and desc_source in ["OE", None] + ): + description.write(meta["oe_desc"] + "\n") + meta["description"] = "OE" + + if ( + not ptp_desc + and clean_text(meta.get("tik_desc", "")) + and desc_source in ["TIK", None] + ): + description.write(meta["tik_desc"] + "\n") + meta["description"] = "TIK" + + if meta.get("desc_template"): from jinja2 import Template + try: - with open(f"{meta['base_dir']}/data/templates/{meta['desc_template']}.txt", 'r') as f: + with open( + f"{meta['base_dir']}/data/templates/{meta['desc_template']}.txt", + "r", + ) as f: template = Template(f.read()) template_desc = template.render(meta) if clean_text(template_desc): description.write(template_desc + "\n") - console.print(f"[INFO] Description from template '{meta['desc_template']}' used.") + console.print( + f"[INFO] Description from template '{meta['desc_template']}' used." + ) except FileNotFoundError: - console.print(f"[ERROR] Template '{meta['desc_template']}' not found.") + console.print( + f"[ERROR] Template '{meta['desc_template']}' not found."
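The if-chain above encodes a strict source precedence for the generated description: PTP first, then BLU, LST, AITHER, OE and TIK, with desc_source able to pin a single source. A table-driven sketch of the same rule (hypothetical helper; the desc_source pinning and the PTP image list are omitted for brevity):

def pick_tracker_desc(meta, clean_text):
    # First tracker with a usable description wins; PTP is handled
    # separately upstream because it also returns an image list.
    for source in ("blu", "lst", "aither", "oe", "tik"):
        text = meta.get(f"{source}_desc", "")
        if clean_text(text):
            return source.upper(), text
    return None, ""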
+ ) - if meta.get('nfo'): + if meta.get("nfo"): nfo_files = glob.glob("*.nfo") if nfo_files: nfo = nfo_files[0] - with open(nfo, 'r', encoding="utf-8") as nfo_file: + with open(nfo, "r", encoding="utf-8") as nfo_file: nfo_content = nfo_file.read() description.write(f"[code]{nfo_content}[/code]\n") - meta['description'] = "CUSTOM" + meta["description"] = "CUSTOM" console.print(f"[INFO] NFO file '{nfo}' used.") if desclink: try: - parsed = urllib.parse.urlparse(desclink.replace('/raw/', '/')) + parsed = urllib.parse.urlparse(desclink.replace("/raw/", "/")) split = os.path.split(parsed.path) - raw = parsed._replace(path=f"{split[0]}/raw/{split[1]}" if split[0] != '/' else f"/raw{parsed.path}") + raw = parsed._replace( + path=( + f"{split[0]}/raw/{split[1]}" + if split[0] != "/" + else f"/raw{parsed.path}" + ) + ) raw_url = urllib.parse.urlunparse(raw) desclink_content = requests.get(raw_url).text description.write(desclink_content + "\n") - meta['description'] = "CUSTOM" + meta["description"] = "CUSTOM" console.print(f"[INFO] Description from link '{desclink}' used.") except Exception as e: console.print(f"[ERROR] Failed to fetch description from link: {e}") if descfile and os.path.isfile(descfile): - with open(descfile, 'r') as f: + with open(descfile, "r") as f: file_content = f.read() description.write(file_content) - meta['description'] = "CUSTOM" + meta["description"] = "CUSTOM" console.print(f"[INFO] Description from file '{descfile}' used.") - if meta.get('desc'): - description.write(meta['desc'] + "\n") - meta['description'] = "CUSTOM" + if meta.get("desc"): + description.write(meta["desc"] + "\n") + meta["description"] = "CUSTOM" console.print("[INFO] Custom description used.") description.write("\n") @@ -3247,102 +6445,139 @@ def clean_text(text): return meta async def tag_override(self, meta): - with open(f"{meta['base_dir']}/data/tags.json", 'r', encoding="utf-8") as f: + with open(f"{meta['base_dir']}/data/tags.json", "r", encoding="utf-8") as f: tags = json.load(f) f.close() for tag in tags: value = tags.get(tag) - if value.get('in_name', "") == tag and tag in meta['path']: - meta['tag'] = f"-{tag}" - if meta['tag'][1:] == tag: + if value.get("in_name", "") == tag and tag in meta["path"]: + meta["tag"] = f"-{tag}" + if meta["tag"][1:] == tag: for key in value: - if key == 'type': + if key == "type": if meta[key] == "ENCODE": meta[key] = value.get(key) else: pass - elif key == 'personalrelease': - meta[key] = bool(str2bool(str(value.get(key, 'False')))) - elif key == 'template': - meta['desc_template'] = value.get(key) + elif key == "personalrelease": + meta[key] = bool(str2bool(str(value.get(key, "False")))) + elif key == "template": + meta["desc_template"] = value.get(key) else: meta[key] = value.get(key) return meta async def package(self, meta): - if meta['tag'] == "": + if meta["tag"] == "": tag = "" else: tag = f" / {meta['tag'][1:]}" - if meta['is_disc'] == "DVD": - res = meta['source'] + if meta["is_disc"] == "DVD": + res = meta["source"] else: - res = meta['resolution'] + res = meta["resolution"] - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/GENERIC_INFO.txt", 'w', encoding="utf-8") as generic: + with open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/GENERIC_INFO.txt", + "w", + encoding="utf-8", + ) as generic: generic.write(f"Name: {meta['name']}\n\n") generic.write(f"Overview: {meta['overview']}\n\n") generic.write(f"{res} / {meta['type']}{tag}\n\n") generic.write(f"Category: {meta['category']}\n") - generic.write(f"TMDB: 
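One non-obvious bit in the desclink handling above: the link is first normalized to drop any existing /raw/ segment, then rewritten so pastebin-style hosts serve plain text. A sketch of that rewrite with a hypothetical paste URL:

import os
import urllib.parse


def to_raw_url(link: str) -> str:
    # Drop any existing /raw/, then re-insert it in the right place,
    # mirroring the urlparse/_replace logic above.
    parsed = urllib.parse.urlparse(link.replace("/raw/", "/"))
    head, tail = os.path.split(parsed.path)
    raw_path = f"{head}/raw/{tail}" if head != "/" else f"/raw{parsed.path}"
    return urllib.parse.urlunparse(parsed._replace(path=raw_path))


# to_raw_url("https://paste.example/abc123") -> "https://paste.example/raw/abc123"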
https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}\n") - if meta['imdb_id'] != "0": + generic.write( + f"TMDB: https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}\n" + ) + if meta["imdb_id"] != "0": generic.write(f"IMDb: https://www.imdb.com/title/tt{meta['imdb_id']}\n") - if meta['tvdb_id'] != "0": - generic.write(f"TVDB: https://www.thetvdb.com/?id={meta['tvdb_id']}&tab=series\n") + if meta["tvdb_id"] != "0": + generic.write( + f"TVDB: https://www.thetvdb.com/?id={meta['tvdb_id']}&tab=series\n" + ) poster_img = f"{meta['base_dir']}/tmp/{meta['uuid']}/POSTER.png" - if meta.get('poster', None) not in ['', None] and not os.path.exists(poster_img): - if meta.get('rehosted_poster', None) is None: - r = requests.get(meta['poster'], stream=True) + if meta.get("poster", None) not in ["", None] and not os.path.exists( + poster_img + ): + if meta.get("rehosted_poster", None) is None: + r = requests.get(meta["poster"], stream=True) if r.status_code == 200: console.print("[bold yellow]Rehosting Poster") r.raw.decode_content = True - with open(poster_img, 'wb') as f: + with open(poster_img, "wb") as f: shutil.copyfileobj(r.raw, f) - poster, dummy = self.upload_screens(meta, 1, 1, 0, 1, [poster_img], {}) + poster, dummy = self.upload_screens( + meta, 1, 1, 0, 1, [poster_img], {} + ) poster = poster[0] - generic.write(f"TMDB Poster: {poster.get('raw_url', poster.get('img_url'))}\n") - meta['rehosted_poster'] = poster.get('raw_url', poster.get('img_url')) - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as metafile: + generic.write( + f"TMDB Poster: {poster.get('raw_url', poster.get('img_url'))}\n" + ) + meta["rehosted_poster"] = poster.get( + "raw_url", poster.get("img_url") + ) + with open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", "w" + ) as metafile: json.dump(meta, metafile, indent=4) metafile.close() else: console.print("[bold yellow]Poster could not be retrieved") - elif os.path.exists(poster_img) and meta.get('rehosted_poster') is not None: + elif os.path.exists(poster_img) and meta.get("rehosted_poster") is not None: generic.write(f"TMDB Poster: {meta.get('rehosted_poster')}\n") - if len(meta['image_list']) > 0: + if len(meta["image_list"]) > 0: generic.write("\nImage Webpage:\n") - for each in meta['image_list']: + for each in meta["image_list"]: generic.write(f"{each['web_url']}\n") generic.write("\nThumbnail Image:\n") - for each in meta['image_list']: + for each in meta["image_list"]: generic.write(f"{each['img_url']}\n") - title = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", meta['title']) + title = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", meta["title"]) archive = f"{meta['base_dir']}/tmp/{meta['uuid']}/{title}" - torrent_files = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", "*.torrent") + torrent_files = glob.glob1( + f"{meta['base_dir']}/tmp/{meta['uuid']}", "*.torrent" + ) if isinstance(torrent_files, list) and len(torrent_files) > 1: for each in torrent_files: - if not each.startswith(('BASE', '[RAND')): - os.remove(os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/{each}")) + if not each.startswith(("BASE", "[RAND")): + os.remove( + os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/{each}") + ) try: if os.path.exists(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent"): - base_torrent = Torrent.read(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") - manual_name = re.sub(r"[^0-9a-zA-Z\[\]\'\-]+", ".", os.path.basename(meta['path'])) - 
Torrent.copy(base_torrent).write(f"{meta['base_dir']}/tmp/{meta['uuid']}/{manual_name}.torrent", overwrite=True) + base_torrent = Torrent.read( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent" + ) + manual_name = re.sub( + r"[^0-9a-zA-Z\[\]\'\-]+", ".", os.path.basename(meta["path"]) + ) + Torrent.copy(base_torrent).write( + f"{meta['base_dir']}/tmp/{meta['uuid']}/{manual_name}.torrent", + overwrite=True, + ) # shutil.copy(os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent"), os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['name'].replace(' ', '.')}.torrent").replace(' ', '.')) - filebrowser = self.config['TRACKERS'].get('MANUAL', {}).get('filebrowser', None) - shutil.make_archive(archive, 'tar', f"{meta['base_dir']}/tmp/{meta['uuid']}") + filebrowser = ( + self.config["TRACKERS"].get("MANUAL", {}).get("filebrowser", None) + ) + shutil.make_archive( + archive, "tar", f"{meta['base_dir']}/tmp/{meta['uuid']}" + ) if filebrowser is not None: - url = '/'.join(s.strip('/') for s in (filebrowser, f"/tmp/{meta['uuid']}")) + url = "/".join( + s.strip("/") for s in (filebrowser, f"/tmp/{meta['uuid']}") + ) url = urllib.parse.quote(url, safe="https://") else: files = { - "files[]": (f"{meta['title']}.tar", open(f"{archive}.tar", 'rb')) + "files[]": (f"{meta['title']}.tar", open(f"{archive}.tar", "rb")) } - response = requests.post("https://uguu.se/upload.php", files=files).json() - if meta['debug']: + response = requests.post( + "https://uguu.se/upload.php", files=files + ).json() + if meta["debug"]: console.print(f"[cyan]{response}") - url = response['files'][0]['url'] + url = response["files"][0]["url"] return url except Exception: return False @@ -3352,15 +6587,19 @@ async def get_imdb_aka(self, imdb_id): if imdb_id == "0": return "", None ia = Cinemagoer() - result = ia.get_movie(imdb_id.replace('tt', '')) + result = ia.get_movie(imdb_id.replace("tt", "")) - original_language = result.get('language codes') + original_language = result.get("language codes") if isinstance(original_language, list): if len(original_language) > 1: original_language = None elif len(original_language) == 1: original_language = original_language[0] - aka = result.get('original title', result.get('localized title', "")).replace(' - IMDb', '').replace('\u00ae', '') + aka = ( + result.get("original title", result.get("localized title", "")) + .replace(" - IMDb", "") + .replace("\u00ae", "") + ) if aka != "": aka = f" AKA {aka}" return aka, original_language @@ -3369,7 +6608,7 @@ async def get_dvd_size(self, discs, manual_dvds): sizes = [] dvd_sizes = [] for each in discs: - sizes.append(each['size']) + sizes.append(each["size"]) grouped_sizes = [list(i) for j, i in itertools.groupby(sorted(sizes))] for each in grouped_sizes: if len(each) > 1: @@ -3384,106 +6623,122 @@ async def get_dvd_size(self, discs, manual_dvds): return compact - def get_tmdb_imdb_from_mediainfo(self, mediainfo, category, is_disc, tmdbid, imdbid): + def get_tmdb_imdb_from_mediainfo( + self, mediainfo, category, is_disc, tmdbid, imdbid + ): if not is_disc: - if mediainfo['media']['track'][0].get('extra'): - extra = mediainfo['media']['track'][0]['extra'] + if mediainfo["media"]["track"][0].get("extra"): + extra = mediainfo["media"]["track"][0]["extra"] for each in extra: - if each.lower().startswith('tmdb'): + if each.lower().startswith("tmdb"): parser = Args(config=self.config) - category, tmdbid = parser.parse_tmdb_id(id=extra[each], category=category) - if each.lower().startswith('imdb'): + category, tmdbid = 
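For reference, the itertools.groupby in get_dvd_size above collapses duplicate disc sizes into a count prefix for the release name. A sketch of that compaction (the single-copy branch is an assumption, since the tail of the function is not shown in this hunk):

import itertools


def compact_dvd_sizes(sizes):
    # ["DVD9", "DVD9", "DVD5"] -> "DVD5 2xDVD9"; sorting first makes
    # groupby see equal sizes as one run, as in the code above.
    parts = []
    for size, group in itertools.groupby(sorted(sizes)):
        count = len(list(group))
        parts.append(f"{count}x{size}" if count > 1 else size)
    return " ".join(parts)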
parser.parse_tmdb_id( + id=extra[each], category=category + ) + if each.lower().startswith("imdb"): try: - imdbid = str(int(extra[each].replace('tt', ''))).zfill(7) + imdbid = str(int(extra[each].replace("tt", ""))).zfill(7) except Exception: pass return category, tmdbid, imdbid def daily_to_tmdb_season_episode(self, tmdbid, date): show = tmdb.TV(tmdbid) - seasons = show.info().get('seasons') - season = '1' - episode = '1' + seasons = show.info().get("seasons") + season = "1" + episode = "1" date = datetime.fromisoformat(str(date)) for each in seasons: - air_date = datetime.fromisoformat(each['air_date']) + air_date = datetime.fromisoformat(each["air_date"]) if air_date <= date: - season = str(each['season_number']) - season_info = tmdb.TV_Seasons(tmdbid, season).info().get('episodes') + season = str(each["season_number"]) + season_info = tmdb.TV_Seasons(tmdbid, season).info().get("episodes") for each in season_info: - if str(each['air_date']) == str(date): - episode = str(each['episode_number']) + if str(each["air_date"]) == str(date): + episode = str(each["episode_number"]) break else: - console.print(f"[yellow]Unable to map the date ([bold yellow]{str(date)}[/bold yellow]) to a Season/Episode number") + console.print( + f"[yellow]Unable to map the date ([bold yellow]{str(date)}[/bold yellow]) to a Season/Episode number" + ) return season, episode async def get_imdb_info(self, imdbID, meta): imdb_info = {} - if int(str(imdbID).replace('tt', '')) != 0: + if int(str(imdbID).replace("tt", "")) != 0: ia = Cinemagoer() info = ia.get_movie(imdbID) - imdb_info['title'] = info.get('title') - imdb_info['year'] = info.get('year') - imdb_info['aka'] = info.get('original title', info.get('localized title', imdb_info['title'])).replace(' - IMDb', '') - imdb_info['type'] = info.get('kind') - imdb_info['imdbID'] = info.get('imdbID') - imdb_info['runtime'] = info.get('runtimes', ['0'])[0] - imdb_info['cover'] = info.get('full-size cover url', '').replace(".jpg", "._V1_FMjpg_UX750_.jpg") - imdb_info['plot'] = info.get('plot', [''])[0] - imdb_info['genres'] = ', '.join(info.get('genres', '')) - imdb_info['rating'] = info.get('rating', 'N/A') - imdb_info['original_language'] = info.get('language codes') - if isinstance(imdb_info['original_language'], list): - if len(imdb_info['original_language']) > 1: - imdb_info['original_language'] = None - elif len(imdb_info['original_language']) == 1: - imdb_info['original_language'] = imdb_info['original_language'][0] - if imdb_info['cover'] == '': - imdb_info['cover'] = meta.get('poster', '') - if len(info.get('directors', [])) >= 1: - imdb_info['directors'] = [] - for director in info.get('directors'): - imdb_info['directors'].append(f"nm{director.getID()}") + imdb_info["title"] = info.get("title") + imdb_info["year"] = info.get("year") + imdb_info["aka"] = info.get( + "original title", info.get("localized title", imdb_info["title"]) + ).replace(" - IMDb", "") + imdb_info["type"] = info.get("kind") + imdb_info["imdbID"] = info.get("imdbID") + imdb_info["runtime"] = info.get("runtimes", ["0"])[0] + imdb_info["cover"] = info.get("full-size cover url", "").replace( + ".jpg", "._V1_FMjpg_UX750_.jpg" + ) + imdb_info["plot"] = info.get("plot", [""])[0] + imdb_info["genres"] = ", ".join(info.get("genres", "")) + imdb_info["rating"] = info.get("rating", "N/A") + imdb_info["original_language"] = info.get("language codes") + if isinstance(imdb_info["original_language"], list): + if len(imdb_info["original_language"]) > 1: + imdb_info["original_language"] = None + elif 
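daily_to_tmdb_season_episode above maps a daily show's air date onto TMDb numbering by walking the season list in order. The selection rule, isolated into a hedged sketch (seasons shaped like TMDb's season summaries):

from datetime import datetime


def season_for_air_date(seasons, date):
    # Latest season whose premiere is on or before the episode's air
    # date, defaulting to season 1, mirroring the loop above.
    target = datetime.fromisoformat(str(date))
    picked = "1"
    for each in seasons:  # e.g. [{"season_number": 1, "air_date": "2019-09-09"}]
        if datetime.fromisoformat(each["air_date"]) <= target:
            picked = str(each["season_number"])
    return picked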
len(imdb_info["original_language"]) == 1: + imdb_info["original_language"] = imdb_info["original_language"][0] + if imdb_info["cover"] == "": + imdb_info["cover"] = meta.get("poster", "") + if len(info.get("directors", [])) >= 1: + imdb_info["directors"] = [] + for director in info.get("directors"): + imdb_info["directors"].append(f"nm{director.getID()}") else: imdb_info = { - 'title': meta['title'], - 'year': meta['year'], - 'aka': '', - 'type': None, - 'runtime': meta.get('runtime', '60'), - 'cover': meta.get('poster'), + "title": meta["title"], + "year": meta["year"], + "aka": "", + "type": None, + "runtime": meta.get("runtime", "60"), + "cover": meta.get("poster"), } - if len(meta.get('tmdb_directors', [])) >= 1: - imdb_info['directors'] = meta['tmdb_directors'] + if len(meta.get("tmdb_directors", [])) >= 1: + imdb_info["directors"] = meta["tmdb_directors"] return imdb_info async def search_imdb(self, filename, search_year): - imdbID = '0' + imdbID = "0" ia = Cinemagoer() search = ia.search_movie(filename) for movie in search: - if filename in movie.get('title', ''): - if movie.get('year') == search_year: - imdbID = str(movie.movieID).replace('tt', '') + if filename in movie.get("title", ""): + if movie.get("year") == search_year: + imdbID = str(movie.movieID).replace("tt", "") return imdbID async def imdb_other_meta(self, meta): - imdb_info = meta['imdb_info'] = await self.get_imdb_info(meta['imdb_id'], meta) - meta['title'] = imdb_info['title'] - meta['year'] = imdb_info['year'] - meta['aka'] = imdb_info['aka'] - meta['poster'] = imdb_info['cover'] - meta['original_language'] = imdb_info['original_language'] - meta['overview'] = imdb_info['plot'] - meta['imdb_rating'] = imdb_info['rating'] - - difference = SequenceMatcher(None, meta['title'].lower(), meta['aka'][5:].lower()).ratio() - if difference >= 0.9 or meta['aka'][5:].strip() == "" or meta['aka'][5:].strip().lower() in meta['title'].lower(): - meta['aka'] = "" - if f"({meta['year']})" in meta['aka']: - meta['aka'] = meta['aka'].replace(f"({meta['year']})", "").strip() + imdb_info = meta["imdb_info"] = await self.get_imdb_info(meta["imdb_id"], meta) + meta["title"] = imdb_info["title"] + meta["year"] = imdb_info["year"] + meta["aka"] = imdb_info["aka"] + meta["poster"] = imdb_info["cover"] + meta["original_language"] = imdb_info["original_language"] + meta["overview"] = imdb_info["plot"] + meta["imdb_rating"] = imdb_info["rating"] + + difference = SequenceMatcher( + None, meta["title"].lower(), meta["aka"][5:].lower() + ).ratio() + if ( + difference >= 0.9 + or meta["aka"][5:].strip() == "" + or meta["aka"][5:].strip().lower() in meta["title"].lower() + ): + meta["aka"] = "" + if f"({meta['year']})" in meta["aka"]: + meta["aka"] = meta["aka"].replace(f"({meta['year']})", "").strip() return meta async def search_tvmaze(self, filename, year, imdbID, tvdbID): @@ -3492,25 +6747,19 @@ async def search_tvmaze(self, filename, year, imdbID, tvdbID): lookup = False show = None if imdbID is None: - imdbID = '0' + imdbID = "0" if tvdbID is None: tvdbID = 0 if int(tvdbID) != 0: - params = { - "thetvdb": tvdbID - } + params = {"thetvdb": tvdbID} url = "https://api.tvmaze.com/lookup/shows" lookup = True elif int(imdbID) != 0: - params = { - "imdb": f"tt{imdbID}" - } + params = {"imdb": f"tt{imdbID}"} url = "https://api.tvmaze.com/lookup/shows" lookup = True else: - params = { - "q": filename - } + params = {"q": filename} url = "https://api.tvmaze.com/search/shows" resp = requests.get(url=url, params=params) if resp.ok: @@ -3520,20 +6769,22 
@@ async def search_tvmaze(self, filename, year, imdbID, tvdbID): if lookup is True: show = resp else: - if year not in (None, ''): + if year not in (None, ""): for each in resp: - premier_date = each['show'].get('premiered', '') + premier_date = each["show"].get("premiered", "") if premier_date is not None: if premier_date.startswith(str(year)): - show = each['show'] + show = each["show"] elif len(resp) >= 1: - show = resp[0]['show'] + show = resp[0]["show"] if show is not None: - tvmazeID = show.get('id') + tvmazeID = show.get("id") if int(imdbID) == 0: - if show.get('externals', {}).get('imdb', '0') is not None: - imdbID = str(show.get('externals', {}).get('imdb', '0')).replace('tt', '') + if show.get("externals", {}).get("imdb", "0") is not None: + imdbID = str( + show.get("externals", {}).get("imdb", "0") + ).replace("tt", "") if int(tvdbID) == 0: - if show.get('externals', {}).get('tvdb', '0') is not None: - tvdbID = show.get('externals', {}).get('tvdb', '0') + if show.get("externals", {}).get("tvdb", "0") is not None: + tvdbID = show.get("externals", {}).get("tvdb", "0") return tvmazeID, imdbID, tvdbID diff --git a/src/search.py b/src/search.py index d658e175c..6406dedac 100644 --- a/src/search.py +++ b/src/search.py @@ -3,10 +3,11 @@ from src.console import console -class Search(): +class Search: """ Logic for searching """ + def __init__(self, config): self.config = config pass @@ -26,17 +27,18 @@ async def search_file(search_dir): console.print(f"Searching {search_dir}") for root, dirs, files in os.walk(search_dir, topdown=False): for name in files: - if not name.endswith('.nfo'): + if not name.endswith(".nfo"): l_name = name.lower() os_info = platform.platform() if await self.file_search(l_name, words): file_found = True # noqa F841 - if ('Windows' in os_info): - files_total_search.append(root + '\\' + name) + if "Windows" in os_info: + files_total_search.append(root + "\\" + name) else: - files_total_search.append(root + '/' + name) + files_total_search.append(root + "/" + name) return files_total_search - config_dir = self.config['DISCORD']['search_dir'] + + config_dir = self.config["DISCORD"]["search_dir"] if isinstance(config_dir, list): for each in config_dir: files = await search_file(each) @@ -67,13 +69,14 @@ async def search_dir(search_dir): if await self.file_search(l_name, words): folder_found = True # noqa F841 - if ('Windows' in os_info): - folders_total_search.append(root + '\\' + name) + if "Windows" in os_info: + folders_total_search.append(root + "\\" + name) else: - folders_total_search.append(root + '/' + name) + folders_total_search.append(root + "/" + name) return folders_total_search - config_dir = self.config['DISCORD']['search_dir'] + + config_dir = self.config["DISCORD"]["search_dir"] if isinstance(config_dir, list): for each in config_dir: folders = await search_dir(each) diff --git a/src/trackers/ACM.py b/src/trackers/ACM.py index 1970a8e14..0344b09c2 100644 --- a/src/trackers/ACM.py +++ b/src/trackers/ACM.py @@ -9,7 +9,7 @@ from src.console import console -class ACM(): +class ACM: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -20,48 +20,48 @@ class ACM(): def __init__(self, config): self.config = config - self.tracker = 'ACM' - self.source_flag = 'AsianCinema' - self.upload_url = 'https://eiga.moi/api/torrents/upload' - self.search_url = 'https://eiga.moi/api/torrents/filter' + self.tracker = "ACM" + self.source_flag = "AsianCinema" + self.upload_url = "https://eiga.moi/api/torrents/upload" + self.search_url = 
"https://eiga.moi/api/torrents/filter" self.signature = None self.banned_groups = [""] pass async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') + "MOVIE": "1", + "TV": "2", + }.get(category_name, "0") return category_id async def get_type(self, meta): - if meta['is_disc'] == "BDMV": - bdinfo = meta['bdinfo'] + if meta["is_disc"] == "BDMV": + bdinfo = meta["bdinfo"] bd_sizes = [25, 50, 66, 100] for each in bd_sizes: - if bdinfo['size'] < each: + if bdinfo["size"] < each: bd_size = each break - if meta['uhd'] == "UHD" and bd_size != 25: + if meta["uhd"] == "UHD" and bd_size != 25: type_string = f"UHD {bd_size}" else: type_string = f"BD {bd_size}" # if type_id not in ['UHD 100', 'UHD 66', 'UHD 50', 'BD 50', 'BD 25']: # type_id = "Other" - elif meta['is_disc'] == "DVD": - if "DVD5" in meta['dvd_size']: + elif meta["is_disc"] == "DVD": + if "DVD5" in meta["dvd_size"]: type_string = "DVD 5" - elif "DVD9" in meta['dvd_size']: + elif "DVD9" in meta["dvd_size"]: type_string = "DVD 9" else: - if meta['type'] == "REMUX": - if meta['source'] == "BluRay": + if meta["type"] == "REMUX": + if meta["source"] == "BluRay": type_string = "REMUX" - if meta['uhd'] == "UHD": + if meta["uhd"] == "UHD": type_string = "UHD REMUX" else: - type_string = meta['type'] + type_string = meta["type"] # else: # acceptable_res = ["2160p", "1080p", "1080i", "720p", "576p", "576i", "540p", "480p", "Other"] # if meta['resolution'] in acceptable_res: @@ -72,103 +72,116 @@ async def get_type(self, meta): async def get_type_id(self, type): type_id = { - 'UHD 100': '1', - 'UHD 66': '2', - 'UHD 50': '3', - 'UHD REMUX': '12', - 'BD 50': '4', - 'BD 25': '5', - 'DVD 5': '14', - 'REMUX': '7', - 'WEBDL': '9', - 'SDTV': '13', - 'DVD 9': '16', - 'HDTV': '17' - }.get(type, '0') + "UHD 100": "1", + "UHD 66": "2", + "UHD 50": "3", + "UHD REMUX": "12", + "BD 50": "4", + "BD 25": "5", + "DVD 5": "14", + "REMUX": "7", + "WEBDL": "9", + "SDTV": "13", + "DVD 9": "16", + "HDTV": "17", + }.get(type, "0") return type_id async def get_res_id(self, resolution): resolution_id = { - '2160p': '1', - '1080p': '2', - '1080i': '2', - '720p': '3', - '576p': '4', - '576i': '4', - '480p': '5', - '480i': '5' - }.get(resolution, '10') + "2160p": "1", + "1080p": "2", + "1080i": "2", + "720p": "3", + "576p": "4", + "576i": "4", + "480p": "5", + "480i": "5", + }.get(resolution, "10") return resolution_id # ACM rejects uploads with more that 4 keywords async def get_keywords(self, keywords): - if keywords != '': - keywords_list = keywords.split(',') - keywords_list = [keyword for keyword in keywords_list if " " not in keyword][:4] - keywords = ', '.join(keywords_list) + if keywords != "": + keywords_list = keywords.split(",") + keywords_list = [ + keyword for keyword in keywords_list if " " not in keyword + ][:4] + keywords = ", ".join(keywords_list) return keywords def get_subtitles(self, meta): sub_lang_map = { - ("Arabic", "ara", "ar"): 'Ara', - ("Brazilian Portuguese", "Brazilian", "Portuguese-BR", 'pt-br'): 'Por-BR', - ("Bulgarian", "bul", "bg"): 'Bul', - ("Chinese", "chi", "zh", "Chinese (Simplified)", "Chinese (Traditional)"): 'Chi', - ("Croatian", "hrv", "hr", "scr"): 'Cro', - ("Czech", "cze", "cz", "cs"): 'Cze', - ("Danish", "dan", "da"): 'Dan', - ("Dutch", "dut", "nl"): 'Dut', - ("English", "eng", "en", "English (CC)", "English - SDH"): 'Eng', - ("English - Forced", "English (Forced)", "en (Forced)"): 'Eng', - ("English Intertitles", "English (Intertitles)", "English - Intertitles", "en 
(Intertitles)"): 'Eng', - ("Estonian", "est", "et"): 'Est', - ("Finnish", "fin", "fi"): 'Fin', - ("French", "fre", "fr"): 'Fre', - ("German", "ger", "de"): 'Ger', - ("Greek", "gre", "el"): 'Gre', - ("Hebrew", "heb", "he"): 'Heb', - ("Hindi" "hin", "hi"): 'Hin', - ("Hungarian", "hun", "hu"): 'Hun', - ("Icelandic", "ice", "is"): 'Ice', - ("Indonesian", "ind", "id"): 'Ind', - ("Italian", "ita", "it"): 'Ita', - ("Japanese", "jpn", "ja"): 'Jpn', - ("Korean", "kor", "ko"): 'Kor', - ("Latvian", "lav", "lv"): 'Lav', - ("Lithuanian", "lit", "lt"): 'Lit', - ("Norwegian", "nor", "no"): 'Nor', - ("Persian", "fa", "far"): 'Per', - ("Polish", "pol", "pl"): 'Pol', - ("Portuguese", "por", "pt"): 'Por', - ("Romanian", "rum", "ro"): 'Rom', - ("Russian", "rus", "ru"): 'Rus', - ("Serbian", "srp", "sr", "scc"): 'Ser', - ("Slovak", "slo", "sk"): 'Slo', - ("Slovenian", "slv", "sl"): 'Slv', - ("Spanish", "spa", "es"): 'Spa', - ("Swedish", "swe", "sv"): 'Swe', - ("Thai", "tha", "th"): 'Tha', - ("Turkish", "tur", "tr"): 'Tur', - ("Ukrainian", "ukr", "uk"): 'Ukr', - ("Vietnamese", "vie", "vi"): 'Vie', + ("Arabic", "ara", "ar"): "Ara", + ("Brazilian Portuguese", "Brazilian", "Portuguese-BR", "pt-br"): "Por-BR", + ("Bulgarian", "bul", "bg"): "Bul", + ( + "Chinese", + "chi", + "zh", + "Chinese (Simplified)", + "Chinese (Traditional)", + ): "Chi", + ("Croatian", "hrv", "hr", "scr"): "Cro", + ("Czech", "cze", "cz", "cs"): "Cze", + ("Danish", "dan", "da"): "Dan", + ("Dutch", "dut", "nl"): "Dut", + ("English", "eng", "en", "English (CC)", "English - SDH"): "Eng", + ("English - Forced", "English (Forced)", "en (Forced)"): "Eng", + ( + "English Intertitles", + "English (Intertitles)", + "English - Intertitles", + "en (Intertitles)", + ): "Eng", + ("Estonian", "est", "et"): "Est", + ("Finnish", "fin", "fi"): "Fin", + ("French", "fre", "fr"): "Fre", + ("German", "ger", "de"): "Ger", + ("Greek", "gre", "el"): "Gre", + ("Hebrew", "heb", "he"): "Heb", + ("Hindi" "hin", "hi"): "Hin", + ("Hungarian", "hun", "hu"): "Hun", + ("Icelandic", "ice", "is"): "Ice", + ("Indonesian", "ind", "id"): "Ind", + ("Italian", "ita", "it"): "Ita", + ("Japanese", "jpn", "ja"): "Jpn", + ("Korean", "kor", "ko"): "Kor", + ("Latvian", "lav", "lv"): "Lav", + ("Lithuanian", "lit", "lt"): "Lit", + ("Norwegian", "nor", "no"): "Nor", + ("Persian", "fa", "far"): "Per", + ("Polish", "pol", "pl"): "Pol", + ("Portuguese", "por", "pt"): "Por", + ("Romanian", "rum", "ro"): "Rom", + ("Russian", "rus", "ru"): "Rus", + ("Serbian", "srp", "sr", "scc"): "Ser", + ("Slovak", "slo", "sk"): "Slo", + ("Slovenian", "slv", "sl"): "Slv", + ("Spanish", "spa", "es"): "Spa", + ("Swedish", "swe", "sv"): "Swe", + ("Thai", "tha", "th"): "Tha", + ("Turkish", "tur", "tr"): "Tur", + ("Ukrainian", "ukr", "uk"): "Ukr", + ("Vietnamese", "vie", "vi"): "Vie", } sub_langs = [] - if meta.get('is_disc', '') != 'BDMV': - mi = meta['mediainfo'] - for track in mi['media']['track']: - if track['@type'] == "Text": - language = track.get('Language') + if meta.get("is_disc", "") != "BDMV": + mi = meta["mediainfo"] + for track in mi["media"]["track"]: + if track["@type"] == "Text": + language = track.get("Language") if language == "en": - if track.get('Forced', "") == "Yes": + if track.get("Forced", "") == "Yes": language = "en (Forced)" - if "intertitles" in track.get('Title', "").lower(): + if "intertitles" in track.get("Title", "").lower(): language = "en (Intertitles)" for lang, subID in sub_lang_map.items(): if language in lang and subID not in sub_langs: sub_langs.append(subID) else: - for 
language in meta['bdinfo']['subtitles']: + for language in meta["bdinfo"]["subtitles"]: for lang, subID in sub_lang_map.items(): if language in lang and subID not in sub_langs: sub_langs.append(subID) @@ -179,83 +192,109 @@ def get_subtitles(self, meta): def get_subs_tag(self, subs): if subs == []: - return ' [No subs]' - elif 'Eng' in subs: - return '' + return " [No subs]" + elif "Eng" in subs: + return "" elif len(subs) > 1: - return ' [No Eng subs]' + return " [No Eng subs]" return f" [{subs[0]} subs only]" async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) + cat_id = await self.get_cat_id(meta["category"]) type_id = await self.get_type_id(await self.get_type(meta)) - resolution_id = await self.get_res_id(meta['resolution']) + resolution_id = await self.get_res_id(meta["resolution"]) await self.edit_desc(meta) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + region_id = await common.unit3d_region_ids(meta.get("region")) + distributor_id = await common.unit3d_distributor_ids(meta.get("distributor")) acm_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + if ( + meta["anon"] == 0 + and bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + is False + ): anon = 0 else: anon = 1 - if meta['bdinfo'] is not None: + if meta["bdinfo"] is not None: # bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() mi_dump = None bd_dump = "" - for each in meta['discs']: - bd_dump = bd_dump + each['summary'].strip() + "\n\n" + for each in meta["discs"]: + bd_dump = bd_dump + each["summary"].strip() + "\n\n" else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ).read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') - files = {'torrent': open_torrent} + desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + open_torrent = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", + "rb", + ) + files = {"torrent": open_torrent} data = { - 'name': acm_name, - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb_id'].replace('tt', ''), - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': await self.get_keywords(meta['keywords']), - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, + "name": acm_name, + "description": desc, + "mediainfo": mi_dump, + "bdinfo": bd_dump, + "category_id": cat_id, + "type_id": type_id, + "resolution_id": resolution_id, + "tmdb": meta["tmdb"], + "imdb": meta["imdb_id"].replace("tt", 
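A note on sub_lang_map above: the keys are tuples of accepted spellings and membership is tested with `language in lang`, so each tracker tag is collected at most once, in track order. A minimal sketch with a trimmed map (the real one is the dict above):

SUB_LANG_MAP = {
    ("English", "eng", "en"): "Eng",
    ("Japanese", "jpn", "ja"): "Jpn",
}


def collect_sub_tags(languages):
    tags = []
    for language in languages:
        for spellings, tag in SUB_LANG_MAP.items():
            if language in spellings and tag not in tags:
                tags.append(tag)  # first-seen order, no duplicates
    return tags


# collect_sub_tags(["eng", "ja", "en"]) -> ["Eng", "Jpn"]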
""), + "tvdb": meta["tvdb_id"], + "mal": meta["mal_id"], + "igdb": 0, + "anonymous": anon, + "stream": meta["stream"], + "sd": meta["sd"], + "keywords": await self.get_keywords(meta["keywords"]), + "personal_release": int(meta.get("personalrelease", False)), + "internal": 0, + "featured": 0, + "free": 0, + "doubleup": 0, + "sticky": 0, } - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 + if self.config["TRACKERS"][self.tracker].get("internal", False) is True: + if meta["tag"] != "" and ( + meta["tag"][1:] + in self.config["TRACKERS"][self.tracker].get("internal_groups", []) + ): + data["internal"] = 1 if region_id != 0: - data['region_id'] = region_id + data["region_id"] = region_id if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') + data["distributor_id"] = distributor_id + if meta.get("category") == "TV": + data["season_number"] = meta.get("season_int", "0") + data["episode_number"] = meta.get("episode_int", "0") headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + "User-Agent": f"Upload Assistant/2.1 ({platform.system()} {platform.release()})" } + params = {"api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip()} - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + if meta["debug"] is False: + response = requests.post( + url=self.upload_url, + files=files, + data=data, + headers=headers, + params=params, + ) try: console.print(response.json()) except Exception: @@ -270,10 +309,10 @@ async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdb': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(await self.get_type(meta)), + "api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip(), + "tmdb": meta["tmdb"], + "categories[]": await self.get_cat_id(meta["category"]), + "types[]": await self.get_type_id(await self.get_type(meta)), # A majority of the ACM library doesn't contain resolution information # 'resolutions[]' : await self.get_res_id(meta['resolution']), # 'name' : "" @@ -282,13 +321,15 @@ async def search_existing(self, meta, disctype): try: response = requests.get(url=self.search_url, params=params) response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] + for each in response["data"]: + result = [each][0]["attributes"]["name"] # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + console.print( + "[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect" + ) await asyncio.sleep(5) return dupes @@ -301,73 +342,93 @@ async def search_existing(self, meta, disctype): # return original_title async def edit_name(self, meta): - name = meta.get('name') - aka = meta.get('aka') - original_title = meta.get('original_title') - year = str(meta.get('year')) # noqa F841 - audio = meta.get('audio') - source = meta.get('source') - is_disc = meta.get('is_disc') + name = meta.get("name") + aka = meta.get("aka") + original_title = meta.get("original_title") + year = str(meta.get("year")) # noqa F841 + audio = meta.get("audio") + source = meta.get("source") + is_disc = meta.get("is_disc") subs = self.get_subtitles(meta) - resolution = meta.get('resolution') - if aka != '': + resolution = meta.get("resolution") + if aka != "": # ugly fix to remove the extra space in the title - aka = aka + ' ' + aka = aka + " " name = name.replace(aka, f' / {original_title} {chr(int("202A", 16))}') - elif aka == '': - if meta.get('title') != original_title: + elif aka == "": + if meta.get("title") != original_title: # name = f'{name[:name.find(year)]}/ {original_title} {chr(int("202A", 16))}{name[name.find(year):]}' - name = name.replace(meta['title'], f"{meta['title']} / {original_title} {chr(int('202A', 16))}") - if 'AAC' in audio: - name = name.replace(audio.strip().replace(" ", " "), audio.replace("AAC ", "AAC")) + name = name.replace( + meta["title"], + f"{meta['title']} / {original_title} {chr(int('202A', 16))}", + ) + if "AAC" in audio: + name = name.replace( + audio.strip().replace(" ", " "), audio.replace("AAC ", "AAC") + ) name = name.replace("DD+ ", "DD+") name = name.replace("UHD BluRay REMUX", "Remux") name = name.replace("BluRay REMUX", "Remux") name = name.replace("H.265", "HEVC") - if is_disc == 'DVD': - name = name.replace(f'{source} DVD5', f'{resolution} DVD {source}') - name = name.replace(f'{source} DVD9', f'{resolution} DVD {source}') - if audio == meta.get('channels'): - name = name.replace(f'{audio}', f'MPEG {audio}') + if is_disc == "DVD": + name = name.replace(f"{source} DVD5", f"{resolution} DVD {source}") + name = name.replace(f"{source} DVD9", f"{resolution} DVD {source}") + if audio == meta.get("channels"): + name = name.replace(f"{audio}", f"MPEG {audio}") name = name + self.get_subs_tag(subs) return name async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as descfile: + base = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + with open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "w", + encoding="utf-8", + ) as descfile: from src.bbcode import BBCODE + # Add This line for all web-dls - if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '': - descfile.write(f"[center][b][color=#ff00ff][size=18]This release is sourced from {meta['service_longname']} and is not transcoded, just remuxed from the direct {meta['service_longname']} stream[/size][/color][/b][/center]") + if meta["type"] == "WEBDL" and meta.get("service_longname", "") != "": + descfile.write( + f"[center][b][color=#ff00ff][size=18]This release is sourced from {meta['service_longname']} and is not transcoded, just remuxed from the direct {meta['service_longname']} stream[/size][/color][/b][/center]" + ) bbcode = BBCODE() - if meta.get('discs', 
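The chr(int("202A", 16)) in edit_name above appends U+202A (LEFT-TO-RIGHT EMBEDDING) after the original-script title, presumably to keep RTL or CJK titles from scrambling the rendered release name. The constant can be written directly as an escape; a sketch of the aka == "" branch with hypothetical names:

LTR_EMBED = "\u202a"  # same character as chr(int("202A", 16))


def with_original_title(name, title, original_title):
    # Mirrors the branch above: append the original-script title followed
    # by the directionality control character.
    return name.replace(title, f"{title} / {original_title} {LTR_EMBED}")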
[]) != []: - discs = meta['discs'] - if discs[0]['type'] == "DVD": - descfile.write(f"[spoiler=VOB MediaInfo][code]{discs[0]['vob_mi']}[/code][/spoiler]\n") + if meta.get("discs", []) != []: + discs = meta["discs"] + if discs[0]["type"] == "DVD": + descfile.write( + f"[spoiler=VOB MediaInfo][code]{discs[0]['vob_mi']}[/code][/spoiler]\n" + ) descfile.write("\n") if len(discs) >= 2: for each in discs[1:]: - if each['type'] == "BDMV": + if each["type"] == "BDMV": # descfile.write(f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]\n") # descfile.write("\n") pass - if each['type'] == "DVD": + if each["type"] == "DVD": descfile.write(f"{each['name']}:\n") - descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code][{each['vob_mi']}[/code][/spoiler] [spoiler={os.path.basename(each['ifo'])}][code][{each['ifo_mi']}[/code][/spoiler]\n") + descfile.write( + f"[spoiler={os.path.basename(each['vob'])}][code][{each['vob_mi']}[/code][/spoiler] [spoiler={os.path.basename(each['ifo'])}][code][{each['ifo_mi']}[/code][/spoiler]\n" + ) descfile.write("\n") desc = base desc = bbcode.convert_pre_to_code(desc) desc = bbcode.convert_hide_to_spoiler(desc) desc = bbcode.convert_comparison_to_collapse(desc, 1000) - desc = desc.replace('[img]', '[img=300]') + desc = desc.replace("[img]", "[img=300]") descfile.write(desc) - images = meta['image_list'] + images = meta["image_list"] if len(images) > 0: descfile.write("[center]") - for each in range(len(images[:int(meta['screens'])])): - web_url = images[each]['web_url'] - img_url = images[each]['img_url'] + for each in range(len(images[: int(meta["screens"])])): + web_url = images[each]["web_url"] + img_url = images[each]["img_url"] descfile.write(f"[url={web_url}][img=350]{img_url}[/img][/url]") descfile.write("[/center]") if self.signature is not None: diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index 693379d03..ca75dddb4 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -10,7 +10,7 @@ from src.console import console -class AITHER(): +class AITHER: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -18,88 +18,161 @@ class AITHER(): Set type/category IDs Upload """ + def __init__(self, config): self.config = config - self.tracker = 'AITHER' - self.source_flag = 'Aither' - self.search_url = 'https://aither.cc/api/torrents/filter' - self.upload_url = 'https://aither.cc/api/torrents/upload' - self.torrent_url = 'https://aither.cc/api/torrents/' + self.tracker = "AITHER" + self.source_flag = "Aither" + self.search_url = "https://aither.cc/api/torrents/filter" + self.upload_url = "https://aither.cc/api/torrents/upload" + self.torrent_url = "https://aither.cc/api/torrents/" self.signature = "\n[center][url=https://aither.cc/forums/topics/1349/posts/24958]Created by L4G's Upload Assistant[/url][/center]" - self.banned_groups = ['4K4U', 'AROMA', 'd3g', 'edge2020', 'EMBER', 'EVO', 'FGT', 'FreetheFish', 'Hi10', 'HiQVE', 'ION10', 'iVy', 'Judas', 'LAMA', 'MeGusta', 'nikt0', 'OEPlus', 'OFT', 'OsC', 'PYC', - 'QxR', 'Ralphy', 'RARBG', 'RetroPeeps', 'SAMPA', 'Sicario', 'Silence', 'SkipTT', 'SPDVD', 'STUTTERSHIT', 'SWTYBLZ', 'TAoE', 'TGx', 'Tigole', 'TSP', 'TSPxL', 'VXT', 'Weasley[HONE]', - 'Will1869', 'x0r', 'YIFY'] + self.banned_groups = [ + "4K4U", + "AROMA", + "d3g", + "edge2020", + "EMBER", + "EVO", + "FGT", + "FreetheFish", + "Hi10", + "HiQVE", + "ION10", + "iVy", + "Judas", + "LAMA", + "MeGusta", + "nikt0", + "OEPlus", + "OFT", + "OsC", + "PYC", + "QxR", + "Ralphy", + "RARBG", + "RetroPeeps", 
+ "SAMPA", + "Sicario", + "Silence", + "SkipTT", + "SPDVD", + "STUTTERSHIT", + "SWTYBLZ", + "TAoE", + "TGx", + "Tigole", + "TSP", + "TSPxL", + "VXT", + "Weasley[HONE]", + "Will1869", + "x0r", + "YIFY", + ] pass async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - modq = await self.get_flag(meta, 'modq') + await common.unit3d_edit_desc( + meta, self.tracker, self.signature, comparison=True + ) + cat_id = await self.get_cat_id(meta["category"]) + type_id = await self.get_type_id(meta["type"]) + resolution_id = await self.get_res_id(meta["resolution"]) + modq = await self.get_flag(meta, "modq") name = await self.edit_name(meta) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + region_id = await common.unit3d_region_ids(meta.get("region")) + distributor_id = await common.unit3d_distributor_ids(meta.get("distributor")) + if ( + meta["anon"] == 0 + and bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + is False + ): anon = 0 else: anon = 1 - if meta['bdinfo'] is not None: + if meta["bdinfo"] is not None: mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + bd_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ).read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ).read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') - files = {'torrent': open_torrent} + desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + open_torrent = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", + "rb", + ) + files = {"torrent": open_torrent} data = { - 'name': name, - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb_id'].replace('tt', ''), - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - 'mod_queue_opt_in': modq, + "name": name, + "description": desc, + "mediainfo": mi_dump, + "bdinfo": bd_dump, + "category_id": cat_id, + "type_id": type_id, + "resolution_id": resolution_id, + "tmdb": meta["tmdb"], + "imdb": meta["imdb_id"].replace("tt", ""), + "tvdb": meta["tvdb_id"], + "mal": meta["mal_id"], + "igdb": 0, + "anonymous": anon, + "stream": 
meta["stream"], + "sd": meta["sd"], + "keywords": meta["keywords"], + "personal_release": int(meta.get("personalrelease", False)), + "internal": 0, + "featured": 0, + "free": 0, + "doubleup": 0, + "sticky": 0, + "mod_queue_opt_in": modq, } headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + "User-Agent": f"Upload Assistant/2.1 ({platform.system()} {platform.release()})" } + params = {"api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip()} # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 + if self.config["TRACKERS"][self.tracker].get("internal", False) is True: + if meta["tag"] != "" and ( + meta["tag"][1:] + in self.config["TRACKERS"][self.tracker].get("internal_groups", []) + ): + data["internal"] = 1 if region_id != 0: - data['region_id'] = region_id + data["region_id"] = region_id if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + data["distributor_id"] = distributor_id + if meta.get("category") == "TV": + data["season_number"] = meta.get("season_int", "0") + data["episode_number"] = meta.get("episode_int", "0") + if meta["debug"] is False: + response = requests.post( + url=self.upload_url, + files=files, + data=data, + headers=headers, + params=params, + ) try: console.print(response.json()) except Exception: @@ -111,60 +184,66 @@ async def upload(self, meta, disctype): open_torrent.close() async def get_flag(self, meta, flag_name): - config_flag = self.config['TRACKERS'][self.tracker].get(flag_name) + config_flag = self.config["TRACKERS"][self.tracker].get(flag_name) if config_flag is not None: return 1 if config_flag else 0 return 1 if meta.get(flag_name, False) else 0 async def edit_name(self, meta): - aither_name = meta['name'] + aither_name = meta["name"] def has_english_audio(tracks=None, media_info_text=None): - if meta['is_disc'] == "BDMV" and tracks: + if meta["is_disc"] == "BDMV" and tracks: for track in tracks: - if track.get('language', '').lower() == 'english': + if track.get("language", "").lower() == "english": return True elif media_info_text: - audio_section = re.findall(r'Audio[\s\S]+?Language\s+:\s+(\w+)', media_info_text) + audio_section = re.findall( + r"Audio[\s\S]+?Language\s+:\s+(\w+)", media_info_text + ) for i, language in enumerate(audio_section): language = language.lower().strip() - if language.lower().startswith('en'): # Check if it's English + if language.lower().startswith("en"): # Check if it's English return True return False # Helper function to extract the audio language from MediaInfo text or BDMV structure def get_audio_lang(tracks=None, is_bdmv=False, media_info_text=None): - if meta['is_disc'] == "BDMV" and tracks: - return tracks[0].get('language', '').upper() if tracks else "" + if meta["is_disc"] == "BDMV" and tracks: + return tracks[0].get("language", "").upper() if tracks else "" elif media_info_text: - match = re.search(r'Audio[\s\S]+?Language\s+:\s+(\w+)', media_info_text) + match = re.search(r"Audio[\s\S]+?Language\s+:\s+(\w+)", 
media_info_text) if match: return match.group(1).upper() return "" # Return empty string if no audio track is found - is_bdmv = meta['is_disc'] == "BDMV" # noqa #F841 - media_info_tracks = meta.get('media_info_tracks', []) # noqa #F841 + is_bdmv = meta["is_disc"] == "BDMV" # noqa #F841 + media_info_tracks = meta.get("media_info_tracks", []) # noqa #F841 - if meta['is_disc'] == "BDMV": - bdinfo_audio = meta.get('bdinfo', {}).get('audio', []) + if meta["is_disc"] == "BDMV": + bdinfo_audio = meta.get("bdinfo", {}).get("audio", []) has_eng_audio = has_english_audio(bdinfo_audio) if not has_eng_audio: audio_lang = get_audio_lang(bdinfo_audio) if audio_lang: - aither_name = aither_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1) + aither_name = aither_name.replace( + meta["resolution"], f"{audio_lang} {meta['resolution']}", 1 + ) else: # Handle non-BDMV content try: media_info_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt" - with open(media_info_path, 'r', encoding='utf-8') as f: + with open(media_info_path, "r", encoding="utf-8") as f: media_info_text = f.read() # Check for English audio in the text-based MediaInfo if not has_english_audio(media_info_text=media_info_text): audio_lang = get_audio_lang(media_info_text=media_info_text) if audio_lang: - aither_name = aither_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1) + aither_name = aither_name.replace( + meta["resolution"], f"{audio_lang} {meta['resolution']}", 1 + ) except (FileNotFoundError, KeyError) as e: print(f"Error processing MEDIAINFO.txt: {e}") @@ -172,64 +251,68 @@ def get_audio_lang(tracks=None, is_bdmv=False, media_info_text=None): async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') + "MOVIE": "1", + "TV": "2", + }.get(category_name, "0") return category_id async def get_type_id(self, type): type_id = { - 'DISC': '1', - 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - 'ENCODE': '3' - }.get(type, '0') + "DISC": "1", + "REMUX": "2", + "WEBDL": "4", + "WEBRIP": "5", + "HDTV": "6", + "ENCODE": "3", + }.get(type, "0") return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') + "8640p": "10", + "4320p": "1", + "2160p": "2", + "1440p": "3", + "1080p": "3", + "1080i": "4", + "720p": "5", + "576p": "6", + "576i": "7", + "480p": "8", + "480i": "9", + }.get(resolution, "10") return resolution_id async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" + "api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip(), + "tmdbId": meta["tmdb"], + "categories[]": await self.get_cat_id(meta["category"]), + "types[]": await self.get_type_id(meta["type"]), + "resolutions[]": await self.get_res_id(meta["resolution"]), + "name": "", } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" 
{meta['edition']}" + if meta["category"] == "TV": + params["name"] = ( + params["name"] + f" {meta.get('season', '')}{meta.get('episode', '')}" + ) + if meta.get("edition", "") != "": + params["name"] = params["name"] + f" {meta['edition']}" try: response = requests.get(url=self.search_url, params=params) response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] + for each in response["data"]: + result = [each][0]["attributes"]["name"] # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + console.print( + "[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect" + ) await asyncio.sleep(5) return dupes diff --git a/src/trackers/AL.py b/src/trackers/AL.py index dcc7b6774..662fe2d30 100644 --- a/src/trackers/AL.py +++ b/src/trackers/AL.py @@ -9,7 +9,7 @@ from src.console import console -class AL(): +class AL: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -20,124 +20,154 @@ class AL(): def __init__(self, config): self.config = config - self.tracker = 'AL' - self.source_flag = 'AnimeLovers' - self.upload_url = 'https://animelovers.club/api/torrents/upload' - self.search_url = 'https://animelovers.club/api/torrents/filter' + self.tracker = "AL" + self.source_flag = "AnimeLovers" + self.upload_url = "https://animelovers.club/api/torrents/upload" + self.search_url = "https://animelovers.club/api/torrents/filter" self.signature = None self.banned_groups = [""] pass async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '1') + "MOVIE": "1", + "TV": "2", + }.get(category_name, "1") return category_id async def get_type_id(self, type): type_id = { - 'BDMV': '1', - 'DISC': '1', - 'REMUX': '2', - 'ENCODE': '3', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - 'DVDISO': '7', - 'DVDRIP': '8', - 'RAW': '9', - 'BDRIP': '10', - 'COLOR': '11', - 'MONO': '12' - }.get(type, '1') + "BDMV": "1", + "DISC": "1", + "REMUX": "2", + "ENCODE": "3", + "WEBDL": "4", + "WEBRIP": "5", + "HDTV": "6", + "DVDISO": "7", + "DVDRIP": "8", + "RAW": "9", + "BDRIP": "10", + "COLOR": "11", + "MONO": "12", + }.get(type, "1") return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') + "8640p": "10", + "4320p": "1", + "2160p": "2", + "1440p": "3", + "1080p": "3", + "1080i": "4", + "720p": "5", + "576p": "6", + "576i": "7", + "480p": "8", + "480i": "9", + }.get(resolution, "10") return resolution_id async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, self.tracker, self.signature) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + cat_id = await self.get_cat_id(meta["category"]) + type_id = await self.get_type_id(meta["type"]) + resolution_id = await 
self.get_res_id(meta["resolution"]) + region_id = await common.unit3d_region_ids(meta.get("region")) + distributor_id = await common.unit3d_distributor_ids(meta.get("distributor")) name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + if ( + meta["anon"] == 0 + and bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + is False + ): anon = 0 else: anon = 1 - if meta['bdinfo'] is not None: + if meta["bdinfo"] is not None: mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + bd_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ).read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ).read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') - files = {'torrent': open_torrent} + desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + open_torrent = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", + "rb", + ) + files = {"torrent": open_torrent} data = { - 'name': name, - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb_id'].replace('tt', ''), - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, + "name": name, + "description": desc, + "mediainfo": mi_dump, + "bdinfo": bd_dump, + "category_id": cat_id, + "type_id": type_id, + "resolution_id": resolution_id, + "tmdb": meta["tmdb"], + "imdb": meta["imdb_id"].replace("tt", ""), + "tvdb": meta["tvdb_id"], + "mal": meta["mal_id"], + "igdb": 0, + "anonymous": anon, + "stream": meta["stream"], + "sd": meta["sd"], + "keywords": meta["keywords"], + "personal_release": int(meta.get("personalrelease", False)), + "internal": 0, + "featured": 0, + "free": 0, + "doubleup": 0, + "sticky": 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 + if self.config["TRACKERS"][self.tracker].get("internal", False) is True: + if meta["tag"] != "" and ( + meta["tag"][1:] + in self.config["TRACKERS"][self.tracker].get("internal_groups", []) + ): + data["internal"] = 1 if region_id != 0: - data['region_id'] = region_id + data["region_id"] = region_id if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') + data["distributor_id"] = distributor_id + if meta.get("category") == "TV": + data["season_number"] = 
meta.get("season_int", "0") + data["episode_number"] = meta.get("episode_int", "0") headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + "User-Agent": f"Upload Assistant/2.1 ({platform.system()} {platform.release()})" } + params = {"api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip()} - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + if meta["debug"] is False: + response = requests.post( + url=self.upload_url, + files=files, + data=data, + headers=headers, + params=params, + ) try: console.print(response.json()) except Exception: @@ -152,30 +182,61 @@ async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" + "api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip(), + "tmdbId": meta["tmdb"], + "categories[]": await self.get_cat_id(meta["category"]), + "types[]": await self.get_type_id(meta["type"]), + "resolutions[]": await self.get_res_id(meta["resolution"]), + "name": "", } - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" + if meta.get("edition", "") != "": + params["name"] = params["name"] + f" {meta['edition']}" try: response = requests.get(url=self.search_url, params=params) response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] + for each in response["data"]: + result = [each][0]["attributes"]["name"] # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + console.print( + "[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect" + ) await asyncio.sleep(5) return dupes # Got this from CBR and changed the encoding rename async def edit_name(self, meta): - name = meta['uuid'].replace('.mkv', '').replace('.mp4', '').replace(".", " ").replace("DDP2 0", "DDP2.0").replace("DDP5 1", "DDP5.1").replace("H 264", "x264").replace("H 265", "x265").replace("DD+7 1", "DDP7.1").replace("AAC2 0", "AAC2.0").replace('DD5 1', 'DD5.1').replace('DD2 0', 'DD2.0').replace('TrueHD 7 1', 'TrueHD 7.1').replace('DTS-HD MA 7 1', 'DTS-HD MA 7.1').replace('DTS-HD MA 5 1', 'DTS-HD MA 5.1').replace("TrueHD 5 1", "TrueHD 5.1").replace("DTS-X 7 1", "DTS-X 7.1").replace("DTS-X 5 1", "DTS-X 5.1").replace("FLAC 2 0", "FLAC 2.0").replace("FLAC 2 0", "FLAC 2.0").replace("FLAC 5 1", "FLAC 5.1").replace("DD1 0", "DD1.0").replace("DTS ES 5 1", "DTS ES 5.1").replace("DTS5 1", "DTS 5.1").replace("AAC1 0", "AAC1.0").replace("DD+5 1", "DDP5.1").replace("DD+2 0", "DDP2.0").replace("DD+1 0", "DDP1.0") + name = ( + meta["uuid"] + .replace(".mkv", "") + .replace(".mp4", "") + .replace(".", " ") + .replace("DDP2 0", "DDP2.0") + .replace("DDP5 1", "DDP5.1") + .replace("H 264", "x264") + .replace("H 265", "x265") + .replace("DD+7 1", "DDP7.1") + .replace("AAC2 0", "AAC2.0") + .replace("DD5 1", "DD5.1") + .replace("DD2 0", "DD2.0") + .replace("TrueHD 7 1", "TrueHD 7.1") + .replace("DTS-HD MA 7 1", "DTS-HD MA 7.1") + .replace("DTS-HD MA 5 1", "DTS-HD MA 5.1") + .replace("TrueHD 5 1", "TrueHD 5.1") + .replace("DTS-X 7 1", "DTS-X 7.1") + .replace("DTS-X 5 1", "DTS-X 5.1") + .replace("FLAC 2 0", "FLAC 2.0") + .replace("FLAC 2 0", "FLAC 2.0") + .replace("FLAC 5 1", "FLAC 5.1") + .replace("DD1 0", "DD1.0") + .replace("DTS ES 5 1", "DTS ES 5.1") + .replace("DTS5 1", "DTS 5.1") + .replace("AAC1 0", "AAC1.0") + .replace("DD+5 1", "DDP5.1") + .replace("DD+2 0", "DDP2.0") + .replace("DD+1 0", "DDP1.0") + ) return name diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py index 9e06f931f..1e149102c 100644 --- a/src/trackers/ANT.py +++ b/src/trackers/ANT.py @@ -14,7 +14,7 @@ from src.console import console -class ANT(): +class ANT: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -25,42 +25,108 @@ class ANT(): def __init__(self, config): self.config = config - self.tracker = 'ANT' - self.source_flag = 'ANT' - self.search_url = 'https://anthelion.me/api.php' - self.upload_url = 'https://anthelion.me/api.php' + self.tracker = "ANT" + self.source_flag = "ANT" + self.search_url = "https://anthelion.me/api.php" + self.upload_url = "https://anthelion.me/api.php" self.banned_groups = [ - '3LTON', '4yEo', 'ADE', 'AFG', 'AniHLS', 'AnimeRG', 'AniURL', 'AROMA', 'aXXo', 'Brrip', 'CHD', 'CM8', - 'CrEwSaDe', 'd3g', 'DDR', 'DNL', 'DeadFish', 'ELiTE', 'eSc', 'FaNGDiNG0', 'FGT', 'Flights', 'FRDS', - 'FUM', 'HAiKU', 'HD2DVD', 'HDS', 'HDTime', 'Hi10', 'ION10', 'iPlanet', 'JIVE', 'KiNGDOM', 'Leffe', - 'LiGaS', 'LOAD', 'MeGusta', 'MkvCage', 'mHD', 'mSD', 'NhaNc3', 'nHD', 'NOIVTC', 'nSD', 'Oj', 'Ozlem', - 'PiRaTeS', 'PRoDJi', 'RAPiDCOWS', 'RARBG', 'RetroPeeps', 'RDN', 'REsuRRecTioN', 'RMTeam', 'SANTi', - 'SicFoI', 'SPASM', 'SPDVD', 'STUTTERSHIT', 'TBS', 'Telly', 'TM', 'UPiNSMOKE', 'URANiME', 'WAF', 'xRed', - 'XS', 'YIFY', 'YTS', 'Zeus', 'ZKBL', 'ZmN', 'ZMNT' + "3LTON", + "4yEo", + "ADE", + "AFG", + "AniHLS", + "AnimeRG", + "AniURL", + "AROMA", + "aXXo", + "Brrip", + "CHD", + "CM8", + "CrEwSaDe", + "d3g", + "DDR", + "DNL", + "DeadFish", + "ELiTE", + "eSc", + "FaNGDiNG0", + "FGT", + "Flights", + "FRDS", + "FUM", + "HAiKU", + 
"HD2DVD", + "HDS", + "HDTime", + "Hi10", + "ION10", + "iPlanet", + "JIVE", + "KiNGDOM", + "Leffe", + "LiGaS", + "LOAD", + "MeGusta", + "MkvCage", + "mHD", + "mSD", + "NhaNc3", + "nHD", + "NOIVTC", + "nSD", + "Oj", + "Ozlem", + "PiRaTeS", + "PRoDJi", + "RAPiDCOWS", + "RARBG", + "RetroPeeps", + "RDN", + "REsuRRecTioN", + "RMTeam", + "SANTi", + "SicFoI", + "SPASM", + "SPDVD", + "STUTTERSHIT", + "TBS", + "Telly", + "TM", + "UPiNSMOKE", + "URANiME", + "WAF", + "xRed", + "XS", + "YIFY", + "YTS", + "Zeus", + "ZKBL", + "ZmN", + "ZMNT", ] self.signature = None pass async def get_flags(self, meta): flags = [] - for each in ['Directors', 'Extended', 'Uncut', 'Unrated', '4KRemaster']: - if each in meta['edition'].replace("'", ""): + for each in ["Directors", "Extended", "Uncut", "Unrated", "4KRemaster"]: + if each in meta["edition"].replace("'", ""): flags.append(each) - for each in ['Dual-Audio', 'Atmos']: - if each in meta['audio']: - flags.append(each.replace('-', '')) - if meta.get('has_commentary', False): - flags.append('Commentary') - if meta['3D'] == "3D": - flags.append('3D') - if "HDR" in meta['hdr']: - flags.append('HDR10') - if "DV" in meta['hdr']: - flags.append('DV') - if "Criterion" in meta.get('distributor', ''): - flags.append('Criterion') - if "REMUX" in meta['type']: - flags.append('Remux') + for each in ["Dual-Audio", "Atmos"]: + if each in meta["audio"]: + flags.append(each.replace("-", "")) + if meta.get("has_commentary", False): + flags.append("Commentary") + if meta["3D"] == "3D": + flags.append("3D") + if "HDR" in meta["hdr"]: + flags.append("HDR10") + if "DV" in meta["hdr"]: + flags.append("DV") + if "Criterion" in meta.get("distributor", ""): + flags.append("Criterion") + if "REMUX" in meta["type"]: + flags.append("Remux") return flags async def upload(self, meta, disctype): @@ -73,7 +139,7 @@ async def upload(self, meta, disctype): # Calculate the total bytes consumed by all the pathnames in the torrent def calculate_pathname_bytes(files): - total_pathname_bytes = sum(len(str(file).encode('utf-8')) for file in files) + total_pathname_bytes = sum(len(str(file).encode("utf-8")) for file in files) return total_pathname_bytes total_pathname_bytes = calculate_pathname_bytes(torrent.files) @@ -86,82 +152,123 @@ def calculate_pieces_and_file_size(total_size, pathname_bytes, piece_size): return num_pieces, torrent_file_size # Check if the existing torrent fits within the constraints - num_pieces, torrent_file_size = calculate_pieces_and_file_size(total_size, total_pathname_bytes, torrent.piece_size) + num_pieces, torrent_file_size = calculate_pieces_and_file_size( + total_size, total_pathname_bytes, torrent.piece_size + ) # Convert torrent file size to KiB for display torrent_file_size_kib = torrent_file_size / 1024 # If the torrent doesn't meet the constraints, ask the user if they want to regenerate it if not (1000 <= num_pieces <= 2000) or torrent_file_size > 102400: - console.print(f"[yellow]Existing .torrent is outside of ANT preferred constraints with {num_pieces} pieces and is approximately {torrent_file_size_kib:.2f} KiB.") - regenerate = cli_ui.ask_yes_no("Do you wish to regenerate the torrent?", default=True) + console.print( + f"[yellow]Existing .torrent is outside of ANT preferred constraints with {num_pieces} pieces and is approximately {torrent_file_size_kib:.2f} KiB." 
+ ) + regenerate = cli_ui.ask_yes_no( + "Do you wish to regenerate the torrent?", default=True + ) if regenerate: - console.print("[yellow]Regenerating torrent to fit within 1000-2000 pieces and 100 KiB .torrent size limit needed for ANT.") + console.print( + "[yellow]Regenerating torrent to fit within 1000-2000 pieces and 100 KiB .torrent size limit needed for ANT." + ) from src.prep import Prep - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) + + prep = Prep( + screens=meta["screens"], + img_host=meta["imghost"], + config=self.config, + ) # Override the max piece size before regenerating the torrent - meta['max_piece_size'] = '64' # 64 MiB, the maximum piece size allowed + meta["max_piece_size"] = "64" # 64 MiB, the maximum piece size allowed # Call create_torrent with the adjusted piece size - prep.create_torrent(meta, Path(meta['path']), "ANT") + prep.create_torrent(meta, Path(meta["path"]), "ANT") torrent_filename = "ANT" else: - console.print("[green]Using the existing torrent despite not meeting the preferred constraints.") + console.print( + "[green]Using the existing torrent despite not meeting the preferred constraints." + ) else: console.print("[green]Existing torrent meets the constraints.") - await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_filename) + await common.edit_torrent( + meta, self.tracker, self.source_flag, torrent_filename=torrent_filename + ) flags = await self.get_flags(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + if ( + meta["anon"] == 0 + and bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + is False + ): anon = 0 else: anon = 1 - if meta['bdinfo'] is not None: - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - bd_dump = f'[spoiler=BDInfo][pre]{bd_dump}[/pre][/spoiler]' - path = os.path.join(meta['bdinfo']['path'], 'STREAM') - file_name = meta['bdinfo']['files'][0]['file'].lower() + if meta["bdinfo"] is not None: + bd_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ).read() + bd_dump = f"[spoiler=BDInfo][pre]{bd_dump}[/pre][/spoiler]" + path = os.path.join(meta["bdinfo"]["path"], "STREAM") + file_name = meta["bdinfo"]["files"][0]["file"].lower() m2ts = os.path.join(path, file_name) media_info_output = str(MediaInfo.parse(m2ts, output="text", full=False)) - mi_dump = media_info_output.replace('\r\n', '\n') + mi_dump = media_info_output.replace("\r\n", "\n") else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') - files = {'file_input': open_torrent} + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ).read() + open_torrent = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", + "rb", + ) + files = {"file_input": open_torrent} data = { - 'api_key': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'action': 'upload', - 'tmdbid': meta['tmdb'], - 'mediainfo': mi_dump, - 'flags[]': flags, - 'anonymous': anon, - 'screenshots': '\n'.join([x['raw_url'] for x in meta['image_list']][:4]) + "api_key": self.config["TRACKERS"][self.tracker]["api_key"].strip(), + "action": "upload", + "tmdbid": 
meta["tmdb"], + "mediainfo": mi_dump, + "flags[]": flags, + "anonymous": anon, + "screenshots": "\n".join([x["raw_url"] for x in meta["image_list"]][:4]), } - if meta['bdinfo'] is not None: - data.update({ - 'media': 'Blu-ray', - 'releasegroup': str(meta['tag'])[1:], - 'release_desc': bd_dump, - 'flagchangereason': "BDMV Uploaded with L4G's Upload Assistant"}) - if meta['scene']: + if meta["bdinfo"] is not None: + data.update( + { + "media": "Blu-ray", + "releasegroup": str(meta["tag"])[1:], + "release_desc": bd_dump, + "flagchangereason": "BDMV Uploaded with L4G's Upload Assistant", + } + ) + if meta["scene"]: # ID of "Scene?" checkbox on upload form is actually "censored" - data['censored'] = 1 + data["censored"] = 1 headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + "User-Agent": f"Upload Assistant/2.1 ({platform.system()} {platform.release()})" } try: - if not meta['debug']: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers) + if not meta["debug"]: + response = requests.post( + url=self.upload_url, files=files, data=data, headers=headers + ) if response.status_code in [200, 201]: response_data = response.json() else: response_data = { "error": f"Unexpected status code: {response.status_code}", - "response_content": response.text # or use response.json() if JSON is expected + "response_content": response.text, # or use response.json() if JSON is expected } console.print(response_data) else: @@ -177,26 +284,28 @@ async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'apikey': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 't': 'search', - 'o': 'json' + "apikey": self.config["TRACKERS"][self.tracker]["api_key"].strip(), + "t": "search", + "o": "json", } - if str(meta['tmdb']) != "0": - params['tmdb'] = meta['tmdb'] - elif int(meta['imdb_id'].replace('tt', '')) != 0: - params['imdb'] = meta['imdb_id'] + if str(meta["tmdb"]) != "0": + params["tmdb"] = meta["tmdb"] + elif int(meta["imdb_id"].replace("tt", "")) != 0: + params["imdb"] = meta["imdb_id"] try: - response = requests.get(url='https://anthelion.me/api', params=params) + response = requests.get(url="https://anthelion.me/api", params=params) response = response.json() - for each in response['item']: - largest = [each][0]['files'][0] - for file in [each][0]['files']: - if int(file['size']) > int(largest['size']): + for each in response["item"]: + largest = [each][0]["files"][0] + for file in [each][0]["files"]: + if int(file["size"]) > int(largest["size"]): largest = file - result = largest['name'] + result = largest["name"] dupes.append(result) except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + console.print( + "[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect" + ) await asyncio.sleep(5) return dupes diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index 910ff6e8f..235fe0178 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -11,7 +11,7 @@ from src.console import console -class BHD(): +class BHD: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -19,57 +19,97 @@ class BHD(): Set type/category IDs Upload """ + def __init__(self, config): self.config = config - self.tracker = 'BHD' - self.source_flag = 'BHD' - self.upload_url = 'https://beyond-hd.me/api/upload/' + self.tracker = "BHD" + self.source_flag = "BHD" + self.upload_url = "https://beyond-hd.me/api/upload/" self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" - self.banned_groups = ['Sicario', 'TOMMY', 'x0r', 'nikt0', 'FGT', 'd3g', 'MeGusta', 'YIFY', 'tigole', 'TEKNO3D', 'C4K', 'RARBG', '4K4U', 'EASports', 'ReaLHD'] + self.banned_groups = [ + "Sicario", + "TOMMY", + "x0r", + "nikt0", + "FGT", + "d3g", + "MeGusta", + "YIFY", + "tigole", + "TEKNO3D", + "C4K", + "RARBG", + "4K4U", + "EASports", + "ReaLHD", + ] pass async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) - source_id = await self.get_source(meta['source']) + cat_id = await self.get_cat_id(meta["category"]) + source_id = await self.get_source(meta["source"]) type_id = await self.get_type(meta) draft = await self.get_live(meta) await self.edit_desc(meta) tags = await self.get_tags(meta) custom, edition = await self.get_edition(meta, tags) bhd_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + if ( + meta["anon"] == 0 + and bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + is False + ): anon = 0 else: anon = 1 - if meta['bdinfo'] is not None: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8') + if meta["bdinfo"] is not None: + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ) else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8') + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ) - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() + desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() torrent_file = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" files = { - 'mediainfo': mi_dump, + "mediainfo": mi_dump, } if os.path.exists(torrent_file): - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') - files['file'] = open_torrent.read() + open_torrent = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", + "rb", + ) + files["file"] = open_torrent.read() open_torrent.close() data = { - 'name': bhd_name, - 'category_id': cat_id, - 'type': type_id, - 'source': source_id, - 'imdb_id': meta['imdb_id'].replace('tt', ''), - 'tmdb_id': meta['tmdb'], - 'description': desc, - 'anon': anon, - 'sd': meta.get('sd', 0), - 'live': 
draft + "name": bhd_name, + "category_id": cat_id, + "type": type_id, + "source": source_id, + "imdb_id": meta["imdb_id"].replace("tt", ""), + "tmdb_id": meta["tmdb"], + "description": desc, + "anon": anon, + "sd": meta.get("sd", 0), + "live": draft, # 'internal' : 0, # 'featured' : 0, # 'free' : 0, @@ -77,39 +117,44 @@ async def upload(self, meta, disctype): # 'sticky' : 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 + if self.config["TRACKERS"][self.tracker].get("internal", False) is True: + if meta["tag"] != "" and ( + meta["tag"][1:] + in self.config["TRACKERS"][self.tracker].get("internal_groups", []) + ): + data["internal"] = 1 - if meta.get('tv_pack', 0) == 1: - data['pack'] = 1 - if meta.get('season', None) == "S00": - data['special'] = 1 - if meta.get('region', "") != "": - data['region'] = meta['region'] + if meta.get("tv_pack", 0) == 1: + data["pack"] = 1 + if meta.get("season", None) == "S00": + data["special"] = 1 + if meta.get("region", "") != "": + data["region"] = meta["region"] if custom is True: - data['custom_edition'] = edition + data["custom_edition"] = edition elif edition != "": - data['edition'] = edition + data["edition"] = edition if len(tags) > 0: - data['tags'] = ','.join(tags) + data["tags"] = ",".join(tags) headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + "User-Agent": f"Upload Assistant/2.1 ({platform.system()} {platform.release()})" } - url = self.upload_url + self.config['TRACKERS'][self.tracker]['api_key'].strip() - if meta['debug'] is False: + url = self.upload_url + self.config["TRACKERS"][self.tracker]["api_key"].strip() + if meta["debug"] is False: response = requests.post(url=url, files=files, data=data, headers=headers) try: response = response.json() - if int(response['status_code']) == 0: + if int(response["status_code"]) == 0: console.print(f"[red]{response['status_message']}") - if response['status_message'].startswith('Invalid imdb_id'): - console.print('[yellow]RETRYING UPLOAD') - data['imdb_id'] = 1 - response = requests.post(url=url, files=files, data=data, headers=headers) + if response["status_message"].startswith("Invalid imdb_id"): + console.print("[yellow]RETRYING UPLOAD") + data["imdb_id"] = 1 + response = requests.post( + url=url, files=files, data=data, headers=headers + ) response = response.json() - elif response['satus_message'].startswith('Invalid name value'): + elif response["satus_message"].startswith("Invalid name value"): console.print(f"[bold yellow]Submitted Name: {bhd_name}") console.print(response) except Exception: @@ -121,9 +166,9 @@ async def upload(self, meta, disctype): async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '1') + "MOVIE": "1", + "TV": "2", + }.get(category_name, "1") return category_id async def get_source(self, source): @@ -135,78 +180,106 @@ async def get_source(self, source): "Web": "WEB", "HDTV": "HDTV", "UHDTV": "HDTV", - "NTSC": "DVD", "NTSC DVD": "DVD", - "PAL": "DVD", "PAL DVD": "DVD", + "NTSC": "DVD", + "NTSC DVD": "DVD", + "PAL": "DVD", + "PAL DVD": "DVD", } source_id = sources.get(source) return source_id async def get_type(self, meta): - if meta['is_disc'] == "BDMV": - bdinfo = meta['bdinfo'] + if meta["is_disc"] == "BDMV": + bdinfo = meta["bdinfo"] bd_sizes = [25, 50, 66, 100] for each in bd_sizes: - if 
bdinfo['size'] < each: + if bdinfo["size"] < each: bd_size = each break - if meta['uhd'] == "UHD" and bd_size != 25: + if meta["uhd"] == "UHD" and bd_size != 25: type_id = f"UHD {bd_size}" else: type_id = f"BD {bd_size}" - if type_id not in ['UHD 100', 'UHD 66', 'UHD 50', 'BD 50', 'BD 25']: + if type_id not in ["UHD 100", "UHD 66", "UHD 50", "BD 50", "BD 25"]: type_id = "Other" - elif meta['is_disc'] == "DVD": - if "DVD5" in meta['dvd_size']: + elif meta["is_disc"] == "DVD": + if "DVD5" in meta["dvd_size"]: type_id = "DVD 5" - elif "DVD9" in meta['dvd_size']: + elif "DVD9" in meta["dvd_size"]: type_id = "DVD 9" else: - if meta['type'] == "REMUX": - if meta['source'] == "BluRay": + if meta["type"] == "REMUX": + if meta["source"] == "BluRay": type_id = "BD Remux" - if meta['source'] in ("PAL DVD", "NTSC DVD"): + if meta["source"] in ("PAL DVD", "NTSC DVD"): type_id = "DVD Remux" - if meta['uhd'] == "UHD": + if meta["uhd"] == "UHD": type_id = "UHD Remux" - if meta['source'] == "HDDVD": + if meta["source"] == "HDDVD": type_id = "Other" else: - acceptable_res = ["2160p", "1080p", "1080i", "720p", "576p", "576i", "540p", "480p", "Other"] - if meta['resolution'] in acceptable_res: - type_id = meta['resolution'] + acceptable_res = [ + "2160p", + "1080p", + "1080i", + "720p", + "576p", + "576i", + "540p", + "480p", + "Other", + ] + if meta["resolution"] in acceptable_res: + type_id = meta["resolution"] else: type_id = "Other" return type_id async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as desc: - if meta.get('discs', []) != []: - discs = meta['discs'] - if discs[0]['type'] == "DVD": - desc.write(f"[spoiler=VOB MediaInfo][code]{discs[0]['vob_mi']}[/code][/spoiler]") + base = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + with open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "w", + encoding="utf-8", + ) as desc: + if meta.get("discs", []) != []: + discs = meta["discs"] + if discs[0]["type"] == "DVD": + desc.write( + f"[spoiler=VOB MediaInfo][code]{discs[0]['vob_mi']}[/code][/spoiler]" + ) desc.write("\n") if len(discs) >= 2: for each in discs[1:]: - if each['type'] == "BDMV": - desc.write(f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]") + if each["type"] == "BDMV": + desc.write( + f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]" + ) desc.write("\n") - elif each['type'] == "DVD": + elif each["type"] == "DVD": desc.write(f"{each['name']}:\n") - desc.write(f"[spoiler={os.path.basename(each['vob'])}][code][{each['vob_mi']}[/code][/spoiler] [spoiler={os.path.basename(each['ifo'])}][code][{each['ifo_mi']}[/code][/spoiler]") + desc.write( + f"[spoiler={os.path.basename(each['vob'])}][code][{each['vob_mi']}[/code][/spoiler] [spoiler={os.path.basename(each['ifo'])}][code][{each['ifo_mi']}[/code][/spoiler]" + ) desc.write("\n") - elif each['type'] == "HDDVD": + elif each["type"] == "HDDVD": desc.write(f"{each['name']}:\n") - desc.write(f"[spoiler={os.path.basename(each['largest_evo'])}][code][{each['evo_mi']}[/code][/spoiler]\n") + desc.write( + f"[spoiler={os.path.basename(each['largest_evo'])}][code][{each['evo_mi']}[/code][/spoiler]\n" + ) desc.write("\n") desc.write(base.replace("[img]", "[img width=300]")) - images = meta['image_list'] + images = 
meta["image_list"] if len(images) > 0: desc.write("[center]") - for each in range(len(images[:int(meta['screens'])])): - web_url = images[each]['web_url'] - img_url = images[each]['img_url'] + for each in range(len(images[: int(meta["screens"])])): + web_url = images[each]["web_url"] + img_url = images[each]["img_url"] desc.write(f"[url={web_url}][img width=350]{img_url}[/img][/url]") desc.write("[/center]") desc.write(self.signature) @@ -216,64 +289,77 @@ async def edit_desc(self, meta): async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") - category = meta['category'] - if category == 'MOVIE': + category = meta["category"] + if category == "MOVIE": tmdbID = "movie" category = "Movies" if category == "TV": tmdbID = "tv" data = { - 'action': 'search', - 'tmdb_id': f"{tmdbID}/{meta['tmdb']}", - 'categories': category, - 'types': await self.get_type(meta), + "action": "search", + "tmdb_id": f"{tmdbID}/{meta['tmdb']}", + "categories": category, + "types": await self.get_type(meta), } # Search all releases if SD - if meta['sd'] == 1: - data['categories'] = None - data['types'] = None - if meta['category'] == 'TV': - if meta.get('tv_pack', 0) == 1: - data['pack'] = 1 - data['search'] = f"{meta.get('season', '')}{meta.get('episode', '')}" + if meta["sd"] == 1: + data["categories"] = None + data["types"] = None + if meta["category"] == "TV": + if meta.get("tv_pack", 0) == 1: + data["pack"] = 1 + data["search"] = f"{meta.get('season', '')}{meta.get('episode', '')}" url = f"https://beyond-hd.me/api/torrents/{self.config['TRACKERS']['BHD']['api_key'].strip()}" try: response = requests.post(url=url, data=data) response = response.json() - if response.get('status_code') == 1: - for each in response['results']: - result = each['name'] - difference = SequenceMatcher(None, meta['clean_name'].replace('DD+', 'DDP'), result).ratio() + if response.get("status_code") == 1: + for each in response["results"]: + result = each["name"] + difference = SequenceMatcher( + None, meta["clean_name"].replace("DD+", "DDP"), result + ).ratio() if difference >= 0.05: dupes.append(result) else: console.print(f"[yellow]{response.get('status_message')}") await asyncio.sleep(5) except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Most likely the site is down.') + console.print( + "[bold red]Unable to search for existing torrents on site. Most likely the site is down." 
+ ) await asyncio.sleep(5) return dupes async def get_live(self, meta): - draft = self.config['TRACKERS'][self.tracker]['draft_default'].strip() + draft = self.config["TRACKERS"][self.tracker]["draft_default"].strip() draft = bool(str2bool(str(draft))) # 0 for send to draft, 1 for live if draft: draft_int = 0 else: draft_int = 1 - if meta['draft']: + if meta["draft"]: draft_int = 0 return draft_int async def get_edition(self, meta, tags): custom = False - edition = meta.get('edition', "") + edition = meta.get("edition", "") if "Hybrid" in tags: - edition = edition.replace('Hybrid', '').strip() - editions = ['collector', 'cirector', 'extended', 'limited', 'special', 'theatrical', 'uncut', 'unrated'] + edition = edition.replace("Hybrid", "").strip() + editions = [ + "collector", + "director", + "extended", + "limited", + "special", + "theatrical", + "uncut", + "unrated", + ] for each in editions: - if each in meta.get('edition'): + if each in meta.get("edition"): edition = each elif edition == "": edition = "" @@ -283,46 +369,53 @@ async def get_tags(self, meta): tags = [] - if meta['type'] == "WEBRIP": + if meta["type"] == "WEBRIP": tags.append("WEBRip") - if meta['type'] == "WEBDL": + if meta["type"] == "WEBDL": tags.append("WEBDL") - if meta.get('3D') == "3D": - tags.append('3D') - if "Dual-Audio" in meta.get('audio', ""): - tags.append('DualAudio') - if "Dubbed" in meta.get('audio', ""): - tags.append('EnglishDub') - if "Open Matte" in meta.get('edition', ""): + if meta.get("3D") == "3D": + tags.append("3D") + if "Dual-Audio" in meta.get("audio", ""): + tags.append("DualAudio") + if "Dubbed" in meta.get("audio", ""): + tags.append("EnglishDub") + if "Open Matte" in meta.get("edition", ""): tags.append("OpenMatte") - if meta.get('scene', False) is True: + if meta.get("scene", False) is True: tags.append("Scene") - if meta.get('personalrelease', False) is True: - tags.append('Personal') - if "hybrid" in meta.get('edition', "").lower(): - tags.append('Hybrid') - if meta.get('has_commentary', False) is True: - tags.append('Commentary') - if "DV" in meta.get('hdr', ''): - tags.append('DV') - if "HDR" in meta.get('hdr', ''): - if "HDR10+" in meta['hdr']: - tags.append('HDR10+') + if meta.get("personalrelease", False) is True: + tags.append("Personal") + if "hybrid" in meta.get("edition", "").lower(): + tags.append("Hybrid") + if meta.get("has_commentary", False) is True: + tags.append("Commentary") + if "DV" in meta.get("hdr", ""): + tags.append("DV") + if "HDR" in meta.get("hdr", ""): + if "HDR10+" in meta["hdr"]: + tags.append("HDR10+") + else: - tags.append('HDR10') - if "HLG" in meta.get('hdr', ''): - tags.append('HLG') + tags.append("HDR10") + if "HLG" in meta.get("hdr", ""): + tags.append("HLG") return tags async def edit_name(self, meta): - name = meta.get('name') - if meta.get('source', '') in ('PAL DVD', 'NTSC DVD', 'DVD', 'NTSC', 'PAL'): - audio = meta.get('audio', '') - audio = ' '.join(audio.split()) + name = meta.get("name") + if meta.get("source", "") in ("PAL DVD", "NTSC DVD", "DVD", "NTSC", "PAL"): + audio = meta.get("audio", "") + audio = " ".join(audio.split()) name = name.replace(audio, f"{meta.get('video_codec')} {audio}") name = name.replace("DD+", "DDP") # if meta['type'] == 'WEBDL' and meta.get('has_encode_settings', False) == True: # name = name.replace('H.264', 'x264') - if meta['category'] == "TV" and meta.get('tv_pack', 0) == 0 and meta.get('episode_title_storage', '').strip() != '' and meta['episode'].strip() != '': - name = 
name.replace(meta['episode'], f"{meta['episode']} {meta['episode_title_storage']}", 1) + if ( + meta["category"] == "TV" + and meta.get("tv_pack", 0) == 0 + and meta.get("episode_title_storage", "").strip() != "" + and meta["episode"].strip() != "" + ): + name = name.replace( + meta["episode"], f"{meta['episode']} {meta['episode_title_storage']}", 1 + ) return name diff --git a/src/trackers/BHDTV.py b/src/trackers/BHDTV.py index 7dd05ed7d..0f2ad84cd 100644 --- a/src/trackers/BHDTV.py +++ b/src/trackers/BHDTV.py @@ -10,7 +10,7 @@ from pymediainfo import MediaInfo -class BHDTV(): +class BHDTV: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -21,11 +21,11 @@ class BHDTV(): def __init__(self, config): self.config = config - self.tracker = 'BHDTV' - self.source_flag = 'BIT-HDTV' + self.tracker = "BHDTV" + self.source_flag = "BIT-HDTV" # search not implemented # self.search_url = 'https://api.bit-hdtv.com/torrent/search/advanced' - self.upload_url = 'https://www.bit-hdtv.com/takeupload.php' + self.upload_url = "https://www.bit-hdtv.com/takeupload.php" # self.forum_link = 'https://www.bit-hdtv.com/rules.php' self.banned_groups = [] pass @@ -36,58 +36,92 @@ async def upload(self, meta, disctype): await self.edit_desc(meta) cat_id = await self.get_cat_id(meta) sub_cat_id = "" - if meta['category'] == 'MOVIE': + if meta["category"] == "MOVIE": sub_cat_id = await self.get_type_movie_id(meta) - elif meta['category'] == 'TV' and not meta['tv_pack']: - sub_cat_id = await self.get_type_tv_id(meta['type']) + elif meta["category"] == "TV" and not meta["tv_pack"]: + sub_cat_id = await self.get_type_tv_id(meta["type"]) else: # must be TV pack - sub_cat_id = await self.get_type_tv_pack_id(meta['type']) + sub_cat_id = await self.get_type_tv_pack_id(meta["type"]) - resolution_id = await self.get_res_id(meta['resolution']) + resolution_id = await self.get_res_id(meta["resolution"]) # region_id = await common.unit3d_region_ids(meta.get('region')) # distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool( - str2bool(self.config['TRACKERS'][self.tracker].get('anon', "False"))) is False: + if ( + meta["anon"] == 0 + and bool( + str2bool(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + is False + ): anon = 0 else: anon = 1 # noqa F841 - if meta['bdinfo'] is not None: + if meta["bdinfo"] is not None: mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + bd_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ).read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8').read() + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", + "r", + encoding="utf-8", + ).read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') - files = {'file': open_torrent} - - if meta['is_disc'] != 'BDMV': + desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + open_torrent = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", + "rb", + ) + files = {"file": open_torrent} + + if meta["is_disc"] != "BDMV": # Beautify MediaInfo for HDT 
using custom template - video = meta['filelist'][0] - mi_template = os.path.abspath(f"{meta['base_dir']}/data/templates/MEDIAINFO.txt") + video = meta["filelist"][0] + mi_template = os.path.abspath( + f"{meta['base_dir']}/data/templates/MEDIAINFO.txt" + ) if os.path.exists(mi_template): - media_info = MediaInfo.parse(video, output="STRING", full=False, - mediainfo_options={"inform": f"file://{mi_template}"}) + media_info = MediaInfo.parse( + video, + output="STRING", + full=False, + mediainfo_options={"inform": f"file://{mi_template}"}, + ) data = { - 'api_key': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': meta['name'].replace(' ', '.').replace(':.', '.').replace(':', '.').replace('DD+', 'DDP'), - 'mediainfo': mi_dump if bd_dump is None else bd_dump, - 'cat': cat_id, - 'subcat': sub_cat_id, - 'resolution': resolution_id, + "api_key": self.config["TRACKERS"][self.tracker]["api_key"].strip(), + "name": meta["name"] + .replace(" ", ".") + .replace(":.", ".") + .replace(":", ".") + .replace("DD+", "DDP"), + "mediainfo": mi_dump if bd_dump is None else bd_dump, + "cat": cat_id, + "subcat": sub_cat_id, + "resolution": resolution_id, # 'anon': anon, # admins asked to remove short description. - 'sdescr': " ", - 'descr': media_info if bd_dump is None else "Disc so Check Mediainfo dump ", - 'screen': desc, - 'url': f"https://www.tvmaze.com/shows/{meta['tvmaze_id']}" if meta['category'] == 'TV' else f"https://www.imdb.com/title/tt{meta['imdb_id']}", - 'format': 'json' + "sdescr": " ", + "descr": media_info if bd_dump is None else "Disc so Check Mediainfo dump ", + "screen": desc, + "url": ( + f"https://www.tvmaze.com/shows/{meta['tvmaze_id']}" + if meta["category"] == "TV" + else f"https://www.imdb.com/title/tt{meta['imdb_id']}" + ), + "format": "json", } - if meta['debug'] is False: + if meta["debug"] is False: response = requests.post(url=self.upload_url, data=data, files=files) try: # pprint(data) @@ -101,98 +135,113 @@ async def upload(self, meta, disctype): console.print("[cyan]Request Data:") pprint(data) # # adding my announce url to torrent. 
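# A minimal, hedged sketch of the idea behind the add_tracker_torrent calls
# below -- not the project's actual implementation. Assumptions: the helper
# rewrites the tracker-specific .torrent with a personal announce URL and
# embeds the site's "view" URL (or a failure note) as the torrent comment;
# torf is already a dependency of this codebase. add_announce_sketch and its
# parameters are hypothetical names used only for illustration.
from torf import Torrent

def add_announce_sketch(torrent_path, announce_url, comment):
    t = Torrent.read(torrent_path)         # load the freshly built .torrent
    t.trackers = [announce_url]            # point it at the personal announce URL
    t.comment = comment                    # e.g. the torrent's "view" page, or "Torrent Did not upload"
    t.write(torrent_path, overwrite=True)  # save the edited torrent in place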
- if 'view' in response.json()['data']: - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS']['BHDTV'].get('my_announce_url'), response.json()['data']['view']) + if "view" in response.json()["data"]: + await common.add_tracker_torrent( + meta, + self.tracker, + self.source_flag, + self.config["TRACKERS"]["BHDTV"].get("my_announce_url"), + response.json()["data"]["view"], + ) else: - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, - self.config['TRACKERS']['BHDTV'].get('my_announce_url'), - "Torrent Did not upload") + await common.add_tracker_torrent( + meta, + self.tracker, + self.source_flag, + self.config["TRACKERS"]["BHDTV"].get("my_announce_url"), + "Torrent Did not upload", + ) open_torrent.close() async def get_cat_id(self, meta): - category_id = '0' - if meta['category'] == 'MOVIE': - category_id = '7' - elif meta['tv_pack']: - category_id = '12' + category_id = "0" + if meta["category"] == "MOVIE": + category_id = "7" + elif meta["tv_pack"]: + category_id = "12" else: # must be tv episode - category_id = '10' + category_id = "10" return category_id async def get_type_movie_id(self, meta): - type_id = '0' - test = meta['type'] # noqa F841 - if meta['type'] == 'DISC': - if meta['3D']: - type_id = '46' + type_id = "0" + test = meta["type"] # noqa F841 + if meta["type"] == "DISC": + if meta["3D"]: + type_id = "46" else: - type_id = '2' - elif meta['type'] == 'REMUX': - if str(meta['name']).__contains__('265'): - type_id = '48' - elif meta['3D']: - type_id = '45' + type_id = "2" + elif meta["type"] == "REMUX": + if str(meta["name"]).__contains__("265"): + type_id = "48" + elif meta["3D"]: + type_id = "45" else: - type_id = '2' - elif meta['type'] == 'HDTV': - type_id = '6' - elif meta['type'] == 'ENCODE': - if str(meta['name']).__contains__('265'): - type_id = '43' - elif meta['3D']: - type_id = '44' + type_id = "2" + elif meta["type"] == "HDTV": + type_id = "6" + elif meta["type"] == "ENCODE": + if str(meta["name"]).__contains__("265"): + type_id = "43" + elif meta["3D"]: + type_id = "44" else: - type_id = '1' - elif meta['type'] == 'WEBDL' or meta['type'] == 'WEBRIP': - type_id = '5' + type_id = "1" + elif meta["type"] == "WEBDL" or meta["type"] == "WEBRIP": + type_id = "5" return type_id async def get_type_tv_id(self, type): type_id = { - 'HDTV': '7', - 'WEBDL': '8', - 'WEBRIP': '8', + "HDTV": "7", + "WEBDL": "8", + "WEBRIP": "8", # 'WEBRIP': '55', # 'SD': '59', - 'ENCODE': '10', - 'REMUX': '11', - 'DISC': '12', - }.get(type, '0') + "ENCODE": "10", + "REMUX": "11", + "DISC": "12", + }.get(type, "0") return type_id async def get_type_tv_pack_id(self, type): type_id = { - 'HDTV': '13', - 'WEBDL': '14', - 'WEBRIP': '8', + "HDTV": "13", + "WEBDL": "14", + "WEBRIP": "8", # 'WEBRIP': '55', # 'SD': '59', - 'ENCODE': '16', - 'REMUX': '17', - 'DISC': '18', - }.get(type, '0') + "ENCODE": "16", + "REMUX": "17", + "DISC": "18", + }.get(type, "0") return type_id async def get_res_id(self, resolution): - resolution_id = { - '2160p': '4', - '1080p': '3', - '1080i': '2', - '720p': '1' - }.get(resolution, '10') + resolution_id = {"2160p": "4", "1080p": "3", "1080i": "2", "720p": "1"}.get( + resolution, "10" + ) return resolution_id async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as desc: + base = open( + 
f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + with open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "w", + encoding="utf-8", + ) as desc: desc.write(base.replace("[img=250]", "[img=250x250]")) - images = meta['image_list'] + images = meta["image_list"] if len(images) > 0: for each in range(len(images)): - web_url = images[each]['web_url'] - img_url = images[each]['img_url'] + web_url = images[each]["web_url"] + img_url = images[each]["img_url"] desc.write(f"[url={web_url}][img]{img_url}[/img][/url] ") # desc.write(common.get_links(meta, "[COLOR=red][size=4]", "[/size][/color]")) desc.close() @@ -200,5 +249,5 @@ async def edit_desc(self, meta): async def search_existing(self, meta, disctype): console.print("[red]Dupes must be checked Manually") - return ['Dupes must be checked Manually'] + return ["Dupes must be checked Manually"] # hopefully someone else has the time to implement this. diff --git a/src/trackers/BLU.py b/src/trackers/BLU.py index 6ce0dba5d..0316b8c40 100644 --- a/src/trackers/BLU.py +++ b/src/trackers/BLU.py @@ -9,7 +9,7 @@ from src.console import console -class BLU(): +class BLU: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -17,98 +17,207 @@ class BLU(): Set type/category IDs Upload """ + def __init__(self, config): self.config = config - self.tracker = 'BLU' - self.source_flag = 'BLU' - self.search_url = 'https://blutopia.cc/api/torrents/filter' - self.torrent_url = 'https://blutopia.cc/api/torrents/' - self.upload_url = 'https://blutopia.cc/api/torrents/upload' + self.tracker = "BLU" + self.source_flag = "BLU" + self.search_url = "https://blutopia.cc/api/torrents/filter" + self.torrent_url = "https://blutopia.cc/api/torrents/" + self.upload_url = "https://blutopia.cc/api/torrents/upload" self.signature = "\n[center][url=https://blutopia.cc/forums/topics/3087/posts/42941]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = [ - '[Oj]', '3LTON', '4yEo', 'ADE', 'AFG', 'AniHLS', 'AnimeRG', 'AniURL', 'AROMA', 'aXXo', 'Brrip', 'CHD', 'CM8', 'CrEwSaDe', 'd3g', 'DeadFish', 'DNL', 'ELiTE', 'eSc', 'FaNGDiNG0', 'FGT', 'Flights', - 'FRDS', 'FUM', 'HAiKU', 'HD2DVD', 'HDS', 'HDTime', 'Hi10', 'ION10', 'iPlanet', 'JIVE', 'KiNGDOM', 'Leffe', 'LEGi0N', 'LOAD', 'MeGusta', 'mHD', 'mSD', 'NhaNc3', 'nHD', 'nikt0', 'NOIVTC', 'OFT', - 'nSD', 'PiRaTeS', 'playBD', 'PlaySD', 'playXD', 'PRODJi', 'RAPiDCOWS', 'RARBG', 'RetroPeeps', 'RDN', 'REsuRRecTioN', 'RMTeam', 'SANTi', 'SicFoI', 'SPASM', 'SPDVD', 'STUTTERSHIT', 'Telly', 'TM', - 'TRiToN', 'UPiNSMOKE', 'URANiME', 'WAF', 'x0r', 'xRed', 'XS', 'YIFY', 'ZKBL', 'ZmN', 'ZMNT', 'AOC', - ['EVO', 'Raw Content Only'], ['TERMiNAL', 'Raw Content Only'], ['ViSION', 'Note the capitalization and characters used'], ['CMRG', 'Raw Content Only'] + "[Oj]", + "3LTON", + "4yEo", + "ADE", + "AFG", + "AniHLS", + "AnimeRG", + "AniURL", + "AROMA", + "aXXo", + "Brrip", + "CHD", + "CM8", + "CrEwSaDe", + "d3g", + "DeadFish", + "DNL", + "ELiTE", + "eSc", + "FaNGDiNG0", + "FGT", + "Flights", + "FRDS", + "FUM", + "HAiKU", + "HD2DVD", + "HDS", + "HDTime", + "Hi10", + "ION10", + "iPlanet", + "JIVE", + "KiNGDOM", + "Leffe", + "LEGi0N", + "LOAD", + "MeGusta", + "mHD", + "mSD", + "NhaNc3", + "nHD", + "nikt0", + "NOIVTC", + "OFT", + "nSD", + "PiRaTeS", + "playBD", + "PlaySD", + "playXD", + "PRODJi", + "RAPiDCOWS", + "RARBG", + "RetroPeeps", + "RDN", + "REsuRRecTioN", + "RMTeam", + "SANTi", + "SicFoI", + "SPASM", + "SPDVD", + "STUTTERSHIT", + "Telly", + "TM", 
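# The banned_groups list being reflowed here mixes bare group names with
# [name, reason] pairs, so a membership check has to normalize both shapes.
# A minimal sketch (the meta["tag"][1:] slicing mirrors the internal-groups
# check later in this diff):
#     names = {g[0] if isinstance(g, list) else g for g in self.banned_groups}
#     banned = meta["tag"][1:] in names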
+ "TRiToN", + "UPiNSMOKE", + "URANiME", + "WAF", + "x0r", + "xRed", + "XS", + "YIFY", + "ZKBL", + "ZmN", + "ZMNT", + "AOC", + ["EVO", "Raw Content Only"], + ["TERMiNAL", "Raw Content Only"], + ["ViSION", "Note the capitalization and characters used"], + ["CMRG", "Raw Content Only"], ] pass async def upload(self, meta, disctype): common = COMMON(config=self.config) - blu_name = meta['name'] + blu_name = meta["name"] desc_header = "" - if meta.get('webdv', False): + if meta.get("webdv", False): blu_name, desc_header = await self.derived_dv_layer(meta) await common.edit_torrent(meta, self.tracker, self.source_flag) - await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True, desc_header=desc_header) - cat_id = await self.get_cat_id(meta['category'], meta.get('edition', '')) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - modq = await self.get_flag(meta, 'modq') - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + await common.unit3d_edit_desc( + meta, self.tracker, self.signature, comparison=True, desc_header=desc_header + ) + cat_id = await self.get_cat_id(meta["category"], meta.get("edition", "")) + type_id = await self.get_type_id(meta["type"]) + resolution_id = await self.get_res_id(meta["resolution"]) + modq = await self.get_flag(meta, "modq") + region_id = await common.unit3d_region_ids(meta.get("region")) + distributor_id = await common.unit3d_distributor_ids(meta.get("distributor")) + if ( + meta["anon"] == 0 + and bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + is False + ): anon = 0 else: anon = 1 - if meta['bdinfo'] is not None: + if meta["bdinfo"] is not None: mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + bd_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ).read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ).read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[BLU]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[BLU]{meta['clean_name']}.torrent", 'rb') - files = {'torrent': ("placeholder.torrent", open_torrent, "application/x-bittorrent")} + desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[BLU]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + open_torrent = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[BLU]{meta['clean_name']}.torrent", + "rb", + ) + files = { + "torrent": ("placeholder.torrent", open_torrent, "application/x-bittorrent") + } data = { - 'name': blu_name, - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb_id'].replace('tt', ''), - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, 
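# The anon block earlier in this hunk reduces to a single OR: the upload is
# anonymous when either the per-run flag or the tracker config requests it.
# An equivalent sketch using the same str2bool helper:
#     cfg_anon = str2bool(str(self.config["TRACKERS"][self.tracker].get("anon", "False")))
#     anon = 1 if meta["anon"] != 0 or cfg_anon else 0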
- 'free': 0, - 'doubleup': 0, - 'sticky': 0, - 'mod_queue_opt_in': modq, + "name": blu_name, + "description": desc, + "mediainfo": mi_dump, + "bdinfo": bd_dump, + "category_id": cat_id, + "type_id": type_id, + "resolution_id": resolution_id, + "tmdb": meta["tmdb"], + "imdb": meta["imdb_id"].replace("tt", ""), + "tvdb": meta["tvdb_id"], + "mal": meta["mal_id"], + "igdb": 0, + "anonymous": anon, + "stream": meta["stream"], + "sd": meta["sd"], + "keywords": meta["keywords"], + "personal_release": int(meta.get("personalrelease", False)), + "internal": 0, + "featured": 0, + "free": 0, + "doubleup": 0, + "sticky": 0, + "mod_queue_opt_in": modq, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 + if self.config["TRACKERS"][self.tracker].get("internal", False) is True: + if meta["tag"] != "" and ( + meta["tag"][1:] + in self.config["TRACKERS"][self.tracker].get("internal_groups", []) + ): + data["internal"] = 1 if region_id != 0: - data['region_id'] = region_id + data["region_id"] = region_id if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') + data["distributor_id"] = distributor_id + if meta.get("category") == "TV": + data["season_number"] = meta.get("season_int", "0") + data["episode_number"] = meta.get("episode_int", "0") headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + "User-Agent": f"Upload Assistant/2.1 ({platform.system()} {platform.release()})" } + params = {"api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip()} - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + if meta["debug"] is False: + response = requests.post( + url=self.upload_url, + files=files, + data=data, + headers=headers, + params=params, + ) try: console.print(response.json()) except Exception: @@ -121,100 +230,110 @@ async def upload(self, meta, disctype): open_torrent.close() async def get_flag(self, meta, flag_name): - config_flag = self.config['TRACKERS'][self.tracker].get(flag_name) + config_flag = self.config["TRACKERS"][self.tracker].get(flag_name) if config_flag is not None: return 1 if config_flag else 0 return 1 if meta.get(flag_name, False) else 0 async def get_cat_id(self, category_name, edition): - category_id = { - 'MOVIE': '1', - 'TV': '2', - 'FANRES': '3' - }.get(category_name, '0') - if category_name == 'MOVIE' and 'FANRES' in edition: - category_id = '3' + category_id = {"MOVIE": "1", "TV": "2", "FANRES": "3"}.get(category_name, "0") + if category_name == "MOVIE" and "FANRES" in edition: + category_id = "3" return category_id async def get_type_id(self, type): type_id = { - 'DISC': '1', - 'REMUX': '3', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - 'ENCODE': '12' - }.get(type, '0') + "DISC": "1", + "REMUX": "3", + "WEBDL": "4", + "WEBRIP": "5", + "HDTV": "6", + "ENCODE": "12", + }.get(type, "0") return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p': '10', - '4320p': '11', - '2160p': '1', - '1440p': '2', - '1080p': '2', - '1080i': '3', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - 
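# Every id lookup in these tracker classes shares one shape: a dict literal
# plus .get with a string default instead of an if/elif ladder, e.g. (sketch):
#     type_id = {"DISC": "1", "REMUX": "3", "WEBDL": "4"}.get(type_, "0")
# Unmapped inputs fall back to the default id rather than raising KeyError.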
}.get(resolution, '10') + "8640p": "10", + "4320p": "11", + "2160p": "1", + "1440p": "2", + "1080p": "2", + "1080i": "3", + "720p": "5", + "576p": "6", + "576i": "7", + "480p": "8", + "480i": "9", + }.get(resolution, "10") return resolution_id async def derived_dv_layer(self, meta): - name = meta['name'] + name = meta["name"] desc_header = "" # Exit if not DV + HDR - if not all([x in meta['hdr'] for x in ['HDR', 'DV']]): + if not all([x in meta["hdr"] for x in ["HDR", "DV"]]): return name, desc_header import cli_ui - console.print("[bold yellow]Generating the required description addition for Derived DV Layers. Please respond appropriately.") + + console.print( + "[bold yellow]Generating the required description addition for Derived DV Layers. Please respond appropriately." + ) ask_comp = True - if meta['type'] == "WEBDL": - if cli_ui.ask_yes_no("Is the DV Layer sourced from the same service as the video?"): + if meta["type"] == "WEBDL": + if cli_ui.ask_yes_no( + "Is the DV Layer sourced from the same service as the video?" + ): ask_comp = False desc_header = "[code]This release contains a derived Dolby Vision profile 8 layer. Comparisons not required as DV and HDR are from same provider.[/code]" if ask_comp: while desc_header == "": - desc_input = cli_ui.ask_string("Please provide comparisons between HDR masters. (link or bbcode)", default="") + desc_input = cli_ui.ask_string( + "Please provide comparisons between HDR masters. (link or bbcode)", + default="", + ) desc_header = f"[code]This release contains a derived Dolby Vision profile 8 layer. Comparisons between HDR masters: {desc_input}[/code]" if "hybrid" not in name.lower(): if "REPACK" in name: - name = name.replace('REPACK', 'Hybrid REPACK') + name = name.replace("REPACK", "Hybrid REPACK") else: - name = name.replace(meta['resolution'], f"Hybrid {meta['resolution']}") + name = name.replace(meta["resolution"], f"Hybrid {meta['resolution']}") return name, desc_header async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category'], meta.get('edition', '')), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" + "api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip(), + "tmdbId": meta["tmdb"], + "categories[]": await self.get_cat_id( + meta["category"], meta.get("edition", "") + ), + "types[]": await self.get_type_id(meta["type"]), + "resolutions[]": await self.get_res_id(meta["resolution"]), + "name": "", } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" + if meta["category"] == "TV": + params["name"] = ( + params["name"] + f" {meta.get('season', '')}{meta.get('episode', '')}" + ) + if meta.get("edition", "") != "": + params["name"] = params["name"] + f" {meta['edition']}" try: response = requests.get(url=self.search_url, params=params) response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] + for each in response["data"]: + result = [each][0]["attributes"]["name"] # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) except Exception: - 
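# The bare `except Exception` above treats HTTP, JSON and schema failures
# alike, and `[each][0]` in the loop is just `each`. A narrower sketch of the
# same lookup (timeout and raise_for_status are additions, not in this diff):
#     response = requests.get(url=self.search_url, params=params, timeout=30)
#     response.raise_for_status()
#     dupes = [t["attributes"]["name"] for t in response.json()["data"]]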
console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + console.print( + "[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect" + ) await asyncio.sleep(5) return dupes diff --git a/src/trackers/CBR.py b/src/trackers/CBR.py index c090e80af..9ba89eb08 100644 --- a/src/trackers/CBR.py +++ b/src/trackers/CBR.py @@ -9,7 +9,7 @@ from src.console import console -class CBR(): +class CBR: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -17,14 +17,17 @@ class CBR(): Set type/category IDs Upload """ + def __init__(self, config): self.config = config - self.tracker = 'CBR' - self.source_flag = 'CapybaraBR' - self.search_url = 'https://capybarabr.com/api/torrents/filter' - self.torrent_url = 'https://capybarabr.com/api/torrents/' - self.upload_url = 'https://capybarabr.com/api/torrents/upload' - self.signature = "\n[center][img]https://i.ibb.co/tYNzwgd/thanks-cbr.png[/img][/center]" + self.tracker = "CBR" + self.source_flag = "CapybaraBR" + self.search_url = "https://capybarabr.com/api/torrents/filter" + self.torrent_url = "https://capybarabr.com/api/torrents/" + self.upload_url = "https://capybarabr.com/api/torrents/upload" + self.signature = ( + "\n[center][img]https://i.ibb.co/tYNzwgd/thanks-cbr.png[/img][/center]" + ) self.banned_groups = [""] pass @@ -32,71 +35,103 @@ async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, self.tracker, self.signature) - cat_id = await self.get_cat_id(meta['category'], meta.get('edition', ''), meta) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + cat_id = await self.get_cat_id(meta["category"], meta.get("edition", ""), meta) + type_id = await self.get_type_id(meta["type"]) + resolution_id = await self.get_res_id(meta["resolution"]) + region_id = await common.unit3d_region_ids(meta.get("region")) + distributor_id = await common.unit3d_distributor_ids(meta.get("distributor")) name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + if ( + meta["anon"] == 0 + and bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + is False + ): anon = 0 else: anon = 1 - if meta['bdinfo'] is not None: + if meta["bdinfo"] is not None: mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + bd_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ).read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ).read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[CBR]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[CBR]{meta['clean_name']}.torrent", 'rb') - files = {'torrent': ("placeholder.torrent", open_torrent, "application/x-bittorrent")} + desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[CBR]DESCRIPTION.txt", + 
"r", + encoding="utf-8", + ).read() + open_torrent = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[CBR]{meta['clean_name']}.torrent", + "rb", + ) + files = { + "torrent": ("placeholder.torrent", open_torrent, "application/x-bittorrent") + } data = { - 'name': name, - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb_id'].replace('tt', ''), - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, + "name": name, + "description": desc, + "mediainfo": mi_dump, + "bdinfo": bd_dump, + "category_id": cat_id, + "type_id": type_id, + "resolution_id": resolution_id, + "tmdb": meta["tmdb"], + "imdb": meta["imdb_id"].replace("tt", ""), + "tvdb": meta["tvdb_id"], + "mal": meta["mal_id"], + "igdb": 0, + "anonymous": anon, + "stream": meta["stream"], + "sd": meta["sd"], + "keywords": meta["keywords"], + "personal_release": int(meta.get("personalrelease", False)), + "internal": 0, + "featured": 0, + "free": 0, + "doubleup": 0, + "sticky": 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 + if self.config["TRACKERS"][self.tracker].get("internal", False) is True: + if meta["tag"] != "" and ( + meta["tag"][1:] + in self.config["TRACKERS"][self.tracker].get("internal_groups", []) + ): + data["internal"] = 1 if region_id != 0: - data['region_id'] = region_id + data["region_id"] = region_id if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') + data["distributor_id"] = distributor_id + if meta.get("category") == "TV": + data["season_number"] = meta.get("season_int", "0") + data["episode_number"] = meta.get("episode_int", "0") headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + "User-Agent": f"Upload Assistant/2.1 ({platform.system()} {platform.release()})" } + params = {"api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip()} - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + if meta["debug"] is False: + response = requests.post( + url=self.upload_url, + files=files, + data=data, + headers=headers, + params=params, + ) try: console.print(response.json()) except Exception: @@ -108,72 +143,102 @@ async def upload(self, meta, disctype): open_torrent.close() async def get_cat_id(self, category_name, edition, meta): - category_id = { - 'MOVIE': '1', - 'TV': '2', - 'ANIMES': '4' - }.get(category_name, '0') - if meta['anime'] is True and category_id == '2': - category_id = '4' + category_id = {"MOVIE": "1", "TV": "2", "ANIMES": "4"}.get(category_name, "0") + if meta["anime"] is True and category_id == "2": + category_id = "4" return category_id async def get_type_id(self, type): type_id = { - 'DISC': '1', - 'REMUX': '2', - 'ENCODE': '3', - 'WEBDL': '4', - 'WEBRIP': '5', - 
'HDTV': '6' - }.get(type, '0') + "DISC": "1", + "REMUX": "2", + "ENCODE": "3", + "WEBDL": "4", + "WEBRIP": "5", + "HDTV": "6", + }.get(type, "0") return type_id async def get_res_id(self, resolution): resolution_id = { - '4320p': '1', - '2160p': '2', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9', - 'Other': '10', - }.get(resolution, '10') + "4320p": "1", + "2160p": "2", + "1080p": "3", + "1080i": "4", + "720p": "5", + "576p": "6", + "576i": "7", + "480p": "8", + "480i": "9", + "Other": "10", + }.get(resolution, "10") return resolution_id async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Buscando por duplicatas no tracker...") params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category'], meta.get('edition', ''), meta), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" + "api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip(), + "tmdbId": meta["tmdb"], + "categories[]": await self.get_cat_id( + meta["category"], meta.get("edition", ""), meta + ), + "types[]": await self.get_type_id(meta["type"]), + "resolutions[]": await self.get_res_id(meta["resolution"]), + "name": "", } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" + if meta["category"] == "TV": + params["name"] = ( + params["name"] + f" {meta.get('season', '')}{meta.get('episode', '')}" + ) + if meta.get("edition", "") != "": + params["name"] = params["name"] + f" {meta['edition']}" try: response = requests.get(url=self.search_url, params=params) response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] + for each in response["data"]: + result = [each][0]["attributes"]["name"] # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) except Exception: - console.print('[bold red]Não foi possivel buscar no tracker torrents duplicados. O tracker está offline ou sua api está incorreta') + console.print( + "[bold red]Não foi possivel buscar no tracker torrents duplicados. 
O tracker está offline ou sua api está incorreta" + ) await asyncio.sleep(5) return dupes async def edit_name(self, meta): - name = meta['uuid'].replace('.mkv', '').replace('.mp4', '').replace(".", " ").replace("DDP2 0", "DDP2.0").replace("DDP5 1", "DDP5.1").replace("H 264", "H.264").replace("H 265", "H.265").replace("DD+7 1", "DDP7.1").replace("AAC2 0", "AAC2.0").replace('DD5 1', 'DD5.1').replace('DD2 0', 'DD2.0').replace('TrueHD 7 1', 'TrueHD 7.1').replace('DTS-HD MA 7 1', 'DTS-HD MA 7.1').replace('DTS-HD MA 5 1', 'DTS-HD MA 5.1').replace("TrueHD 5 1", "TrueHD 5.1").replace("DTS-X 7 1", "DTS-X 7.1").replace("DTS-X 5 1", "DTS-X 5.1").replace("FLAC 2 0", "FLAC 2.0").replace("FLAC 5 1", "FLAC 5.1").replace("DD1 0", "DD1.0").replace("DTS ES 5 1", "DTS ES 5.1").replace("DTS5 1", "DTS 5.1").replace("AAC1 0", "AAC1.0").replace("DD+5 1", "DDP5.1").replace("DD+2 0", "DDP2.0").replace("DD+1 0", "DDP1.0") + name = ( + meta["uuid"] + .replace(".mkv", "") + .replace(".mp4", "") + .replace(".", " ") + .replace("DDP2 0", "DDP2.0") + .replace("DDP5 1", "DDP5.1") + .replace("H 264", "H.264") + .replace("H 265", "H.265") + .replace("DD+7 1", "DDP7.1") + .replace("AAC2 0", "AAC2.0") + .replace("DD5 1", "DD5.1") + .replace("DD2 0", "DD2.0") + .replace("TrueHD 7 1", "TrueHD 7.1") + .replace("DTS-HD MA 7 1", "DTS-HD MA 7.1") + .replace("DTS-HD MA 5 1", "DTS-HD MA 5.1") + .replace("TrueHD 5 1", "TrueHD 5.1") + .replace("DTS-X 7 1", "DTS-X 7.1") + .replace("DTS-X 5 1", "DTS-X 5.1") + .replace("FLAC 2 0", "FLAC 2.0") + .replace("FLAC 5 1", "FLAC 5.1") + .replace("DD1 0", "DD1.0") + .replace("DTS ES 5 1", "DTS ES 5.1") + .replace("DTS5 1", "DTS 5.1") + .replace("AAC1 0", "AAC1.0") + .replace("DD+5 1", "DDP5.1") + .replace("DD+2 0", "DDP2.0") + .replace("DD+1 0", "DDP1.0") + ) return name diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index a22af01a3..0641aa931 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -10,54 +10,97 @@ from src.console import console -class COMMON(): +class COMMON: def __init__(self, config): self.config = config pass async def edit_torrent(self, meta, tracker, source_flag, torrent_filename="BASE"): - if os.path.exists(f"{meta['base_dir']}/tmp/{meta['uuid']}/{torrent_filename}.torrent"): - new_torrent = Torrent.read(f"{meta['base_dir']}/tmp/{meta['uuid']}/{torrent_filename}.torrent") + if os.path.exists( + f"{meta['base_dir']}/tmp/{meta['uuid']}/{torrent_filename}.torrent" + ): + new_torrent = Torrent.read( + f"{meta['base_dir']}/tmp/{meta['uuid']}/{torrent_filename}.torrent" + ) for each in list(new_torrent.metainfo): - if each not in ('announce', 'comment', 'creation date', 'created by', 'encoding', 'info'): + if each not in ( + "announce", + "comment", + "creation date", + "created by", + "encoding", + "info", + ): new_torrent.metainfo.pop(each, None) - new_torrent.metainfo['announce'] = self.config['TRACKERS'][tracker].get('announce_url', "https://fake.tracker").strip() - new_torrent.metainfo['info']['source'] = source_flag - Torrent.copy(new_torrent).write(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]{meta['clean_name']}.torrent", overwrite=True) + new_torrent.metainfo["announce"] = ( + self.config["TRACKERS"][tracker] + .get("announce_url", "https://fake.tracker") + .strip() + ) + new_torrent.metainfo["info"]["source"] = source_flag + Torrent.copy(new_torrent).write( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]{meta['clean_name']}.torrent", + overwrite=True, + ) # used to add tracker url, comment and source flag to torrent file - async 
def add_tracker_torrent(self, meta, tracker, source_flag, new_tracker, comment): + async def add_tracker_torrent( + self, meta, tracker, source_flag, new_tracker, comment + ): if os.path.exists(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent"): - new_torrent = Torrent.read(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") - new_torrent.metainfo['announce'] = new_tracker - new_torrent.metainfo['comment'] = comment - new_torrent.metainfo['info']['source'] = source_flag - Torrent.copy(new_torrent).write(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]{meta['clean_name']}.torrent", overwrite=True) - - async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, desc_header=""): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf8').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]DESCRIPTION.txt", 'w', encoding='utf8') as descfile: + new_torrent = Torrent.read( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent" + ) + new_torrent.metainfo["announce"] = new_tracker + new_torrent.metainfo["comment"] = comment + new_torrent.metainfo["info"]["source"] = source_flag + Torrent.copy(new_torrent).write( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]{meta['clean_name']}.torrent", + overwrite=True, + ) + + async def unit3d_edit_desc( + self, meta, tracker, signature, comparison=False, desc_header="" + ): + base = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", + "r", + encoding="utf8", + ).read() + with open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]DESCRIPTION.txt", + "w", + encoding="utf8", + ) as descfile: if desc_header != "": descfile.write(desc_header) bbcode = BBCODE() - if meta.get('discs', []) != []: - discs = meta['discs'] - if discs[0]['type'] == "DVD": - descfile.write(f"[spoiler=VOB MediaInfo][code]{discs[0]['vob_mi']}[/code][/spoiler]\n") + if meta.get("discs", []) != []: + discs = meta["discs"] + if discs[0]["type"] == "DVD": + descfile.write( + f"[spoiler=VOB MediaInfo][code]{discs[0]['vob_mi']}[/code][/spoiler]\n" + ) descfile.write("\n") if len(discs) >= 2: for each in discs[1:]: - if each['type'] == "BDMV": - descfile.write(f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]\n") + if each["type"] == "BDMV": + descfile.write( + f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]\n" + ) descfile.write("\n") - elif each['type'] == "DVD": + elif each["type"] == "DVD": descfile.write(f"{each['name']}:\n") - descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code][{each['vob_mi']}[/code][/spoiler] [spoiler={os.path.basename(each['ifo'])}][code][{each['ifo_mi']}[/code][/spoiler]\n") + descfile.write( + f"[spoiler={os.path.basename(each['vob'])}][code]{each['vob_mi']}[/code][/spoiler] [spoiler={os.path.basename(each['ifo'])}][code]{each['ifo_mi']}[/code][/spoiler]\n" + ) descfile.write("\n") - elif each['type'] == "HDDVD": + elif each["type"] == "HDDVD": descfile.write(f"{each['name']}:\n") - descfile.write(f"[spoiler={os.path.basename(each['largest_evo'])}][code][{each['evo_mi']}[/code][/spoiler]\n") + descfile.write( + f"[spoiler={os.path.basename(each['largest_evo'])}][code]{each['evo_mi']}[/code][/spoiler]\n" + ) descfile.write("\n") desc = base desc = bbcode.convert_pre_to_code(desc) @@ -65,14 +108,14 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des if comparison is False: desc = bbcode.convert_comparison_to_collapse(desc, 1000) - desc =
desc.replace('[img]', '[img=300]') + desc = desc.replace("[img]", "[img=300]") descfile.write(desc) - images = meta['image_list'] + images = meta["image_list"] if len(images) > 0: descfile.write("[center]") - for each in range(len(images[:int(meta['screens'])])): - web_url = images[each]['web_url'] - raw_url = images[each]['raw_url'] + for each in range(len(images[: int(meta["screens"])])): + web_url = images[each]["web_url"] + raw_url = images[each]["raw_url"] descfile.write(f"[url={web_url}][img=350]{raw_url}[/img][/url]") descfile.write("[/center]") @@ -83,73 +126,1988 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des async def unit3d_region_ids(self, region): region_id = { - 'AFG': 1, 'AIA': 2, 'ALA': 3, 'ALG': 4, 'AND': 5, 'ANG': 6, 'ARG': 7, 'ARM': 8, 'ARU': 9, - 'ASA': 10, 'ATA': 11, 'ATF': 12, 'ATG': 13, 'AUS': 14, 'AUT': 15, 'AZE': 16, 'BAH': 17, - 'BAN': 18, 'BDI': 19, 'BEL': 20, 'BEN': 21, 'BER': 22, 'BES': 23, 'BFA': 24, 'BHR': 25, - 'BHU': 26, 'BIH': 27, 'BLM': 28, 'BLR': 29, 'BLZ': 30, 'BOL': 31, 'BOT': 32, 'BRA': 33, - 'BRB': 34, 'BRU': 35, 'BVT': 36, 'CAM': 37, 'CAN': 38, 'CAY': 39, 'CCK': 40, 'CEE': 41, - 'CGO': 42, 'CHA': 43, 'CHI': 44, 'CHN': 45, 'CIV': 46, 'CMR': 47, 'COD': 48, 'COK': 49, - 'COL': 50, 'COM': 51, 'CPV': 52, 'CRC': 53, 'CRO': 54, 'CTA': 55, 'CUB': 56, 'CUW': 57, - 'CXR': 58, 'CYP': 59, 'DJI': 60, 'DMA': 61, 'DOM': 62, 'ECU': 63, 'EGY': 64, 'ENG': 65, - 'EQG': 66, 'ERI': 67, 'ESH': 68, 'ESP': 69, 'ETH': 70, 'FIJ': 71, 'FLK': 72, 'FRA': 73, - 'FRO': 74, 'FSM': 75, 'GAB': 76, 'GAM': 77, 'GBR': 78, 'GEO': 79, 'GER': 80, 'GGY': 81, - 'GHA': 82, 'GIB': 83, 'GLP': 84, 'GNB': 85, 'GRE': 86, 'GRL': 87, 'GRN': 88, 'GUA': 89, - 'GUF': 90, 'GUI': 91, 'GUM': 92, 'GUY': 93, 'HAI': 94, 'HKG': 95, 'HMD': 96, 'HON': 97, - 'HUN': 98, 'IDN': 99, 'IMN': 100, 'IND': 101, 'IOT': 102, 'IRL': 103, 'IRN': 104, 'IRQ': 105, - 'ISL': 106, 'ISR': 107, 'ITA': 108, 'JAM': 109, 'JEY': 110, 'JOR': 111, 'JPN': 112, 'KAZ': 113, - 'KEN': 114, 'KGZ': 115, 'KIR': 116, 'KNA': 117, 'KOR': 118, 'KSA': 119, 'KUW': 120, 'KVX': 121, - 'LAO': 122, 'LBN': 123, 'LBR': 124, 'LBY': 125, 'LCA': 126, 'LES': 127, 'LIE': 128, 'LKA': 129, - 'LUX': 130, 'MAC': 131, 'MAD': 132, 'MAF': 133, 'MAR': 134, 'MAS': 135, 'MDA': 136, 'MDV': 137, - 'MEX': 138, 'MHL': 139, 'MKD': 140, 'MLI': 141, 'MLT': 142, 'MNG': 143, 'MNP': 144, 'MON': 145, - 'MOZ': 146, 'MRI': 147, 'MSR': 148, 'MTN': 149, 'MTQ': 150, 'MWI': 151, 'MYA': 152, 'MYT': 153, - 'NAM': 154, 'NCA': 155, 'NCL': 156, 'NEP': 157, 'NFK': 158, 'NIG': 159, 'NIR': 160, 'NIU': 161, - 'NLD': 162, 'NOR': 163, 'NRU': 164, 'NZL': 165, 'OMA': 166, 'PAK': 167, 'PAN': 168, 'PAR': 169, - 'PCN': 170, 'PER': 171, 'PHI': 172, 'PLE': 173, 'PLW': 174, 'PNG': 175, 'POL': 176, 'POR': 177, - 'PRK': 178, 'PUR': 179, 'QAT': 180, 'REU': 181, 'ROU': 182, 'RSA': 183, 'RUS': 184, 'RWA': 185, - 'SAM': 186, 'SCO': 187, 'SDN': 188, 'SEN': 189, 'SEY': 190, 'SGS': 191, 'SHN': 192, 'SIN': 193, - 'SJM': 194, 'SLE': 195, 'SLV': 196, 'SMR': 197, 'SOL': 198, 'SOM': 199, 'SPM': 200, 'SRB': 201, - 'SSD': 202, 'STP': 203, 'SUI': 204, 'SUR': 205, 'SWZ': 206, 'SXM': 207, 'SYR': 208, 'TAH': 209, - 'TAN': 210, 'TCA': 211, 'TGA': 212, 'THA': 213, 'TJK': 214, 'TKL': 215, 'TKM': 216, 'TLS': 217, - 'TOG': 218, 'TRI': 219, 'TUN': 220, 'TUR': 221, 'TUV': 222, 'TWN': 223, 'UAE': 224, 'UGA': 225, - 'UKR': 226, 'UMI': 227, 'URU': 228, 'USA': 229, 'UZB': 230, 'VAN': 231, 'VAT': 232, 'VEN': 233, - 'VGB': 234, 'VIE': 235, 'VIN': 236, 'VIR': 237, 'WAL': 238, 'WLF': 239, 'YEM': 240, 
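# Usage sketch for the region table being reformatted here:
#     await common.unit3d_region_ids("USA")  ->  229
# Unknown codes fall back to 0 via .get(region, 0), which callers read as
# "omit region_id from the payload" (see the `if region_id != 0` checks above).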
'ZAM': 241, - 'ZIM': 242, 'EUR': 243 + "AFG": 1, + "AIA": 2, + "ALA": 3, + "ALG": 4, + "AND": 5, + "ANG": 6, + "ARG": 7, + "ARM": 8, + "ARU": 9, + "ASA": 10, + "ATA": 11, + "ATF": 12, + "ATG": 13, + "AUS": 14, + "AUT": 15, + "AZE": 16, + "BAH": 17, + "BAN": 18, + "BDI": 19, + "BEL": 20, + "BEN": 21, + "BER": 22, + "BES": 23, + "BFA": 24, + "BHR": 25, + "BHU": 26, + "BIH": 27, + "BLM": 28, + "BLR": 29, + "BLZ": 30, + "BOL": 31, + "BOT": 32, + "BRA": 33, + "BRB": 34, + "BRU": 35, + "BVT": 36, + "CAM": 37, + "CAN": 38, + "CAY": 39, + "CCK": 40, + "CEE": 41, + "CGO": 42, + "CHA": 43, + "CHI": 44, + "CHN": 45, + "CIV": 46, + "CMR": 47, + "COD": 48, + "COK": 49, + "COL": 50, + "COM": 51, + "CPV": 52, + "CRC": 53, + "CRO": 54, + "CTA": 55, + "CUB": 56, + "CUW": 57, + "CXR": 58, + "CYP": 59, + "DJI": 60, + "DMA": 61, + "DOM": 62, + "ECU": 63, + "EGY": 64, + "ENG": 65, + "EQG": 66, + "ERI": 67, + "ESH": 68, + "ESP": 69, + "ETH": 70, + "FIJ": 71, + "FLK": 72, + "FRA": 73, + "FRO": 74, + "FSM": 75, + "GAB": 76, + "GAM": 77, + "GBR": 78, + "GEO": 79, + "GER": 80, + "GGY": 81, + "GHA": 82, + "GIB": 83, + "GLP": 84, + "GNB": 85, + "GRE": 86, + "GRL": 87, + "GRN": 88, + "GUA": 89, + "GUF": 90, + "GUI": 91, + "GUM": 92, + "GUY": 93, + "HAI": 94, + "HKG": 95, + "HMD": 96, + "HON": 97, + "HUN": 98, + "IDN": 99, + "IMN": 100, + "IND": 101, + "IOT": 102, + "IRL": 103, + "IRN": 104, + "IRQ": 105, + "ISL": 106, + "ISR": 107, + "ITA": 108, + "JAM": 109, + "JEY": 110, + "JOR": 111, + "JPN": 112, + "KAZ": 113, + "KEN": 114, + "KGZ": 115, + "KIR": 116, + "KNA": 117, + "KOR": 118, + "KSA": 119, + "KUW": 120, + "KVX": 121, + "LAO": 122, + "LBN": 123, + "LBR": 124, + "LBY": 125, + "LCA": 126, + "LES": 127, + "LIE": 128, + "LKA": 129, + "LUX": 130, + "MAC": 131, + "MAD": 132, + "MAF": 133, + "MAR": 134, + "MAS": 135, + "MDA": 136, + "MDV": 137, + "MEX": 138, + "MHL": 139, + "MKD": 140, + "MLI": 141, + "MLT": 142, + "MNG": 143, + "MNP": 144, + "MON": 145, + "MOZ": 146, + "MRI": 147, + "MSR": 148, + "MTN": 149, + "MTQ": 150, + "MWI": 151, + "MYA": 152, + "MYT": 153, + "NAM": 154, + "NCA": 155, + "NCL": 156, + "NEP": 157, + "NFK": 158, + "NIG": 159, + "NIR": 160, + "NIU": 161, + "NLD": 162, + "NOR": 163, + "NRU": 164, + "NZL": 165, + "OMA": 166, + "PAK": 167, + "PAN": 168, + "PAR": 169, + "PCN": 170, + "PER": 171, + "PHI": 172, + "PLE": 173, + "PLW": 174, + "PNG": 175, + "POL": 176, + "POR": 177, + "PRK": 178, + "PUR": 179, + "QAT": 180, + "REU": 181, + "ROU": 182, + "RSA": 183, + "RUS": 184, + "RWA": 185, + "SAM": 186, + "SCO": 187, + "SDN": 188, + "SEN": 189, + "SEY": 190, + "SGS": 191, + "SHN": 192, + "SIN": 193, + "SJM": 194, + "SLE": 195, + "SLV": 196, + "SMR": 197, + "SOL": 198, + "SOM": 199, + "SPM": 200, + "SRB": 201, + "SSD": 202, + "STP": 203, + "SUI": 204, + "SUR": 205, + "SWZ": 206, + "SXM": 207, + "SYR": 208, + "TAH": 209, + "TAN": 210, + "TCA": 211, + "TGA": 212, + "THA": 213, + "TJK": 214, + "TKL": 215, + "TKM": 216, + "TLS": 217, + "TOG": 218, + "TRI": 219, + "TUN": 220, + "TUR": 221, + "TUV": 222, + "TWN": 223, + "UAE": 224, + "UGA": 225, + "UKR": 226, + "UMI": 227, + "URU": 228, + "USA": 229, + "UZB": 230, + "VAN": 231, + "VAT": 232, + "VEN": 233, + "VGB": 234, + "VIE": 235, + "VIN": 236, + "VIR": 237, + "WAL": 238, + "WLF": 239, + "YEM": 240, + "ZAM": 241, + "ZIM": 242, + "EUR": 243, }.get(region, 0) return region_id async def unit3d_distributor_ids(self, distributor): distributor_id = { - '01 DISTRIBUTION': 1, '100 DESTINATIONS TRAVEL FILM': 2, '101 FILMS': 3, '1FILMS': 4, '2 ENTERTAIN VIDEO': 5, '20TH 
CENTURY FOX': 6, '2L': 7, '3D CONTENT HUB': 8, '3D MEDIA': 9, '3L FILM': 10, '4DIGITAL': 11, '4DVD': 12, '4K ULTRA HD MOVIES': 13, '4K UHD': 13, '8-FILMS': 14, '84 ENTERTAINMENT': 15, '88 FILMS': 16, '@ANIME': 17, 'ANIME': 17, 'A CONTRACORRIENTE': 18, 'A CONTRACORRIENTE FILMS': 19, 'A&E HOME VIDEO': 20, 'A&E': 20, 'A&M RECORDS': 21, 'A+E NETWORKS': 22, 'A+R': 23, 'A-FILM': 24, 'AAA': 25, 'AB VIDÉO': 26, 'AB VIDEO': 26, 'ABC - (AUSTRALIAN BROADCASTING CORPORATION)': 27, 'ABC': 27, 'ABKCO': 28, 'ABSOLUT MEDIEN': 29, 'ABSOLUTE': 30, 'ACCENT FILM ENTERTAINMENT': 31, 'ACCENTUS': 32, 'ACORN MEDIA': 33, 'AD VITAM': 34, 'ADA': 35, 'ADITYA VIDEOS': 36, 'ADSO FILMS': 37, 'AFM RECORDS': 38, 'AGFA': 39, 'AIX RECORDS': 40, 'ALAMODE FILM': 41, 'ALBA RECORDS': 42, 'ALBANY RECORDS': 43, 'ALBATROS': 44, 'ALCHEMY': 45, 'ALIVE': 46, 'ALL ANIME': 47, 'ALL INTERACTIVE ENTERTAINMENT': 48, 'ALLEGRO': 49, 'ALLIANCE': 50, 'ALPHA MUSIC': 51, 'ALTERDYSTRYBUCJA': 52, 'ALTERED INNOCENCE': 53, 'ALTITUDE FILM DISTRIBUTION': 54, 'ALUCARD RECORDS': 55, 'AMAZING D.C.': 56, 'AMAZING DC': 56, 'AMMO CONTENT': 57, 'AMUSE SOFT ENTERTAINMENT': 58, 'ANCONNECT': 59, 'ANEC': 60, 'ANIMATSU': 61, 'ANIME HOUSE': 62, 'ANIME LTD': 63, 'ANIME WORKS': 64, 'ANIMEIGO': 65, 'ANIPLEX': 66, 'ANOLIS ENTERTAINMENT': 67, 'ANOTHER WORLD ENTERTAINMENT': 68, 'AP INTERNATIONAL': 69, 'APPLE': 70, 'ARA MEDIA': 71, 'ARBELOS': 72, 'ARC ENTERTAINMENT': 73, 'ARP SÉLECTION': 74, 'ARP SELECTION': 74, 'ARROW': 75, 'ART SERVICE': 76, 'ART VISION': 77, 'ARTE ÉDITIONS': 78, 'ARTE EDITIONS': 78, 'ARTE VIDÉO': 79, 'ARTE VIDEO': 79, 'ARTHAUS MUSIK': 80, 'ARTIFICIAL EYE': 81, 'ARTSPLOITATION FILMS': 82, 'ARTUS FILMS': 83, 'ASCOT ELITE HOME ENTERTAINMENT': 84, 'ASIA VIDEO': 85, 'ASMIK ACE': 86, 'ASTRO RECORDS & FILMWORKS': 87, 'ASYLUM': 88, 'ATLANTIC FILM': 89, 'ATLANTIC RECORDS': 90, 'ATLAS FILM': 91, 'AUDIO VISUAL ENTERTAINMENT': 92, 'AURO-3D CREATIVE LABEL': 93, 'AURUM': 94, 'AV VISIONEN': 95, 'AV-JET': 96, 'AVALON': 97, 'AVENTI': 98, 'AVEX TRAX': 99, 'AXIOM': 100, 'AXIS RECORDS': 101, 'AYNGARAN': 102, 'BAC FILMS': 103, 'BACH FILMS': 104, 'BANDAI VISUAL': 105, 'BARCLAY': 106, 'BBC': 107, 'BRITISH BROADCASTING CORPORATION': 107, 'BBI FILMS': 108, 'BBI': 108, 'BCI HOME ENTERTAINMENT': 109, 'BEGGARS BANQUET': 110, 'BEL AIR CLASSIQUES': 111, 'BELGA FILMS': 112, 'BELVEDERE': 113, 'BENELUX FILM DISTRIBUTORS': 114, 'BENNETT-WATT MEDIA': 115, 'BERLIN CLASSICS': 116, 'BERLINER PHILHARMONIKER RECORDINGS': 117, 'BEST ENTERTAINMENT': 118, 'BEYOND HOME ENTERTAINMENT': 119, 'BFI VIDEO': 120, 'BFI': 120, 'BRITISH FILM INSTITUTE': 120, 'BFS ENTERTAINMENT': 121, 'BFS': 121, 'BHAVANI': 122, 'BIBER RECORDS': 123, 'BIG HOME VIDEO': 124, 'BILDSTÖRUNG': 125, 'BILDSTORUNG': 125, 'BILL ZEBUB': 126, 'BIRNENBLATT': 127, 'BIT WEL': 128, 'BLACK BOX': 129, 'BLACK HILL PICTURES': 130, 'BLACK HILL': 130, 'BLACK HOLE RECORDINGS': 131, 'BLACK HOLE': 131, 'BLAQOUT': 132, 'BLAUFIELD MUSIC': 133, 'BLAUFIELD': 133, 'BLOCKBUSTER ENTERTAINMENT': 134, 'BLOCKBUSTER': 134, 'BLU PHASE MEDIA': 135, 'BLU-RAY ONLY': 136, 'BLU-RAY': 136, 'BLURAY ONLY': 136, 'BLURAY': 136, 'BLUE GENTIAN RECORDS': 137, 'BLUE KINO': 138, 'BLUE UNDERGROUND': 139, 'BMG/ARISTA': 140, 'BMG': 140, 'BMGARISTA': 140, 'BMG ARISTA': 140, 'ARISTA': - 140, 'ARISTA/BMG': 140, 'ARISTABMG': 140, 'ARISTA BMG': 140, 'BONTON FILM': 141, 'BONTON': 141, 'BOOMERANG PICTURES': 142, 'BOOMERANG': 142, 'BQHL ÉDITIONS': 143, 'BQHL EDITIONS': 143, 'BQHL': 143, 'BREAKING GLASS': 144, 'BRIDGESTONE': 145, 'BRINK': 146, 'BROAD GREEN PICTURES': 147, 'BROAD 
GREEN': 147, 'BUSCH MEDIA GROUP': 148, 'BUSCH': 148, 'C MAJOR': 149, 'C.B.S.': 150, 'CAICHANG': 151, 'CALIFÓRNIA FILMES': 152, 'CALIFORNIA FILMES': 152, 'CALIFORNIA': 152, 'CAMEO': 153, 'CAMERA OBSCURA': 154, 'CAMERATA': 155, 'CAMP MOTION PICTURES': 156, 'CAMP MOTION': 156, 'CAPELIGHT PICTURES': 157, 'CAPELIGHT': 157, 'CAPITOL': 159, 'CAPITOL RECORDS': 159, 'CAPRICCI': 160, 'CARGO RECORDS': 161, 'CARLOTTA FILMS': 162, 'CARLOTTA': 162, 'CARLOTA': 162, 'CARMEN FILM': 163, 'CASCADE': 164, 'CATCHPLAY': 165, 'CAULDRON FILMS': 166, 'CAULDRON': 166, 'CBS TELEVISION STUDIOS': 167, 'CBS': 167, 'CCTV': 168, 'CCV ENTERTAINMENT': 169, 'CCV': 169, 'CD BABY': 170, 'CD LAND': 171, 'CECCHI GORI': 172, 'CENTURY MEDIA': 173, 'CHUAN XUN SHI DAI MULTIMEDIA': 174, 'CINE-ASIA': 175, 'CINÉART': 176, 'CINEART': 176, 'CINEDIGM': 177, 'CINEFIL IMAGICA': 178, 'CINEMA EPOCH': 179, 'CINEMA GUILD': 180, 'CINEMA LIBRE STUDIOS': 181, 'CINEMA MONDO': 182, 'CINEMATIC VISION': 183, 'CINEPLOIT RECORDS': 184, 'CINESTRANGE EXTREME': 185, 'CITEL VIDEO': 186, 'CITEL': 186, 'CJ ENTERTAINMENT': 187, 'CJ': 187, 'CLASSIC MEDIA': 188, 'CLASSICFLIX': 189, 'CLASSICLINE': 190, 'CLAUDIO RECORDS': 191, 'CLEAR VISION': 192, 'CLEOPATRA': 193, 'CLOSE UP': 194, 'CMS MEDIA LIMITED': 195, 'CMV LASERVISION': 196, 'CN ENTERTAINMENT': 197, 'CODE RED': 198, 'COHEN MEDIA GROUP': 199, 'COHEN': 199, 'COIN DE MIRE CINÉMA': 200, 'COIN DE MIRE CINEMA': 200, 'COLOSSEO FILM': 201, 'COLUMBIA': 203, 'COLUMBIA PICTURES': 203, 'COLUMBIA/TRI-STAR': 204, 'TRI-STAR': 204, 'COMMERCIAL MARKETING': 205, 'CONCORD MUSIC GROUP': 206, 'CONCORDE VIDEO': 207, 'CONDOR': 208, 'CONSTANTIN FILM': 209, 'CONSTANTIN': 209, 'CONSTANTINO FILMES': 210, 'CONSTANTINO': 210, 'CONSTRUCTIVE MEDIA SERVICE': 211, 'CONSTRUCTIVE': 211, 'CONTENT ZONE': 212, 'CONTENTS GATE': 213, 'COQUEIRO VERDE': 214, 'CORNERSTONE MEDIA': 215, 'CORNERSTONE': 215, 'CP DIGITAL': 216, 'CREST MOVIES': 217, 'CRITERION': 218, 'CRITERION COLLECTION': - 218, 'CC': 218, 'CRYSTAL CLASSICS': 219, 'CULT EPICS': 220, 'CULT FILMS': 221, 'CULT VIDEO': 222, 'CURZON FILM WORLD': 223, 'D FILMS': 224, "D'AILLY COMPANY": 225, 'DAILLY COMPANY': 225, 'D AILLY COMPANY': 225, "D'AILLY": 225, 'DAILLY': 225, 'D AILLY': 225, 'DA CAPO': 226, 'DA MUSIC': 227, "DALL'ANGELO PICTURES": 228, 'DALLANGELO PICTURES': 228, "DALL'ANGELO": 228, 'DALL ANGELO PICTURES': 228, 'DALL ANGELO': 228, 'DAREDO': 229, 'DARK FORCE ENTERTAINMENT': 230, 'DARK FORCE': 230, 'DARK SIDE RELEASING': 231, 'DARK SIDE': 231, 'DAZZLER MEDIA': 232, 'DAZZLER': 232, 'DCM PICTURES': 233, 'DCM': 233, 'DEAPLANETA': 234, 'DECCA': 235, 'DEEPJOY': 236, 'DEFIANT SCREEN ENTERTAINMENT': 237, 'DEFIANT SCREEN': 237, 'DEFIANT': 237, 'DELOS': 238, 'DELPHIAN RECORDS': 239, 'DELPHIAN': 239, 'DELTA MUSIC & ENTERTAINMENT': 240, 'DELTA MUSIC AND ENTERTAINMENT': 240, 'DELTA MUSIC ENTERTAINMENT': 240, 'DELTA MUSIC': 240, 'DELTAMAC CO. 
LTD.': 241, 'DELTAMAC CO LTD': 241, 'DELTAMAC CO': 241, 'DELTAMAC': 241, 'DEMAND MEDIA': 242, 'DEMAND': 242, 'DEP': 243, 'DEUTSCHE GRAMMOPHON': 244, 'DFW': 245, 'DGM': 246, 'DIAPHANA': 247, 'DIGIDREAMS STUDIOS': 248, 'DIGIDREAMS': 248, 'DIGITAL ENVIRONMENTS': 249, 'DIGITAL': 249, 'DISCOTEK MEDIA': 250, 'DISCOVERY CHANNEL': 251, 'DISCOVERY': 251, 'DISK KINO': 252, 'DISNEY / BUENA VISTA': 253, 'DISNEY': 253, 'BUENA VISTA': 253, 'DISNEY BUENA VISTA': 253, 'DISTRIBUTION SELECT': 254, 'DIVISA': 255, 'DNC ENTERTAINMENT': 256, 'DNC': 256, 'DOGWOOF': 257, 'DOLMEN HOME VIDEO': 258, 'DOLMEN': 258, 'DONAU FILM': 259, 'DONAU': 259, 'DORADO FILMS': 260, 'DORADO': 260, 'DRAFTHOUSE FILMS': 261, 'DRAFTHOUSE': 261, 'DRAGON FILM ENTERTAINMENT': 262, 'DRAGON ENTERTAINMENT': 262, 'DRAGON FILM': 262, 'DRAGON': 262, 'DREAMWORKS': 263, 'DRIVE ON RECORDS': 264, 'DRIVE ON': 264, 'DRIVE-ON': 264, 'DRIVEON': 264, 'DS MEDIA': 265, 'DTP ENTERTAINMENT AG': 266, 'DTP ENTERTAINMENT': 266, 'DTP AG': 266, 'DTP': 266, 'DTS ENTERTAINMENT': 267, 'DTS': 267, 'DUKE MARKETING': 268, 'DUKE VIDEO DISTRIBUTION': 269, 'DUKE': 269, 'DUTCH FILMWORKS': 270, 'DUTCH': 270, 'DVD INTERNATIONAL': 271, 'DVD': 271, 'DYBEX': 272, 'DYNAMIC': 273, 'DYNIT': 274, 'E1 ENTERTAINMENT': 275, 'E1': 275, 'EAGLE ENTERTAINMENT': 276, 'EAGLE HOME ENTERTAINMENT PVT.LTD.': - 277, 'EAGLE HOME ENTERTAINMENT PVTLTD': 277, 'EAGLE HOME ENTERTAINMENT PVT LTD': 277, 'EAGLE HOME ENTERTAINMENT': 277, 'EAGLE PICTURES': 278, 'EAGLE ROCK ENTERTAINMENT': 279, 'EAGLE ROCK': 279, 'EAGLE VISION MEDIA': 280, 'EAGLE VISION': 280, 'EARMUSIC': 281, 'EARTH ENTERTAINMENT': 282, 'EARTH': 282, 'ECHO BRIDGE ENTERTAINMENT': 283, 'ECHO BRIDGE': 283, 'EDEL GERMANY GMBH': 284, 'EDEL GERMANY': 284, 'EDEL RECORDS': 285, 'EDITION TONFILM': 286, 'EDITIONS MONTPARNASSE': 287, 'EDKO FILMS LTD.': 288, 'EDKO FILMS LTD': 288, 'EDKO FILMS': 288, 'EDKO': 288, "EIN'S M&M CO": 289, 'EINS M&M CO': 289, "EIN'S M&M": 289, 'EINS M&M': 289, 'ELEA-MEDIA': 290, 'ELEA MEDIA': 290, 'ELEA': 290, 'ELECTRIC PICTURE': 291, 'ELECTRIC': 291, 'ELEPHANT FILMS': 292, 'ELEPHANT': 292, 'ELEVATION': 293, 'EMI': 294, 'EMON': 295, 'EMS': 296, 'EMYLIA': 297, 'ENE MEDIA': 298, 'ENE': 298, 'ENTERTAINMENT IN VIDEO': 299, 'ENTERTAINMENT IN': 299, 'ENTERTAINMENT ONE': 300, 'ENTERTAINMENT ONE FILMS CANADA INC.': 301, 'ENTERTAINMENT ONE FILMS CANADA INC': 301, 'ENTERTAINMENT ONE FILMS CANADA': 301, 'ENTERTAINMENT ONE CANADA INC': 301, - 'ENTERTAINMENT ONE CANADA': 301, 'ENTERTAINMENTONE': 302, 'EONE': 303, 'EOS': 304, 'EPIC PICTURES': 305, 'EPIC': 305, 'EPIC RECORDS': 306, 'ERATO': 307, 'EROS': 308, 'ESC EDITIONS': 309, 'ESCAPI MEDIA BV': 310, 'ESOTERIC RECORDINGS': 311, 'ESPN FILMS': 312, 'EUREKA ENTERTAINMENT': 313, 'EUREKA': 313, 'EURO PICTURES': 314, 'EURO VIDEO': 315, 'EUROARTS': 316, 'EUROPA FILMES': 317, 'EUROPA': 317, 'EUROPACORP': 318, 'EUROZOOM': 319, 'EXCEL': 320, 'EXPLOSIVE MEDIA': 321, 'EXPLOSIVE': 321, 'EXTRALUCID FILMS': 322, 'EXTRALUCID': 322, 'EYE SEE MOVIES': 323, 'EYE SEE': 323, 'EYK MEDIA': 324, 'EYK': 324, 'FABULOUS FILMS': 325, 'FABULOUS': 325, 'FACTORIS FILMS': 326, 'FACTORIS': 326, 'FARAO RECORDS': 327, 'FARBFILM HOME ENTERTAINMENT': 328, 'FARBFILM ENTERTAINMENT': 328, 'FARBFILM HOME': 328, 'FARBFILM': 328, 'FEELGOOD ENTERTAINMENT': 329, 'FEELGOOD': 329, 'FERNSEHJUWELEN': 330, 'FILM CHEST': 331, 'FILM MEDIA': 332, 'FILM MOVEMENT': 333, 'FILM4': 334, 'FILMART': 335, 'FILMAURO': 336, 'FILMAX': 337, 'FILMCONFECT HOME ENTERTAINMENT': 338, 'FILMCONFECT ENTERTAINMENT': 338, 'FILMCONFECT HOME': 338, 
'FILMCONFECT': 338, 'FILMEDIA': 339, 'FILMJUWELEN': 340, 'FILMOTEKA NARODAWA': 341, 'FILMRISE': 342, 'FINAL CUT ENTERTAINMENT': 343, 'FINAL CUT': 343, 'FIREHOUSE 12 RECORDS': 344, 'FIREHOUSE 12': 344, 'FIRST INTERNATIONAL PRODUCTION': 345, 'FIRST INTERNATIONAL': 345, 'FIRST LOOK STUDIOS': 346, 'FIRST LOOK': 346, 'FLAGMAN TRADE': 347, 'FLASHSTAR FILMES': 348, 'FLASHSTAR': 348, 'FLICKER ALLEY': 349, 'FNC ADD CULTURE': 350, 'FOCUS FILMES': 351, 'FOCUS': 351, 'FOKUS MEDIA': 352, 'FOKUSA': 352, 'FOX PATHE EUROPA': 353, 'FOX PATHE': 353, 'FOX EUROPA': 353, 'FOX/MGM': 354, 'FOX MGM': 354, 'MGM': 354, 'MGM/FOX': 354, 'FOX': 354, 'FPE': 355, 'FRANCE TÉLÉVISIONS DISTRIBUTION': 356, 'FRANCE TELEVISIONS DISTRIBUTION': 356, 'FRANCE TELEVISIONS': 356, 'FRANCE': 356, 'FREE DOLPHIN ENTERTAINMENT': 357, 'FREE DOLPHIN': 357, 'FREESTYLE DIGITAL MEDIA': 358, 'FREESTYLE DIGITAL': 358, 'FREESTYLE': 358, 'FREMANTLE HOME ENTERTAINMENT': 359, 'FREMANTLE ENTERTAINMENT': 359, 'FREMANTLE HOME': 359, 'FREMANTL': 359, 'FRENETIC FILMS': 360, 'FRENETIC': 360, 'FRONTIER WORKS': 361, 'FRONTIER': 361, 'FRONTIERS MUSIC': 362, 'FRONTIERS RECORDS': 363, 'FS FILM OY': 364, 'FS FILM': - 364, 'FULL MOON FEATURES': 365, 'FULL MOON': 365, 'FUN CITY EDITIONS': 366, 'FUN CITY': 366, 'FUNIMATION ENTERTAINMENT': 367, 'FUNIMATION': 367, 'FUSION': 368, 'FUTUREFILM': 369, 'G2 PICTURES': 370, 'G2': 370, 'GAGA COMMUNICATIONS': 371, 'GAGA': 371, 'GAIAM': 372, 'GALAPAGOS': 373, 'GAMMA HOME ENTERTAINMENT': 374, 'GAMMA ENTERTAINMENT': 374, 'GAMMA HOME': 374, 'GAMMA': 374, 'GARAGEHOUSE PICTURES': 375, 'GARAGEHOUSE': 375, 'GARAGEPLAY (車庫娛樂)': 376, '車庫娛樂': 376, 'GARAGEPLAY (Che Ku Yu Le )': 376, 'GARAGEPLAY': 376, 'Che Ku Yu Le': 376, 'GAUMONT': 377, 'GEFFEN': 378, 'GENEON ENTERTAINMENT': 379, 'GENEON': 379, 'GENEON UNIVERSAL ENTERTAINMENT': 380, 'GENERAL VIDEO RECORDING': 381, 'GLASS DOLL FILMS': 382, 'GLASS DOLL': 382, 'GLOBE MUSIC MEDIA': 383, 'GLOBE MUSIC': 383, 'GLOBE MEDIA': 383, 'GLOBE': 383, 'GO ENTERTAIN': 384, 'GO': 384, 'GOLDEN HARVEST': 385, 'GOOD!MOVIES': 386, - 'GOOD! 
MOVIES': 386, 'GOOD MOVIES': 386, 'GRAPEVINE VIDEO': 387, 'GRAPEVINE': 387, 'GRASSHOPPER FILM': 388, 'GRASSHOPPER FILMS': 388, 'GRASSHOPPER': 388, 'GRAVITAS VENTURES': 389, 'GRAVITAS': 389, 'GREAT MOVIES': 390, 'GREAT': 390, - 'GREEN APPLE ENTERTAINMENT': 391, 'GREEN ENTERTAINMENT': 391, 'GREEN APPLE': 391, 'GREEN': 391, 'GREENNARAE MEDIA': 392, 'GREENNARAE': 392, 'GRINDHOUSE RELEASING': 393, 'GRINDHOUSE': 393, 'GRIND HOUSE': 393, 'GRYPHON ENTERTAINMENT': 394, 'GRYPHON': 394, 'GUNPOWDER & SKY': 395, 'GUNPOWDER AND SKY': 395, 'GUNPOWDER SKY': 395, 'GUNPOWDER + SKY': 395, 'GUNPOWDER': 395, 'HANABEE ENTERTAINMENT': 396, 'HANABEE': 396, 'HANNOVER HOUSE': 397, 'HANNOVER': 397, 'HANSESOUND': 398, 'HANSE SOUND': 398, 'HANSE': 398, 'HAPPINET': 399, 'HARMONIA MUNDI': 400, 'HARMONIA': 400, 'HBO': 401, 'HDC': 402, 'HEC': 403, 'HELL & BACK RECORDINGS': 404, 'HELL AND BACK RECORDINGS': 404, 'HELL & BACK': 404, 'HELL AND BACK': 404, "HEN'S TOOTH VIDEO": 405, 'HENS TOOTH VIDEO': 405, "HEN'S TOOTH": 405, 'HENS TOOTH': 405, 'HIGH FLIERS': 406, 'HIGHLIGHT': 407, 'HILLSONG': 408, 'HISTORY CHANNEL': 409, 'HISTORY': 409, 'HK VIDÉO': 410, 'HK VIDEO': 410, 'HK': 410, 'HMH HAMBURGER MEDIEN HAUS': 411, 'HAMBURGER MEDIEN HAUS': 411, 'HMH HAMBURGER MEDIEN': 411, 'HMH HAMBURGER': 411, 'HMH': 411, 'HOLLYWOOD CLASSIC ENTERTAINMENT': 412, 'HOLLYWOOD CLASSIC': 412, 'HOLLYWOOD PICTURES': 413, 'HOLLYWOOD': 413, 'HOPSCOTCH ENTERTAINMENT': 414, 'HOPSCOTCH': 414, 'HPM': 415, 'HÄNNSLER CLASSIC': 416, 'HANNSLER CLASSIC': 416, 'HANNSLER': 416, 'I-CATCHER': 417, 'I CATCHER': 417, 'ICATCHER': 417, 'I-ON NEW MEDIA': 418, 'I ON NEW MEDIA': 418, 'ION NEW MEDIA': 418, 'ION MEDIA': 418, 'I-ON': 418, 'ION': 418, 'IAN PRODUCTIONS': 419, 'IAN': 419, 'ICESTORM': 420, 'ICON FILM DISTRIBUTION': 421, 'ICON DISTRIBUTION': 421, 'ICON FILM': 421, 'ICON': 421, 'IDEALE AUDIENCE': 422, 'IDEALE': 422, 'IFC FILMS': 423, 'IFC': 423, 'IFILM': 424, 'ILLUSIONS UNLTD.': 425, 'ILLUSIONS UNLTD': 425, 'ILLUSIONS': 425, 'IMAGE ENTERTAINMENT': 426, 'IMAGE': 426, - 'IMAGEM FILMES': 427, 'IMAGEM': 427, 'IMOVISION': 428, 'IMPERIAL CINEPIX': 429, 'IMPRINT': 430, 'IMPULS HOME ENTERTAINMENT': 431, 'IMPULS ENTERTAINMENT': 431, 'IMPULS HOME': 431, 'IMPULS': 431, 'IN-AKUSTIK': 432, 'IN AKUSTIK': 432, 'INAKUSTIK': 432, 'INCEPTION MEDIA GROUP': 433, 'INCEPTION MEDIA': 433, 'INCEPTION GROUP': 433, 'INCEPTION': 433, 'INDEPENDENT': 434, 'INDICAN': 435, 'INDIE RIGHTS': 436, 'INDIE': 436, 'INDIGO': 437, 'INFO': 438, 'INJOINGAN': 439, 'INKED PICTURES': 440, 'INKED': 440, 'INSIDE OUT MUSIC': 441, 'INSIDE MUSIC': 441, 'INSIDE OUT': 441, 'INSIDE': 441, 'INTERCOM': 442, 'INTERCONTINENTAL VIDEO': 443, 'INTERCONTINENTAL': 443, 'INTERGROOVE': 444, - 'INTERSCOPE': 445, 'INVINCIBLE PICTURES': 446, 'INVINCIBLE': 446, 'ISLAND/MERCURY': 447, 'ISLAND MERCURY': 447, 'ISLANDMERCURY': 447, 'ISLAND & MERCURY': 447, 'ISLAND AND MERCURY': 447, 'ISLAND': 447, 'ITN': 448, 'ITV DVD': 449, 'ITV': 449, 'IVC': 450, 'IVE ENTERTAINMENT': 451, 'IVE': 451, 'J&R ADVENTURES': 452, 'J&R': 452, 'JR': 452, 'JAKOB': 453, 'JONU MEDIA': 454, 'JONU': 454, 'JRB PRODUCTIONS': 455, 'JRB': 455, 'JUST BRIDGE ENTERTAINMENT': 456, 'JUST BRIDGE': 456, 'JUST ENTERTAINMENT': 456, 'JUST': 456, 'KABOOM ENTERTAINMENT': 457, 'KABOOM': 457, 'KADOKAWA ENTERTAINMENT': 458, 'KADOKAWA': 458, 'KAIROS': 459, 'KALEIDOSCOPE ENTERTAINMENT': 460, 'KALEIDOSCOPE': 460, 'KAM & RONSON ENTERPRISES': 461, 'KAM & RONSON': 461, 'KAM&RONSON ENTERPRISES': 461, 'KAM&RONSON': 461, 'KAM AND RONSON ENTERPRISES': 461, 'KAM AND RONSON': 461, 'KANA HOME 
VIDEO': 462, 'KARMA FILMS': 463, 'KARMA': 463, 'KATZENBERGER': 464, 'KAZE': 465, 'KBS MEDIA': 466, 'KBS': 466, 'KD MEDIA': 467, 'KD': 467, 'KING MEDIA': 468, 'KING': 468, 'KING RECORDS': 469, 'KINO LORBER': 470, 'KINO': 470, 'KINO SWIAT': 471, 'KINOKUNIYA': 472, 'KINOWELT HOME ENTERTAINMENT/DVD': 473, 'KINOWELT HOME ENTERTAINMENT': 473, 'KINOWELT ENTERTAINMENT': 473, 'KINOWELT HOME DVD': 473, 'KINOWELT ENTERTAINMENT/DVD': 473, 'KINOWELT DVD': 473, 'KINOWELT': 473, 'KIT PARKER FILMS': 474, 'KIT PARKER': 474, 'KITTY MEDIA': 475, 'KNM HOME ENTERTAINMENT': 476, 'KNM ENTERTAINMENT': 476, 'KNM HOME': 476, 'KNM': 476, 'KOBA FILMS': 477, 'KOBA': 477, 'KOCH ENTERTAINMENT': 478, 'KOCH MEDIA': 479, 'KOCH': 479, 'KRAKEN RELEASING': 480, 'KRAKEN': 480, 'KSCOPE': 481, 'KSM': 482, 'KULTUR': 483, "L'ATELIER D'IMAGES": 484, "LATELIER D'IMAGES": 484, "L'ATELIER DIMAGES": 484, 'LATELIER DIMAGES': 484, "L ATELIER D'IMAGES": 484, "L'ATELIER D IMAGES": 484, - 'L ATELIER D IMAGES': 484, "L'ATELIER": 484, 'L ATELIER': 484, 'LATELIER': 484, 'LA AVENTURA AUDIOVISUAL': 485, 'LA AVENTURA': 485, 'LACE GROUP': 486, 'LACE': 486, 'LASER PARADISE': 487, 'LAYONS': 488, 'LCJ EDITIONS': 489, 'LCJ': 489, 'LE CHAT QUI FUME': 490, 'LE PACTE': 491, 'LEDICK FILMHANDEL': 492, 'LEGEND': 493, 'LEOMARK STUDIOS': 494, 'LEOMARK': 494, 'LEONINE FILMS': 495, 'LEONINE': 495, 'LICHTUNG MEDIA LTD': 496, 'LICHTUNG LTD': 496, 'LICHTUNG MEDIA LTD.': 496, 'LICHTUNG LTD.': 496, 'LICHTUNG MEDIA': 496, 'LICHTUNG': 496, 'LIGHTHOUSE HOME ENTERTAINMENT': 497, 'LIGHTHOUSE ENTERTAINMENT': 497, 'LIGHTHOUSE HOME': 497, 'LIGHTHOUSE': 497, 'LIGHTYEAR': 498, 'LIONSGATE FILMS': 499, 'LIONSGATE': 499, 'LIZARD CINEMA TRADE': 500, 'LLAMENTOL': 501, 'LOBSTER FILMS': 502, 'LOBSTER': 502, 'LOGON': 503, 'LORBER FILMS': 504, 'LORBER': 504, 'LOS BANDITOS FILMS': 505, 'LOS BANDITOS': 505, 'LOUD & PROUD RECORDS': 506, 'LOUD AND PROUD RECORDS': 506, 'LOUD & PROUD': 506, 'LOUD AND PROUD': 506, 'LSO LIVE': 507, 'LUCASFILM': 508, 'LUCKY RED': 509, 'LUMIÈRE HOME ENTERTAINMENT': 510, 'LUMIERE HOME ENTERTAINMENT': 510, 'LUMIERE ENTERTAINMENT': 510, 'LUMIERE HOME': 510, 'LUMIERE': 510, 'M6 VIDEO': 511, 'M6': 511, 'MAD DIMENSION': 512, 'MADMAN ENTERTAINMENT': 513, 'MADMAN': 513, 'MAGIC BOX': 514, 'MAGIC PLAY': 515, 'MAGNA HOME ENTERTAINMENT': 516, 'MAGNA ENTERTAINMENT': 516, 'MAGNA HOME': 516, 'MAGNA': 516, 'MAGNOLIA PICTURES': 517, 'MAGNOLIA': 517, 'MAIDEN JAPAN': 518, 'MAIDEN': 518, 'MAJENG MEDIA': 519, 'MAJENG': 519, 'MAJESTIC HOME ENTERTAINMENT': 520, 'MAJESTIC ENTERTAINMENT': 520, 'MAJESTIC HOME': 520, 'MAJESTIC': 520, 'MANGA HOME ENTERTAINMENT': 521, 'MANGA ENTERTAINMENT': 521, 'MANGA HOME': 521, 'MANGA': 521, 'MANTA LAB': 522, 'MAPLE STUDIOS': 523, 'MAPLE': 523, 'MARCO POLO PRODUCTION': - 524, 'MARCO POLO': 524, 'MARIINSKY': 525, 'MARVEL STUDIOS': 526, 'MARVEL': 526, 'MASCOT RECORDS': 527, 'MASCOT': 527, 'MASSACRE VIDEO': 528, 'MASSACRE': 528, 'MATCHBOX': 529, 'MATRIX D': 530, 'MAXAM': 531, 'MAYA HOME ENTERTAINMENT': 532, 'MAYA ENTERTAINMENT': 532, 'MAYA HOME': 532, 'MAYAT': 532, 'MDG': 533, 'MEDIA BLASTERS': 534, 'MEDIA FACTORY': 535, 'MEDIA TARGET DISTRIBUTION': 536, 'MEDIA TARGET': 536, 'MEDIAINVISION': 537, 'MEDIATOON': 538, 'MEDIATRES ESTUDIO': 539, 'MEDIATRES STUDIO': 539, 'MEDIATRES': 539, 'MEDICI ARTS': 540, 'MEDICI CLASSICS': 541, 'MEDIUMRARE ENTERTAINMENT': 542, 'MEDIUMRARE': 542, 'MEDUSA': 543, 'MEGASTAR': 544, 'MEI AH': 545, 'MELI MÉDIAS': 546, 'MELI MEDIAS': 546, 'MEMENTO FILMS': 547, 'MEMENTO': 547, 'MENEMSHA FILMS': 548, 'MENEMSHA': 548, 'MERCURY': 549, 
'MERCURY STUDIOS': 550, 'MERGE SOFT PRODUCTIONS': 551, 'MERGE PRODUCTIONS': 551, 'MERGE SOFT': 551, 'MERGE': 551, 'METAL BLADE RECORDS': 552, 'METAL BLADE': 552, 'METEOR': 553, 'METRO-GOLDWYN-MAYER': 554, 'METRO GOLDWYN MAYER': 554, 'METROGOLDWYNMAYER': 554, 'METRODOME VIDEO': 555, 'METRODOME': 555, 'METROPOLITAN': 556, 'MFA+': - 557, 'MFA': 557, 'MIG FILMGROUP': 558, 'MIG': 558, 'MILESTONE': 559, 'MILL CREEK ENTERTAINMENT': 560, 'MILL CREEK': 560, 'MILLENNIUM MEDIA': 561, 'MILLENNIUM': 561, 'MIRAGE ENTERTAINMENT': 562, 'MIRAGE': 562, 'MIRAMAX': 563, - 'MISTERIYA ZVUKA': 564, 'MK2': 565, 'MODE RECORDS': 566, 'MODE': 566, 'MOMENTUM PICTURES': 567, 'MONDO HOME ENTERTAINMENT': 568, 'MONDO ENTERTAINMENT': 568, 'MONDO HOME': 568, 'MONDO MACABRO': 569, 'MONGREL MEDIA': 570, 'MONOLIT': 571, 'MONOLITH VIDEO': 572, 'MONOLITH': 572, 'MONSTER PICTURES': 573, 'MONSTER': 573, 'MONTEREY VIDEO': 574, 'MONTEREY': 574, 'MONUMENT RELEASING': 575, 'MONUMENT': 575, 'MORNINGSTAR': 576, 'MORNING STAR': 576, 'MOSERBAER': 577, 'MOVIEMAX': 578, 'MOVINSIDE': 579, 'MPI MEDIA GROUP': 580, 'MPI MEDIA': 580, 'MPI': 580, 'MR. BONGO FILMS': 581, 'MR BONGO FILMS': 581, 'MR BONGO': 581, 'MRG (MERIDIAN)': 582, 'MRG MERIDIAN': 582, 'MRG': 582, 'MERIDIAN': 582, 'MUBI': 583, 'MUG SHOT PRODUCTIONS': 584, 'MUG SHOT': 584, 'MULTIMUSIC': 585, 'MULTI-MUSIC': 585, 'MULTI MUSIC': 585, 'MUSE': 586, 'MUSIC BOX FILMS': 587, 'MUSIC BOX': 587, 'MUSICBOX': 587, 'MUSIC BROKERS': 588, 'MUSIC THEORIES': 589, 'MUSIC VIDEO DISTRIBUTORS': 590, 'MUSIC VIDEO': 590, 'MUSTANG ENTERTAINMENT': 591, 'MUSTANG': 591, 'MVD VISUAL': 592, 'MVD': 592, 'MVD/VSC': 593, 'MVL': 594, 'MVM ENTERTAINMENT': 595, 'MVM': 595, 'MYNDFORM': 596, 'MYSTIC NIGHT PICTURES': 597, 'MYSTIC NIGHT': 597, 'NAMELESS MEDIA': 598, 'NAMELESS': 598, 'NAPALM RECORDS': 599, 'NAPALM': 599, 'NATIONAL ENTERTAINMENT MEDIA': 600, 'NATIONAL ENTERTAINMENT': 600, 'NATIONAL MEDIA': 600, 'NATIONAL FILM ARCHIVE': 601, 'NATIONAL ARCHIVE': 601, 'NATIONAL FILM': 601, 'NATIONAL GEOGRAPHIC': 602, 'NAT GEO TV': 602, 'NAT GEO': 602, 'NGO': 602, 'NAXOS': 603, 'NBCUNIVERSAL ENTERTAINMENT JAPAN': 604, 'NBC UNIVERSAL ENTERTAINMENT JAPAN': 604, 'NBCUNIVERSAL JAPAN': 604, 'NBC UNIVERSAL JAPAN': 604, 'NBC JAPAN': 604, 'NBO ENTERTAINMENT': 605, 'NBO': 605, 'NEOS': 606, 'NETFLIX': 607, 'NETWORK': 608, 'NEW BLOOD': 609, 'NEW DISC': 610, 'NEW KSM': 611, 'NEW LINE CINEMA': 612, 'NEW LINE': 612, 'NEW MOVIE TRADING CO. 
LTD': 613, 'NEW MOVIE TRADING CO LTD': 613, 'NEW MOVIE TRADING CO': 613, 'NEW MOVIE TRADING': 613, 'NEW WAVE FILMS': 614, 'NEW WAVE': 614, 'NFI': 615, - 'NHK': 616, 'NIPPONART': 617, 'NIS AMERICA': 618, 'NJUTAFILMS': 619, 'NOBLE ENTERTAINMENT': 620, 'NOBLE': 620, 'NORDISK FILM': 621, 'NORDISK': 621, 'NORSK FILM': 622, 'NORSK': 622, 'NORTH AMERICAN MOTION PICTURES': 623, 'NOS AUDIOVISUAIS': 624, 'NOTORIOUS PICTURES': 625, 'NOTORIOUS': 625, 'NOVA MEDIA': 626, 'NOVA': 626, 'NOVA SALES AND DISTRIBUTION': 627, 'NOVA SALES & DISTRIBUTION': 627, 'NSM': 628, 'NSM RECORDS': 629, 'NUCLEAR BLAST': 630, 'NUCLEUS FILMS': 631, 'NUCLEUS': 631, 'OBERLIN MUSIC': 632, 'OBERLIN': 632, 'OBRAS-PRIMAS DO CINEMA': 633, 'OBRAS PRIMAS DO CINEMA': 633, 'OBRASPRIMAS DO CINEMA': 633, 'OBRAS-PRIMAS CINEMA': 633, 'OBRAS PRIMAS CINEMA': 633, 'OBRASPRIMAS CINEMA': 633, 'OBRAS-PRIMAS': 633, 'OBRAS PRIMAS': 633, 'OBRASPRIMAS': 633, 'ODEON': 634, 'OFDB FILMWORKS': 635, 'OFDB': 635, 'OLIVE FILMS': 636, 'OLIVE': 636, 'ONDINE': 637, 'ONSCREEN FILMS': 638, 'ONSCREEN': 638, 'OPENING DISTRIBUTION': 639, 'OPERA AUSTRALIA': 640, 'OPTIMUM HOME ENTERTAINMENT': 641, 'OPTIMUM ENTERTAINMENT': 641, 'OPTIMUM HOME': 641, 'OPTIMUM': 641, 'OPUS ARTE': 642, 'ORANGE STUDIO': 643, 'ORANGE': 643, 'ORLANDO EASTWOOD FILMS': 644, 'ORLANDO FILMS': 644, 'ORLANDO EASTWOOD': 644, 'ORLANDO': 644, 'ORUSTAK PICTURES': 645, 'ORUSTAK': 645, 'OSCILLOSCOPE PICTURES': 646, 'OSCILLOSCOPE': 646, 'OUTPLAY': 647, 'PALISADES TARTAN': 648, 'PAN VISION': 649, 'PANVISION': 649, 'PANAMINT CINEMA': 650, 'PANAMINT': 650, 'PANDASTORM ENTERTAINMENT': 651, 'PANDA STORM ENTERTAINMENT': 651, 'PANDASTORM': 651, 'PANDA STORM': 651, 'PANDORA FILM': 652, 'PANDORA': 652, 'PANEGYRIC': 653, 'PANORAMA': 654, 'PARADE DECK FILMS': 655, 'PARADE DECK': 655, 'PARADISE': 656, 'PARADISO FILMS': 657, 'PARADOX': 658, 'PARAMOUNT PICTURES': 659, 'PARAMOUNT': 659, 'PARIS FILMES': 660, 'PARIS FILMS': 660, 'PARIS': 660, 'PARK CIRCUS': 661, 'PARLOPHONE': 662, 'PASSION RIVER': 663, 'PATHE DISTRIBUTION': 664, 'PATHE': 664, 'PBS': 665, 'PEACE ARCH TRINITY': 666, 'PECCADILLO PICTURES': 667, 'PEPPERMINT': 668, 'PHASE 4 FILMS': 669, 'PHASE 4': 669, 'PHILHARMONIA BAROQUE': 670, 'PICTURE HOUSE ENTERTAINMENT': 671, 'PICTURE ENTERTAINMENT': 671, 'PICTURE HOUSE': 671, 'PICTURE': 671, 'PIDAX': 672, 'PINK FLOYD RECORDS': 673, 'PINK FLOYD': 673, 'PINNACLE FILMS': 674, 'PINNACLE': 674, 'PLAIN': 675, 'PLATFORM ENTERTAINMENT LIMITED': 676, 'PLATFORM ENTERTAINMENT LTD': 676, 'PLATFORM ENTERTAINMENT LTD.': 676, 'PLATFORM ENTERTAINMENT': 676, 'PLATFORM': 676, 'PLAYARTE': 677, 'PLG UK CLASSICS': 678, 'PLG UK': - 678, 'PLG': 678, 'POLYBAND & TOPPIC VIDEO/WVG': 679, 'POLYBAND AND TOPPIC VIDEO/WVG': 679, 'POLYBAND & TOPPIC VIDEO WVG': 679, 'POLYBAND & TOPPIC VIDEO AND WVG': 679, 'POLYBAND & TOPPIC VIDEO & WVG': 679, 'POLYBAND AND TOPPIC VIDEO WVG': 679, 'POLYBAND AND TOPPIC VIDEO AND WVG': 679, 'POLYBAND AND TOPPIC VIDEO & WVG': 679, 'POLYBAND & TOPPIC VIDEO': 679, 'POLYBAND AND TOPPIC VIDEO': 679, 'POLYBAND & TOPPIC': 679, 'POLYBAND AND TOPPIC': 679, 'POLYBAND': 679, 'WVG': 679, 'POLYDOR': 680, 'PONY': 681, 'PONY CANYON': 682, 'POTEMKINE': 683, 'POWERHOUSE FILMS': 684, 'POWERHOUSE': 684, 'POWERSTATIOM': 685, 'PRIDE & JOY': 686, 'PRIDE AND JOY': 686, 'PRINZ MEDIA': 687, 'PRINZ': 687, 'PRIS AUDIOVISUAIS': 688, 'PRO VIDEO': 689, 'PRO-VIDEO': 689, 'PRO-MOTION': 690, 'PRO MOTION': 690, 'PROD. 
JRB': 691, 'PROD JRB': 691, 'PRODISC': 692, 'PROKINO': 693, 'PROVOGUE RECORDS': 694, 'PROVOGUE': 694, 'PROWARE': 695, 'PULP VIDEO': 696, 'PULP': 696, 'PULSE VIDEO': 697, 'PULSE': 697, 'PURE AUDIO RECORDINGS': 698, 'PURE AUDIO': 698, 'PURE FLIX ENTERTAINMENT': 699, 'PURE FLIX': 699, 'PURE ENTERTAINMENT': 699, 'PYRAMIDE VIDEO': 700, 'PYRAMIDE': 700, 'QUALITY FILMS': 701, 'QUALITY': 701, 'QUARTO VALLEY RECORDS': 702, 'QUARTO VALLEY': 702, 'QUESTAR': 703, 'R SQUARED FILMS': 704, 'R SQUARED': 704, 'RAPID EYE MOVIES': 705, 'RAPID EYE': 705, 'RARO VIDEO': 706, 'RARO': 706, 'RAROVIDEO U.S.': 707, 'RAROVIDEO US': 707, 'RARO VIDEO US': 707, 'RARO VIDEO U.S.': 707, 'RARO U.S.': 707, 'RARO US': 707, 'RAVEN BANNER RELEASING': 708, 'RAVEN BANNER': 708, 'RAVEN': 708, 'RAZOR DIGITAL ENTERTAINMENT': 709, 'RAZOR DIGITAL': 709, 'RCA': 710, 'RCO LIVE': 711, 'RCO': 711, 'RCV': 712, 'REAL GONE MUSIC': 713, 'REAL GONE': 713, 'REANIMEDIA': 714, 'REANI MEDIA': 714, 'REDEMPTION': 715, 'REEL': 716, 'RELIANCE HOME VIDEO & GAMES': 717, 'RELIANCE HOME VIDEO AND GAMES': 717, 'RELIANCE HOME VIDEO': 717, 'RELIANCE VIDEO': 717, 'RELIANCE HOME': 717, 'RELIANCE': 717, 'REM CULTURE': 718, 'REMAIN IN LIGHT': 719, 'REPRISE': 720, 'RESEN': 721, 'RETROMEDIA': 722, 'REVELATION FILMS LTD.': 723, 'REVELATION FILMS LTD': 723, 'REVELATION FILMS': 723, 'REVELATION LTD.': 723, 'REVELATION LTD': 723, 'REVELATION': 723, 'REVOLVER ENTERTAINMENT': 724, 'REVOLVER': 724, 'RHINO MUSIC': 725, 'RHINO': 725, 'RHV': 726, 'RIGHT STUF': 727, 'RIMINI EDITIONS': 728, 'RISING SUN MEDIA': 729, 'RLJ ENTERTAINMENT': 730, 'RLJ': 730, 'ROADRUNNER RECORDS': 731, 'ROADSHOW ENTERTAINMENT': 732, 'ROADSHOW': 732, 'RONE': 733, 'RONIN FLIX': 734, 'ROTANA HOME ENTERTAINMENT': 735, 'ROTANA ENTERTAINMENT': 735, 'ROTANA HOME': 735, 'ROTANA': 735, 'ROUGH TRADE': 736, 'ROUNDER': 737, 'SAFFRON HILL FILMS': 738, 'SAFFRON HILL': 738, 'SAFFRON': 738, 'SAMUEL GOLDWYN FILMS': 739, 'SAMUEL GOLDWYN': 739, 'SAN FRANCISCO SYMPHONY': 740, 'SANDREW METRONOME': 741, 'SAPHRANE': 742, 'SAVOR': 743, 'SCANBOX ENTERTAINMENT': 744, 'SCANBOX': 744, 'SCENIC LABS': 745, 'SCHRÖDERMEDIA': 746, 'SCHRODERMEDIA': 746, 'SCHRODER MEDIA': 746, 'SCORPION RELEASING': 747, 'SCORPION': 747, 'SCREAM TEAM RELEASING': 748, 'SCREAM TEAM': 748, 'SCREEN MEDIA': 749, 'SCREEN': 749, 'SCREENBOUND PICTURES': 750, 'SCREENBOUND': 750, 'SCREENWAVE MEDIA': 751, 'SCREENWAVE': 751, 'SECOND RUN': 752, 'SECOND SIGHT': 753, 'SEEDSMAN GROUP': 754, 'SELECT VIDEO': 755, 'SELECTA VISION': 756, 'SENATOR': 757, 'SENTAI FILMWORKS': 758, 'SENTAI': 758, 'SEVEN7': 759, 'SEVERIN FILMS': 760, 'SEVERIN': 760, 'SEVILLE': 761, 'SEYONS ENTERTAINMENT': 762, 'SEYONS': 762, 'SF STUDIOS': 763, 'SGL ENTERTAINMENT': 764, 'SGL': 764, 'SHAMELESS': 765, 'SHAMROCK MEDIA': 766, 'SHAMROCK': 766, 'SHANGHAI EPIC MUSIC ENTERTAINMENT': 767, 'SHANGHAI EPIC ENTERTAINMENT': 767, 'SHANGHAI EPIC MUSIC': 767, 'SHANGHAI MUSIC ENTERTAINMENT': 767, 'SHANGHAI ENTERTAINMENT': 767, 'SHANGHAI MUSIC': 767, 'SHANGHAI': 767, 'SHEMAROO': 768, 'SHOCHIKU': 769, 'SHOCK': 770, 'SHOGAKU KAN': 771, 'SHOUT FACTORY': 772, 'SHOUT! 
FACTORY': 772, 'SHOUT': 772, 'SHOUT!': 772, 'SHOWBOX': 773, 'SHOWTIME ENTERTAINMENT': 774, 'SHOWTIME': 774, 'SHRIEK SHOW': 775, 'SHUDDER': 776, 'SIDONIS': 777, 'SIDONIS CALYSTA': 778, 'SIGNAL ONE ENTERTAINMENT': 779, 'SIGNAL ONE': 779, 'SIGNATURE ENTERTAINMENT': 780, 'SIGNATURE': 780, 'SILVER VISION': 781, 'SINISTER FILM': 782, 'SINISTER': 782, 'SIREN VISUAL ENTERTAINMENT': 783, 'SIREN VISUAL': 783, 'SIREN ENTERTAINMENT': 783, 'SIREN': 783, 'SKANI': 784, 'SKY DIGI': 785, 'SLASHER // VIDEO': 786, 'SLASHER / VIDEO': 786, 'SLASHER VIDEO': 786, 'SLASHER': 786, 'SLOVAK FILM INSTITUTE': 787, 'SLOVAK FILM': 787, - 'SFI': 787, 'SM LIFE DESIGN GROUP': 788, 'SMOOTH PICTURES': 789, 'SMOOTH': 789, 'SNAPPER MUSIC': 790, 'SNAPPER': 790, 'SODA PICTURES': 791, 'SODA': 791, 'SONO LUMINUS': 792, 'SONY MUSIC': 793, 'SONY PICTURES': 794, 'SONY': 794, 'SONY PICTURES CLASSICS': 795, 'SONY CLASSICS': 795, 'SOUL MEDIA': 796, 'SOUL': 796, 'SOULFOOD MUSIC DISTRIBUTION': 797, 'SOULFOOD DISTRIBUTION': 797, 'SOULFOOD MUSIC': 797, 'SOULFOOD': 797, 'SOYUZ': 798, 'SPECTRUM': 799, - 'SPENTZOS FILM': 800, 'SPENTZOS': 800, 'SPIRIT ENTERTAINMENT': 801, 'SPIRIT': 801, 'SPIRIT MEDIA GMBH': 802, 'SPIRIT MEDIA': 802, 'SPLENDID ENTERTAINMENT': 803, 'SPLENDID FILM': 804, 'SPO': 805, 'SQUARE ENIX': 806, 'SRI BALAJI VIDEO': 807, 'SRI BALAJI': 807, 'SRI': 807, 'SRI VIDEO': 807, 'SRS CINEMA': 808, 'SRS': 808, 'SSO RECORDINGS': 809, 'SSO': 809, 'ST2 MUSIC': 810, 'ST2': 810, 'STAR MEDIA ENTERTAINMENT': 811, 'STAR ENTERTAINMENT': 811, 'STAR MEDIA': 811, 'STAR': 811, 'STARLIGHT': 812, 'STARZ / ANCHOR BAY': 813, 'STARZ ANCHOR BAY': 813, 'STARZ': 813, 'ANCHOR BAY': 813, 'STER KINEKOR': 814, 'STERLING ENTERTAINMENT': 815, 'STERLING': 815, 'STINGRAY': 816, 'STOCKFISCH RECORDS': 817, 'STOCKFISCH': 817, 'STRAND RELEASING': 818, 'STRAND': 818, 'STUDIO 4K': 819, 'STUDIO CANAL': 820, 'STUDIO GHIBLI': 821, 'GHIBLI': 821, 'STUDIO HAMBURG ENTERPRISES': 822, 'HAMBURG ENTERPRISES': 822, 'STUDIO HAMBURG': 822, 'HAMBURG': 822, 'STUDIO S': 823, 'SUBKULTUR ENTERTAINMENT': 824, 'SUBKULTUR': 824, 'SUEVIA FILMS': 825, 'SUEVIA': 825, 'SUMMIT ENTERTAINMENT': 826, 'SUMMIT': 826, 'SUNFILM ENTERTAINMENT': 827, 'SUNFILM': 827, 'SURROUND RECORDS': 828, 'SURROUND': 828, 'SVENSK FILMINDUSTRI': 829, 'SVENSK': 829, 'SWEN FILMES': 830, 'SWEN FILMS': 830, 'SWEN': 830, 'SYNAPSE FILMS': 831, 'SYNAPSE': 831, 'SYNDICADO': 832, 'SYNERGETIC': 833, 'T- SERIES': 834, 'T-SERIES': 834, 'T SERIES': 834, 'TSERIES': 834, 'T.V.P.': 835, 'TVP': 835, 'TACET RECORDS': 836, 'TACET': 836, 'TAI SENG': 837, 'TAI SHENG': 838, 'TAKEONE': 839, 'TAKESHOBO': 840, 'TAMASA DIFFUSION': 841, 'TC ENTERTAINMENT': 842, 'TC': 842, 'TDK': 843, 'TEAM MARKETING': 844, 'TEATRO REAL': 845, 'TEMA DISTRIBUCIONES': 846, 'TEMPE DIGITAL': 847, 'TF1 VIDÉO': 848, 'TF1 VIDEO': 848, 'TF1': 848, 'THE BLU': 849, 'BLU': 849, 'THE ECSTASY OF FILMS': 850, 'THE FILM DETECTIVE': 851, 'FILM DETECTIVE': 851, 'THE JOKERS': 852, 'JOKERS': 852, 'THE ON': 853, 'ON': 853, 'THIMFILM': 854, 'THIM FILM': 854, 'THIM': 854, 'THIRD WINDOW FILMS': 855, 'THIRD WINDOW': 855, '3RD WINDOW FILMS': 855, '3RD WINDOW': 855, 'THUNDERBEAN ANIMATION': 856, 'THUNDERBEAN': 856, 'THUNDERBIRD RELEASING': 857, 'THUNDERBIRD': 857, 'TIBERIUS FILM': 858, 'TIME LIFE': 859, 'TIMELESS MEDIA GROUP': 860, 'TIMELESS MEDIA': 860, 'TIMELESS GROUP': 860, 'TIMELESS': 860, 'TLA RELEASING': 861, 'TLA': 861, 'TOBIS FILM': 862, 'TOBIS': 862, 'TOEI': 863, 'TOHO': 864, 'TOKYO SHOCK': 865, 'TOKYO': 865, 'TONPOOL MEDIEN GMBH': 866, 'TONPOOL MEDIEN': 866, 'TOPICS 
ENTERTAINMENT': 867, 'TOPICS': 867, 'TOUCHSTONE PICTURES': 868, 'TOUCHSTONE': 868, 'TRANSMISSION FILMS': 869, 'TRANSMISSION': 869, 'TRAVEL VIDEO STORE': 870, 'TRIART': 871, 'TRIGON FILM': 872, 'TRIGON': 872, 'TRINITY HOME ENTERTAINMENT': 873, 'TRINITY ENTERTAINMENT': 873, 'TRINITY HOME': 873, 'TRINITY': 873, 'TRIPICTURES': 874, 'TRI-PICTURES': 874, 'TRI PICTURES': 874, 'TROMA': 875, 'TURBINE MEDIEN': 876, 'TURTLE RECORDS': 877, 'TURTLE': 877, 'TVA FILMS': 878, 'TVA': 878, 'TWILIGHT TIME': 879, 'TWILIGHT': 879, 'TT': 879, 'TWIN CO., LTD.': 880, 'TWIN CO, LTD.': 880, 'TWIN CO., LTD': 880, 'TWIN CO, LTD': 880, 'TWIN CO LTD': 880, 'TWIN LTD': 880, 'TWIN CO.': 880, 'TWIN CO': 880, 'TWIN': 880, 'UCA': 881, 'UDR': 882, 'UEK': 883, 'UFA/DVD': 884, 'UFA DVD': 884, 'UFADVD': 884, 'UGC PH': 885, 'ULTIMATE3DHEAVEN': 886, 'ULTRA': 887, 'UMBRELLA ENTERTAINMENT': 888, 'UMBRELLA': 888, 'UMC': 889, "UNCORK'D ENTERTAINMENT": 890, 'UNCORKD ENTERTAINMENT': 890, 'UNCORK D ENTERTAINMENT': 890, "UNCORK'D": 890, 'UNCORK D': 890, 'UNCORKD': 890, 'UNEARTHED FILMS': 891, 'UNEARTHED': 891, 'UNI DISC': 892, 'UNIMUNDOS': 893, 'UNITEL': 894, 'UNIVERSAL MUSIC': 895, 'UNIVERSAL SONY PICTURES HOME ENTERTAINMENT': 896, 'UNIVERSAL SONY PICTURES ENTERTAINMENT': 896, 'UNIVERSAL SONY PICTURES HOME': 896, 'UNIVERSAL SONY PICTURES': 896, 'UNIVERSAL HOME ENTERTAINMENT': - 896, 'UNIVERSAL ENTERTAINMENT': 896, 'UNIVERSAL HOME': 896, 'UNIVERSAL STUDIOS': 897, 'UNIVERSAL': 897, 'UNIVERSE LASER & VIDEO CO.': 898, 'UNIVERSE LASER AND VIDEO CO.': 898, 'UNIVERSE LASER & VIDEO CO': 898, 'UNIVERSE LASER AND VIDEO CO': 898, 'UNIVERSE LASER CO.': 898, 'UNIVERSE LASER CO': 898, 'UNIVERSE LASER': 898, 'UNIVERSUM FILM': 899, 'UNIVERSUM': 899, 'UTV': 900, 'VAP': 901, 'VCI': 902, 'VENDETTA FILMS': 903, 'VENDETTA': 903, 'VERSÁTIL HOME VIDEO': 904, 'VERSÁTIL VIDEO': 904, 'VERSÁTIL HOME': 904, 'VERSÁTIL': 904, 'VERSATIL HOME VIDEO': 904, 'VERSATIL VIDEO': 904, 'VERSATIL HOME': 904, 'VERSATIL': 904, 'VERTICAL ENTERTAINMENT': 905, 'VERTICAL': 905, 'VÉRTICE 360º': 906, 'VÉRTICE 360': 906, 'VERTICE 360o': 906, 'VERTICE 360': 906, 'VERTIGO BERLIN': 907, 'VÉRTIGO FILMS': 908, 'VÉRTIGO': 908, 'VERTIGO FILMS': 908, 'VERTIGO': 908, 'VERVE PICTURES': 909, 'VIA VISION ENTERTAINMENT': 910, 'VIA VISION': 910, 'VICOL ENTERTAINMENT': 911, 'VICOL': 911, 'VICOM': 912, 'VICTOR ENTERTAINMENT': 913, 'VICTOR': 913, 'VIDEA CDE': 914, 'VIDEO FILM EXPRESS': 915, 'VIDEO FILM': 915, 'VIDEO EXPRESS': 915, 'VIDEO MUSIC, INC.': 916, 'VIDEO MUSIC, INC': 916, 'VIDEO MUSIC INC.': 916, 'VIDEO MUSIC INC': 916, 'VIDEO MUSIC': 916, 'VIDEO SERVICE CORP.': 917, 'VIDEO SERVICE CORP': 917, 'VIDEO SERVICE': 917, 'VIDEO TRAVEL': 918, 'VIDEOMAX': 919, 'VIDEO MAX': 919, 'VII PILLARS ENTERTAINMENT': 920, 'VII PILLARS': 920, 'VILLAGE FILMS': 921, 'VINEGAR SYNDROME': 922, 'VINEGAR': 922, 'VS': 922, 'VINNY MOVIES': 923, 'VINNY': 923, 'VIRGIL FILMS & ENTERTAINMENT': 924, 'VIRGIL FILMS AND ENTERTAINMENT': 924, 'VIRGIL ENTERTAINMENT': 924, 'VIRGIL FILMS': 924, 'VIRGIL': 924, 'VIRGIN RECORDS': 925, 'VIRGIN': 925, 'VISION FILMS': 926, 'VISION': 926, 'VISUAL ENTERTAINMENT GROUP': 927, 'VISUAL GROUP': 927, 'VISUAL ENTERTAINMENT': 927, 'VISUAL': 927, 'VIVENDI VISUAL ENTERTAINMENT': 928, 'VIVENDI VISUAL': 928, 'VIVENDI': 928, 'VIZ PICTURES': 929, 'VIZ': 929, 'VLMEDIA': 930, 'VL MEDIA': 930, 'VL': 930, 'VOLGA': 931, 'VVS FILMS': 932, - 'VVS': 932, 'VZ HANDELS GMBH': 933, 'VZ HANDELS': 933, 'WARD RECORDS': 934, 'WARD': 934, 'WARNER BROS.': 935, 'WARNER BROS': 935, 'WARNER ARCHIVE': 935, 'WARNER ARCHIVE 
COLLECTION': 935, 'WAC': 935, 'WARNER': 935, 'WARNER MUSIC': 936, 'WEA': 937, 'WEINSTEIN COMPANY': 938, 'WEINSTEIN': 938, 'WELL GO USA': 939, 'WELL GO': 939, 'WELTKINO FILMVERLEIH': 940, 'WEST VIDEO': 941, 'WEST': 941, 'WHITE PEARL MOVIES': 942, 'WHITE PEARL': 942, - 'WICKED-VISION MEDIA': 943, 'WICKED VISION MEDIA': 943, 'WICKEDVISION MEDIA': 943, 'WICKED-VISION': 943, 'WICKED VISION': 943, 'WICKEDVISION': 943, 'WIENERWORLD': 944, 'WILD BUNCH': 945, 'WILD EYE RELEASING': 946, 'WILD EYE': 946, 'WILD SIDE VIDEO': 947, 'WILD SIDE': 947, 'WME': 948, 'WOLFE VIDEO': 949, 'WOLFE': 949, 'WORD ON FIRE': 950, 'WORKS FILM GROUP': 951, 'WORLD WRESTLING': 952, 'WVG MEDIEN': 953, 'WWE STUDIOS': 954, 'WWE': 954, 'X RATED KULT': 955, 'X-RATED KULT': 955, 'X RATED CULT': 955, 'X-RATED CULT': 955, 'X RATED': 955, 'X-RATED': 955, 'XCESS': 956, 'XLRATOR': 957, 'XT VIDEO': 958, 'XT': 958, 'YAMATO VIDEO': 959, 'YAMATO': 959, 'YASH RAJ FILMS': 960, 'YASH RAJS': 960, 'ZEITGEIST FILMS': 961, 'ZEITGEIST': 961, 'ZENITH PICTURES': 962, 'ZENITH': 962, 'ZIMA': 963, 'ZYLO': 964, 'ZYX MUSIC': 965, 'ZYX': 965 + "01 DISTRIBUTION": 1, + "100 DESTINATIONS TRAVEL FILM": 2, + "101 FILMS": 3, + "1FILMS": 4, + "2 ENTERTAIN VIDEO": 5, + "20TH CENTURY FOX": 6, + "2L": 7, + "3D CONTENT HUB": 8, + "3D MEDIA": 9, + "3L FILM": 10, + "4DIGITAL": 11, + "4DVD": 12, + "4K ULTRA HD MOVIES": 13, + "4K UHD": 13, + "8-FILMS": 14, + "84 ENTERTAINMENT": 15, + "88 FILMS": 16, + "@ANIME": 17, + "ANIME": 17, + "A CONTRACORRIENTE": 18, + "A CONTRACORRIENTE FILMS": 19, + "A&E HOME VIDEO": 20, + "A&E": 20, + "A&M RECORDS": 21, + "A+E NETWORKS": 22, + "A+R": 23, + "A-FILM": 24, + "AAA": 25, + "AB VIDÉO": 26, + "AB VIDEO": 26, + "ABC - (AUSTRALIAN BROADCASTING CORPORATION)": 27, + "ABC": 27, + "ABKCO": 28, + "ABSOLUT MEDIEN": 29, + "ABSOLUTE": 30, + "ACCENT FILM ENTERTAINMENT": 31, + "ACCENTUS": 32, + "ACORN MEDIA": 33, + "AD VITAM": 34, + "ADA": 35, + "ADITYA VIDEOS": 36, + "ADSO FILMS": 37, + "AFM RECORDS": 38, + "AGFA": 39, + "AIX RECORDS": 40, + "ALAMODE FILM": 41, + "ALBA RECORDS": 42, + "ALBANY RECORDS": 43, + "ALBATROS": 44, + "ALCHEMY": 45, + "ALIVE": 46, + "ALL ANIME": 47, + "ALL INTERACTIVE ENTERTAINMENT": 48, + "ALLEGRO": 49, + "ALLIANCE": 50, + "ALPHA MUSIC": 51, + "ALTERDYSTRYBUCJA": 52, + "ALTERED INNOCENCE": 53, + "ALTITUDE FILM DISTRIBUTION": 54, + "ALUCARD RECORDS": 55, + "AMAZING D.C.": 56, + "AMAZING DC": 56, + "AMMO CONTENT": 57, + "AMUSE SOFT ENTERTAINMENT": 58, + "ANCONNECT": 59, + "ANEC": 60, + "ANIMATSU": 61, + "ANIME HOUSE": 62, + "ANIME LTD": 63, + "ANIME WORKS": 64, + "ANIMEIGO": 65, + "ANIPLEX": 66, + "ANOLIS ENTERTAINMENT": 67, + "ANOTHER WORLD ENTERTAINMENT": 68, + "AP INTERNATIONAL": 69, + "APPLE": 70, + "ARA MEDIA": 71, + "ARBELOS": 72, + "ARC ENTERTAINMENT": 73, + "ARP SÉLECTION": 74, + "ARP SELECTION": 74, + "ARROW": 75, + "ART SERVICE": 76, + "ART VISION": 77, + "ARTE ÉDITIONS": 78, + "ARTE EDITIONS": 78, + "ARTE VIDÉO": 79, + "ARTE VIDEO": 79, + "ARTHAUS MUSIK": 80, + "ARTIFICIAL EYE": 81, + "ARTSPLOITATION FILMS": 82, + "ARTUS FILMS": 83, + "ASCOT ELITE HOME ENTERTAINMENT": 84, + "ASIA VIDEO": 85, + "ASMIK ACE": 86, + "ASTRO RECORDS & FILMWORKS": 87, + "ASYLUM": 88, + "ATLANTIC FILM": 89, + "ATLANTIC RECORDS": 90, + "ATLAS FILM": 91, + "AUDIO VISUAL ENTERTAINMENT": 92, + "AURO-3D CREATIVE LABEL": 93, + "AURUM": 94, + "AV VISIONEN": 95, + "AV-JET": 96, + "AVALON": 97, + "AVENTI": 98, + "AVEX TRAX": 99, + "AXIOM": 100, + "AXIS RECORDS": 101, + "AYNGARAN": 102, + "BAC FILMS": 103, + "BACH FILMS": 104, + "BANDAI 
VISUAL": 105, + "BARCLAY": 106, + "BBC": 107, + "BRITISH BROADCASTING CORPORATION": 107, + "BBI FILMS": 108, + "BBI": 108, + "BCI HOME ENTERTAINMENT": 109, + "BEGGARS BANQUET": 110, + "BEL AIR CLASSIQUES": 111, + "BELGA FILMS": 112, + "BELVEDERE": 113, + "BENELUX FILM DISTRIBUTORS": 114, + "BENNETT-WATT MEDIA": 115, + "BERLIN CLASSICS": 116, + "BERLINER PHILHARMONIKER RECORDINGS": 117, + "BEST ENTERTAINMENT": 118, + "BEYOND HOME ENTERTAINMENT": 119, + "BFI VIDEO": 120, + "BFI": 120, + "BRITISH FILM INSTITUTE": 120, + "BFS ENTERTAINMENT": 121, + "BFS": 121, + "BHAVANI": 122, + "BIBER RECORDS": 123, + "BIG HOME VIDEO": 124, + "BILDSTÖRUNG": 125, + "BILDSTORUNG": 125, + "BILL ZEBUB": 126, + "BIRNENBLATT": 127, + "BIT WEL": 128, + "BLACK BOX": 129, + "BLACK HILL PICTURES": 130, + "BLACK HILL": 130, + "BLACK HOLE RECORDINGS": 131, + "BLACK HOLE": 131, + "BLAQOUT": 132, + "BLAUFIELD MUSIC": 133, + "BLAUFIELD": 133, + "BLOCKBUSTER ENTERTAINMENT": 134, + "BLOCKBUSTER": 134, + "BLU PHASE MEDIA": 135, + "BLU-RAY ONLY": 136, + "BLU-RAY": 136, + "BLURAY ONLY": 136, + "BLURAY": 136, + "BLUE GENTIAN RECORDS": 137, + "BLUE KINO": 138, + "BLUE UNDERGROUND": 139, + "BMG/ARISTA": 140, + "BMG": 140, + "BMGARISTA": 140, + "BMG ARISTA": 140, + "ARISTA": 140, + "ARISTA/BMG": 140, + "ARISTABMG": 140, + "ARISTA BMG": 140, + "BONTON FILM": 141, + "BONTON": 141, + "BOOMERANG PICTURES": 142, + "BOOMERANG": 142, + "BQHL ÉDITIONS": 143, + "BQHL EDITIONS": 143, + "BQHL": 143, + "BREAKING GLASS": 144, + "BRIDGESTONE": 145, + "BRINK": 146, + "BROAD GREEN PICTURES": 147, + "BROAD GREEN": 147, + "BUSCH MEDIA GROUP": 148, + "BUSCH": 148, + "C MAJOR": 149, + "C.B.S.": 150, + "CAICHANG": 151, + "CALIFÓRNIA FILMES": 152, + "CALIFORNIA FILMES": 152, + "CALIFORNIA": 152, + "CAMEO": 153, + "CAMERA OBSCURA": 154, + "CAMERATA": 155, + "CAMP MOTION PICTURES": 156, + "CAMP MOTION": 156, + "CAPELIGHT PICTURES": 157, + "CAPELIGHT": 157, + "CAPITOL": 159, + "CAPITOL RECORDS": 159, + "CAPRICCI": 160, + "CARGO RECORDS": 161, + "CARLOTTA FILMS": 162, + "CARLOTTA": 162, + "CARLOTA": 162, + "CARMEN FILM": 163, + "CASCADE": 164, + "CATCHPLAY": 165, + "CAULDRON FILMS": 166, + "CAULDRON": 166, + "CBS TELEVISION STUDIOS": 167, + "CBS": 167, + "CCTV": 168, + "CCV ENTERTAINMENT": 169, + "CCV": 169, + "CD BABY": 170, + "CD LAND": 171, + "CECCHI GORI": 172, + "CENTURY MEDIA": 173, + "CHUAN XUN SHI DAI MULTIMEDIA": 174, + "CINE-ASIA": 175, + "CINÉART": 176, + "CINEART": 176, + "CINEDIGM": 177, + "CINEFIL IMAGICA": 178, + "CINEMA EPOCH": 179, + "CINEMA GUILD": 180, + "CINEMA LIBRE STUDIOS": 181, + "CINEMA MONDO": 182, + "CINEMATIC VISION": 183, + "CINEPLOIT RECORDS": 184, + "CINESTRANGE EXTREME": 185, + "CITEL VIDEO": 186, + "CITEL": 186, + "CJ ENTERTAINMENT": 187, + "CJ": 187, + "CLASSIC MEDIA": 188, + "CLASSICFLIX": 189, + "CLASSICLINE": 190, + "CLAUDIO RECORDS": 191, + "CLEAR VISION": 192, + "CLEOPATRA": 193, + "CLOSE UP": 194, + "CMS MEDIA LIMITED": 195, + "CMV LASERVISION": 196, + "CN ENTERTAINMENT": 197, + "CODE RED": 198, + "COHEN MEDIA GROUP": 199, + "COHEN": 199, + "COIN DE MIRE CINÉMA": 200, + "COIN DE MIRE CINEMA": 200, + "COLOSSEO FILM": 201, + "COLUMBIA": 203, + "COLUMBIA PICTURES": 203, + "COLUMBIA/TRI-STAR": 204, + "TRI-STAR": 204, + "COMMERCIAL MARKETING": 205, + "CONCORD MUSIC GROUP": 206, + "CONCORDE VIDEO": 207, + "CONDOR": 208, + "CONSTANTIN FILM": 209, + "CONSTANTIN": 209, + "CONSTANTINO FILMES": 210, + "CONSTANTINO": 210, + "CONSTRUCTIVE MEDIA SERVICE": 211, + "CONSTRUCTIVE": 211, + "CONTENT ZONE": 212, + "CONTENTS GATE": 213, + 
"COQUEIRO VERDE": 214, + "CORNERSTONE MEDIA": 215, + "CORNERSTONE": 215, + "CP DIGITAL": 216, + "CREST MOVIES": 217, + "CRITERION": 218, + "CRITERION COLLECTION": 218, + "CC": 218, + "CRYSTAL CLASSICS": 219, + "CULT EPICS": 220, + "CULT FILMS": 221, + "CULT VIDEO": 222, + "CURZON FILM WORLD": 223, + "D FILMS": 224, + "D'AILLY COMPANY": 225, + "DAILLY COMPANY": 225, + "D AILLY COMPANY": 225, + "D'AILLY": 225, + "DAILLY": 225, + "D AILLY": 225, + "DA CAPO": 226, + "DA MUSIC": 227, + "DALL'ANGELO PICTURES": 228, + "DALLANGELO PICTURES": 228, + "DALL'ANGELO": 228, + "DALL ANGELO PICTURES": 228, + "DALL ANGELO": 228, + "DAREDO": 229, + "DARK FORCE ENTERTAINMENT": 230, + "DARK FORCE": 230, + "DARK SIDE RELEASING": 231, + "DARK SIDE": 231, + "DAZZLER MEDIA": 232, + "DAZZLER": 232, + "DCM PICTURES": 233, + "DCM": 233, + "DEAPLANETA": 234, + "DECCA": 235, + "DEEPJOY": 236, + "DEFIANT SCREEN ENTERTAINMENT": 237, + "DEFIANT SCREEN": 237, + "DEFIANT": 237, + "DELOS": 238, + "DELPHIAN RECORDS": 239, + "DELPHIAN": 239, + "DELTA MUSIC & ENTERTAINMENT": 240, + "DELTA MUSIC AND ENTERTAINMENT": 240, + "DELTA MUSIC ENTERTAINMENT": 240, + "DELTA MUSIC": 240, + "DELTAMAC CO. LTD.": 241, + "DELTAMAC CO LTD": 241, + "DELTAMAC CO": 241, + "DELTAMAC": 241, + "DEMAND MEDIA": 242, + "DEMAND": 242, + "DEP": 243, + "DEUTSCHE GRAMMOPHON": 244, + "DFW": 245, + "DGM": 246, + "DIAPHANA": 247, + "DIGIDREAMS STUDIOS": 248, + "DIGIDREAMS": 248, + "DIGITAL ENVIRONMENTS": 249, + "DIGITAL": 249, + "DISCOTEK MEDIA": 250, + "DISCOVERY CHANNEL": 251, + "DISCOVERY": 251, + "DISK KINO": 252, + "DISNEY / BUENA VISTA": 253, + "DISNEY": 253, + "BUENA VISTA": 253, + "DISNEY BUENA VISTA": 253, + "DISTRIBUTION SELECT": 254, + "DIVISA": 255, + "DNC ENTERTAINMENT": 256, + "DNC": 256, + "DOGWOOF": 257, + "DOLMEN HOME VIDEO": 258, + "DOLMEN": 258, + "DONAU FILM": 259, + "DONAU": 259, + "DORADO FILMS": 260, + "DORADO": 260, + "DRAFTHOUSE FILMS": 261, + "DRAFTHOUSE": 261, + "DRAGON FILM ENTERTAINMENT": 262, + "DRAGON ENTERTAINMENT": 262, + "DRAGON FILM": 262, + "DRAGON": 262, + "DREAMWORKS": 263, + "DRIVE ON RECORDS": 264, + "DRIVE ON": 264, + "DRIVE-ON": 264, + "DRIVEON": 264, + "DS MEDIA": 265, + "DTP ENTERTAINMENT AG": 266, + "DTP ENTERTAINMENT": 266, + "DTP AG": 266, + "DTP": 266, + "DTS ENTERTAINMENT": 267, + "DTS": 267, + "DUKE MARKETING": 268, + "DUKE VIDEO DISTRIBUTION": 269, + "DUKE": 269, + "DUTCH FILMWORKS": 270, + "DUTCH": 270, + "DVD INTERNATIONAL": 271, + "DVD": 271, + "DYBEX": 272, + "DYNAMIC": 273, + "DYNIT": 274, + "E1 ENTERTAINMENT": 275, + "E1": 275, + "EAGLE ENTERTAINMENT": 276, + "EAGLE HOME ENTERTAINMENT PVT.LTD.": 277, + "EAGLE HOME ENTERTAINMENT PVTLTD": 277, + "EAGLE HOME ENTERTAINMENT PVT LTD": 277, + "EAGLE HOME ENTERTAINMENT": 277, + "EAGLE PICTURES": 278, + "EAGLE ROCK ENTERTAINMENT": 279, + "EAGLE ROCK": 279, + "EAGLE VISION MEDIA": 280, + "EAGLE VISION": 280, + "EARMUSIC": 281, + "EARTH ENTERTAINMENT": 282, + "EARTH": 282, + "ECHO BRIDGE ENTERTAINMENT": 283, + "ECHO BRIDGE": 283, + "EDEL GERMANY GMBH": 284, + "EDEL GERMANY": 284, + "EDEL RECORDS": 285, + "EDITION TONFILM": 286, + "EDITIONS MONTPARNASSE": 287, + "EDKO FILMS LTD.": 288, + "EDKO FILMS LTD": 288, + "EDKO FILMS": 288, + "EDKO": 288, + "EIN'S M&M CO": 289, + "EINS M&M CO": 289, + "EIN'S M&M": 289, + "EINS M&M": 289, + "ELEA-MEDIA": 290, + "ELEA MEDIA": 290, + "ELEA": 290, + "ELECTRIC PICTURE": 291, + "ELECTRIC": 291, + "ELEPHANT FILMS": 292, + "ELEPHANT": 292, + "ELEVATION": 293, + "EMI": 294, + "EMON": 295, + "EMS": 296, + "EMYLIA": 297, + "ENE MEDIA": 
298, + "ENE": 298, + "ENTERTAINMENT IN VIDEO": 299, + "ENTERTAINMENT IN": 299, + "ENTERTAINMENT ONE": 300, + "ENTERTAINMENT ONE FILMS CANADA INC.": 301, + "ENTERTAINMENT ONE FILMS CANADA INC": 301, + "ENTERTAINMENT ONE FILMS CANADA": 301, + "ENTERTAINMENT ONE CANADA INC": 301, + "ENTERTAINMENT ONE CANADA": 301, + "ENTERTAINMENTONE": 302, + "EONE": 303, + "EOS": 304, + "EPIC PICTURES": 305, + "EPIC": 305, + "EPIC RECORDS": 306, + "ERATO": 307, + "EROS": 308, + "ESC EDITIONS": 309, + "ESCAPI MEDIA BV": 310, + "ESOTERIC RECORDINGS": 311, + "ESPN FILMS": 312, + "EUREKA ENTERTAINMENT": 313, + "EUREKA": 313, + "EURO PICTURES": 314, + "EURO VIDEO": 315, + "EUROARTS": 316, + "EUROPA FILMES": 317, + "EUROPA": 317, + "EUROPACORP": 318, + "EUROZOOM": 319, + "EXCEL": 320, + "EXPLOSIVE MEDIA": 321, + "EXPLOSIVE": 321, + "EXTRALUCID FILMS": 322, + "EXTRALUCID": 322, + "EYE SEE MOVIES": 323, + "EYE SEE": 323, + "EYK MEDIA": 324, + "EYK": 324, + "FABULOUS FILMS": 325, + "FABULOUS": 325, + "FACTORIS FILMS": 326, + "FACTORIS": 326, + "FARAO RECORDS": 327, + "FARBFILM HOME ENTERTAINMENT": 328, + "FARBFILM ENTERTAINMENT": 328, + "FARBFILM HOME": 328, + "FARBFILM": 328, + "FEELGOOD ENTERTAINMENT": 329, + "FEELGOOD": 329, + "FERNSEHJUWELEN": 330, + "FILM CHEST": 331, + "FILM MEDIA": 332, + "FILM MOVEMENT": 333, + "FILM4": 334, + "FILMART": 335, + "FILMAURO": 336, + "FILMAX": 337, + "FILMCONFECT HOME ENTERTAINMENT": 338, + "FILMCONFECT ENTERTAINMENT": 338, + "FILMCONFECT HOME": 338, + "FILMCONFECT": 338, + "FILMEDIA": 339, + "FILMJUWELEN": 340, + "FILMOTEKA NARODAWA": 341, + "FILMRISE": 342, + "FINAL CUT ENTERTAINMENT": 343, + "FINAL CUT": 343, + "FIREHOUSE 12 RECORDS": 344, + "FIREHOUSE 12": 344, + "FIRST INTERNATIONAL PRODUCTION": 345, + "FIRST INTERNATIONAL": 345, + "FIRST LOOK STUDIOS": 346, + "FIRST LOOK": 346, + "FLAGMAN TRADE": 347, + "FLASHSTAR FILMES": 348, + "FLASHSTAR": 348, + "FLICKER ALLEY": 349, + "FNC ADD CULTURE": 350, + "FOCUS FILMES": 351, + "FOCUS": 351, + "FOKUS MEDIA": 352, + "FOKUSA": 352, + "FOX PATHE EUROPA": 353, + "FOX PATHE": 353, + "FOX EUROPA": 353, + "FOX/MGM": 354, + "FOX MGM": 354, + "MGM": 354, + "MGM/FOX": 354, + "FOX": 354, + "FPE": 355, + "FRANCE TÉLÉVISIONS DISTRIBUTION": 356, + "FRANCE TELEVISIONS DISTRIBUTION": 356, + "FRANCE TELEVISIONS": 356, + "FRANCE": 356, + "FREE DOLPHIN ENTERTAINMENT": 357, + "FREE DOLPHIN": 357, + "FREESTYLE DIGITAL MEDIA": 358, + "FREESTYLE DIGITAL": 358, + "FREESTYLE": 358, + "FREMANTLE HOME ENTERTAINMENT": 359, + "FREMANTLE ENTERTAINMENT": 359, + "FREMANTLE HOME": 359, + "FREMANTL": 359, + "FRENETIC FILMS": 360, + "FRENETIC": 360, + "FRONTIER WORKS": 361, + "FRONTIER": 361, + "FRONTIERS MUSIC": 362, + "FRONTIERS RECORDS": 363, + "FS FILM OY": 364, + "FS FILM": 364, + "FULL MOON FEATURES": 365, + "FULL MOON": 365, + "FUN CITY EDITIONS": 366, + "FUN CITY": 366, + "FUNIMATION ENTERTAINMENT": 367, + "FUNIMATION": 367, + "FUSION": 368, + "FUTUREFILM": 369, + "G2 PICTURES": 370, + "G2": 370, + "GAGA COMMUNICATIONS": 371, + "GAGA": 371, + "GAIAM": 372, + "GALAPAGOS": 373, + "GAMMA HOME ENTERTAINMENT": 374, + "GAMMA ENTERTAINMENT": 374, + "GAMMA HOME": 374, + "GAMMA": 374, + "GARAGEHOUSE PICTURES": 375, + "GARAGEHOUSE": 375, + "GARAGEPLAY (車庫娛樂)": 376, + "車庫娛樂": 376, + "GARAGEPLAY (Che Ku Yu Le )": 376, + "GARAGEPLAY": 376, + "Che Ku Yu Le": 376, + "GAUMONT": 377, + "GEFFEN": 378, + "GENEON ENTERTAINMENT": 379, + "GENEON": 379, + "GENEON UNIVERSAL ENTERTAINMENT": 380, + "GENERAL VIDEO RECORDING": 381, + "GLASS DOLL FILMS": 382, + "GLASS DOLL": 382, + 
"GLOBE MUSIC MEDIA": 383, + "GLOBE MUSIC": 383, + "GLOBE MEDIA": 383, + "GLOBE": 383, + "GO ENTERTAIN": 384, + "GO": 384, + "GOLDEN HARVEST": 385, + "GOOD!MOVIES": 386, + "GOOD! MOVIES": 386, + "GOOD MOVIES": 386, + "GRAPEVINE VIDEO": 387, + "GRAPEVINE": 387, + "GRASSHOPPER FILM": 388, + "GRASSHOPPER FILMS": 388, + "GRASSHOPPER": 388, + "GRAVITAS VENTURES": 389, + "GRAVITAS": 389, + "GREAT MOVIES": 390, + "GREAT": 390, + "GREEN APPLE ENTERTAINMENT": 391, + "GREEN ENTERTAINMENT": 391, + "GREEN APPLE": 391, + "GREEN": 391, + "GREENNARAE MEDIA": 392, + "GREENNARAE": 392, + "GRINDHOUSE RELEASING": 393, + "GRINDHOUSE": 393, + "GRIND HOUSE": 393, + "GRYPHON ENTERTAINMENT": 394, + "GRYPHON": 394, + "GUNPOWDER & SKY": 395, + "GUNPOWDER AND SKY": 395, + "GUNPOWDER SKY": 395, + "GUNPOWDER + SKY": 395, + "GUNPOWDER": 395, + "HANABEE ENTERTAINMENT": 396, + "HANABEE": 396, + "HANNOVER HOUSE": 397, + "HANNOVER": 397, + "HANSESOUND": 398, + "HANSE SOUND": 398, + "HANSE": 398, + "HAPPINET": 399, + "HARMONIA MUNDI": 400, + "HARMONIA": 400, + "HBO": 401, + "HDC": 402, + "HEC": 403, + "HELL & BACK RECORDINGS": 404, + "HELL AND BACK RECORDINGS": 404, + "HELL & BACK": 404, + "HELL AND BACK": 404, + "HEN'S TOOTH VIDEO": 405, + "HENS TOOTH VIDEO": 405, + "HEN'S TOOTH": 405, + "HENS TOOTH": 405, + "HIGH FLIERS": 406, + "HIGHLIGHT": 407, + "HILLSONG": 408, + "HISTORY CHANNEL": 409, + "HISTORY": 409, + "HK VIDÉO": 410, + "HK VIDEO": 410, + "HK": 410, + "HMH HAMBURGER MEDIEN HAUS": 411, + "HAMBURGER MEDIEN HAUS": 411, + "HMH HAMBURGER MEDIEN": 411, + "HMH HAMBURGER": 411, + "HMH": 411, + "HOLLYWOOD CLASSIC ENTERTAINMENT": 412, + "HOLLYWOOD CLASSIC": 412, + "HOLLYWOOD PICTURES": 413, + "HOLLYWOOD": 413, + "HOPSCOTCH ENTERTAINMENT": 414, + "HOPSCOTCH": 414, + "HPM": 415, + "HÄNNSLER CLASSIC": 416, + "HANNSLER CLASSIC": 416, + "HANNSLER": 416, + "I-CATCHER": 417, + "I CATCHER": 417, + "ICATCHER": 417, + "I-ON NEW MEDIA": 418, + "I ON NEW MEDIA": 418, + "ION NEW MEDIA": 418, + "ION MEDIA": 418, + "I-ON": 418, + "ION": 418, + "IAN PRODUCTIONS": 419, + "IAN": 419, + "ICESTORM": 420, + "ICON FILM DISTRIBUTION": 421, + "ICON DISTRIBUTION": 421, + "ICON FILM": 421, + "ICON": 421, + "IDEALE AUDIENCE": 422, + "IDEALE": 422, + "IFC FILMS": 423, + "IFC": 423, + "IFILM": 424, + "ILLUSIONS UNLTD.": 425, + "ILLUSIONS UNLTD": 425, + "ILLUSIONS": 425, + "IMAGE ENTERTAINMENT": 426, + "IMAGE": 426, + "IMAGEM FILMES": 427, + "IMAGEM": 427, + "IMOVISION": 428, + "IMPERIAL CINEPIX": 429, + "IMPRINT": 430, + "IMPULS HOME ENTERTAINMENT": 431, + "IMPULS ENTERTAINMENT": 431, + "IMPULS HOME": 431, + "IMPULS": 431, + "IN-AKUSTIK": 432, + "IN AKUSTIK": 432, + "INAKUSTIK": 432, + "INCEPTION MEDIA GROUP": 433, + "INCEPTION MEDIA": 433, + "INCEPTION GROUP": 433, + "INCEPTION": 433, + "INDEPENDENT": 434, + "INDICAN": 435, + "INDIE RIGHTS": 436, + "INDIE": 436, + "INDIGO": 437, + "INFO": 438, + "INJOINGAN": 439, + "INKED PICTURES": 440, + "INKED": 440, + "INSIDE OUT MUSIC": 441, + "INSIDE MUSIC": 441, + "INSIDE OUT": 441, + "INSIDE": 441, + "INTERCOM": 442, + "INTERCONTINENTAL VIDEO": 443, + "INTERCONTINENTAL": 443, + "INTERGROOVE": 444, + "INTERSCOPE": 445, + "INVINCIBLE PICTURES": 446, + "INVINCIBLE": 446, + "ISLAND/MERCURY": 447, + "ISLAND MERCURY": 447, + "ISLANDMERCURY": 447, + "ISLAND & MERCURY": 447, + "ISLAND AND MERCURY": 447, + "ISLAND": 447, + "ITN": 448, + "ITV DVD": 449, + "ITV": 449, + "IVC": 450, + "IVE ENTERTAINMENT": 451, + "IVE": 451, + "J&R ADVENTURES": 452, + "J&R": 452, + "JR": 452, + "JAKOB": 453, + "JONU MEDIA": 454, + "JONU": 
454, + "JRB PRODUCTIONS": 455, + "JRB": 455, + "JUST BRIDGE ENTERTAINMENT": 456, + "JUST BRIDGE": 456, + "JUST ENTERTAINMENT": 456, + "JUST": 456, + "KABOOM ENTERTAINMENT": 457, + "KABOOM": 457, + "KADOKAWA ENTERTAINMENT": 458, + "KADOKAWA": 458, + "KAIROS": 459, + "KALEIDOSCOPE ENTERTAINMENT": 460, + "KALEIDOSCOPE": 460, + "KAM & RONSON ENTERPRISES": 461, + "KAM & RONSON": 461, + "KAM&RONSON ENTERPRISES": 461, + "KAM&RONSON": 461, + "KAM AND RONSON ENTERPRISES": 461, + "KAM AND RONSON": 461, + "KANA HOME VIDEO": 462, + "KARMA FILMS": 463, + "KARMA": 463, + "KATZENBERGER": 464, + "KAZE": 465, + "KBS MEDIA": 466, + "KBS": 466, + "KD MEDIA": 467, + "KD": 467, + "KING MEDIA": 468, + "KING": 468, + "KING RECORDS": 469, + "KINO LORBER": 470, + "KINO": 470, + "KINO SWIAT": 471, + "KINOKUNIYA": 472, + "KINOWELT HOME ENTERTAINMENT/DVD": 473, + "KINOWELT HOME ENTERTAINMENT": 473, + "KINOWELT ENTERTAINMENT": 473, + "KINOWELT HOME DVD": 473, + "KINOWELT ENTERTAINMENT/DVD": 473, + "KINOWELT DVD": 473, + "KINOWELT": 473, + "KIT PARKER FILMS": 474, + "KIT PARKER": 474, + "KITTY MEDIA": 475, + "KNM HOME ENTERTAINMENT": 476, + "KNM ENTERTAINMENT": 476, + "KNM HOME": 476, + "KNM": 476, + "KOBA FILMS": 477, + "KOBA": 477, + "KOCH ENTERTAINMENT": 478, + "KOCH MEDIA": 479, + "KOCH": 479, + "KRAKEN RELEASING": 480, + "KRAKEN": 480, + "KSCOPE": 481, + "KSM": 482, + "KULTUR": 483, + "L'ATELIER D'IMAGES": 484, + "LATELIER D'IMAGES": 484, + "L'ATELIER DIMAGES": 484, + "LATELIER DIMAGES": 484, + "L ATELIER D'IMAGES": 484, + "L'ATELIER D IMAGES": 484, + "L ATELIER D IMAGES": 484, + "L'ATELIER": 484, + "L ATELIER": 484, + "LATELIER": 484, + "LA AVENTURA AUDIOVISUAL": 485, + "LA AVENTURA": 485, + "LACE GROUP": 486, + "LACE": 486, + "LASER PARADISE": 487, + "LAYONS": 488, + "LCJ EDITIONS": 489, + "LCJ": 489, + "LE CHAT QUI FUME": 490, + "LE PACTE": 491, + "LEDICK FILMHANDEL": 492, + "LEGEND": 493, + "LEOMARK STUDIOS": 494, + "LEOMARK": 494, + "LEONINE FILMS": 495, + "LEONINE": 495, + "LICHTUNG MEDIA LTD": 496, + "LICHTUNG LTD": 496, + "LICHTUNG MEDIA LTD.": 496, + "LICHTUNG LTD.": 496, + "LICHTUNG MEDIA": 496, + "LICHTUNG": 496, + "LIGHTHOUSE HOME ENTERTAINMENT": 497, + "LIGHTHOUSE ENTERTAINMENT": 497, + "LIGHTHOUSE HOME": 497, + "LIGHTHOUSE": 497, + "LIGHTYEAR": 498, + "LIONSGATE FILMS": 499, + "LIONSGATE": 499, + "LIZARD CINEMA TRADE": 500, + "LLAMENTOL": 501, + "LOBSTER FILMS": 502, + "LOBSTER": 502, + "LOGON": 503, + "LORBER FILMS": 504, + "LORBER": 504, + "LOS BANDITOS FILMS": 505, + "LOS BANDITOS": 505, + "LOUD & PROUD RECORDS": 506, + "LOUD AND PROUD RECORDS": 506, + "LOUD & PROUD": 506, + "LOUD AND PROUD": 506, + "LSO LIVE": 507, + "LUCASFILM": 508, + "LUCKY RED": 509, + "LUMIÈRE HOME ENTERTAINMENT": 510, + "LUMIERE HOME ENTERTAINMENT": 510, + "LUMIERE ENTERTAINMENT": 510, + "LUMIERE HOME": 510, + "LUMIERE": 510, + "M6 VIDEO": 511, + "M6": 511, + "MAD DIMENSION": 512, + "MADMAN ENTERTAINMENT": 513, + "MADMAN": 513, + "MAGIC BOX": 514, + "MAGIC PLAY": 515, + "MAGNA HOME ENTERTAINMENT": 516, + "MAGNA ENTERTAINMENT": 516, + "MAGNA HOME": 516, + "MAGNA": 516, + "MAGNOLIA PICTURES": 517, + "MAGNOLIA": 517, + "MAIDEN JAPAN": 518, + "MAIDEN": 518, + "MAJENG MEDIA": 519, + "MAJENG": 519, + "MAJESTIC HOME ENTERTAINMENT": 520, + "MAJESTIC ENTERTAINMENT": 520, + "MAJESTIC HOME": 520, + "MAJESTIC": 520, + "MANGA HOME ENTERTAINMENT": 521, + "MANGA ENTERTAINMENT": 521, + "MANGA HOME": 521, + "MANGA": 521, + "MANTA LAB": 522, + "MAPLE STUDIOS": 523, + "MAPLE": 523, + "MARCO POLO PRODUCTION": 524, + "MARCO POLO": 524, + 
"MARIINSKY": 525, + "MARVEL STUDIOS": 526, + "MARVEL": 526, + "MASCOT RECORDS": 527, + "MASCOT": 527, + "MASSACRE VIDEO": 528, + "MASSACRE": 528, + "MATCHBOX": 529, + "MATRIX D": 530, + "MAXAM": 531, + "MAYA HOME ENTERTAINMENT": 532, + "MAYA ENTERTAINMENT": 532, + "MAYA HOME": 532, + "MAYAT": 532, + "MDG": 533, + "MEDIA BLASTERS": 534, + "MEDIA FACTORY": 535, + "MEDIA TARGET DISTRIBUTION": 536, + "MEDIA TARGET": 536, + "MEDIAINVISION": 537, + "MEDIATOON": 538, + "MEDIATRES ESTUDIO": 539, + "MEDIATRES STUDIO": 539, + "MEDIATRES": 539, + "MEDICI ARTS": 540, + "MEDICI CLASSICS": 541, + "MEDIUMRARE ENTERTAINMENT": 542, + "MEDIUMRARE": 542, + "MEDUSA": 543, + "MEGASTAR": 544, + "MEI AH": 545, + "MELI MÉDIAS": 546, + "MELI MEDIAS": 546, + "MEMENTO FILMS": 547, + "MEMENTO": 547, + "MENEMSHA FILMS": 548, + "MENEMSHA": 548, + "MERCURY": 549, + "MERCURY STUDIOS": 550, + "MERGE SOFT PRODUCTIONS": 551, + "MERGE PRODUCTIONS": 551, + "MERGE SOFT": 551, + "MERGE": 551, + "METAL BLADE RECORDS": 552, + "METAL BLADE": 552, + "METEOR": 553, + "METRO-GOLDWYN-MAYER": 554, + "METRO GOLDWYN MAYER": 554, + "METROGOLDWYNMAYER": 554, + "METRODOME VIDEO": 555, + "METRODOME": 555, + "METROPOLITAN": 556, + "MFA+": 557, + "MFA": 557, + "MIG FILMGROUP": 558, + "MIG": 558, + "MILESTONE": 559, + "MILL CREEK ENTERTAINMENT": 560, + "MILL CREEK": 560, + "MILLENNIUM MEDIA": 561, + "MILLENNIUM": 561, + "MIRAGE ENTERTAINMENT": 562, + "MIRAGE": 562, + "MIRAMAX": 563, + "MISTERIYA ZVUKA": 564, + "MK2": 565, + "MODE RECORDS": 566, + "MODE": 566, + "MOMENTUM PICTURES": 567, + "MONDO HOME ENTERTAINMENT": 568, + "MONDO ENTERTAINMENT": 568, + "MONDO HOME": 568, + "MONDO MACABRO": 569, + "MONGREL MEDIA": 570, + "MONOLIT": 571, + "MONOLITH VIDEO": 572, + "MONOLITH": 572, + "MONSTER PICTURES": 573, + "MONSTER": 573, + "MONTEREY VIDEO": 574, + "MONTEREY": 574, + "MONUMENT RELEASING": 575, + "MONUMENT": 575, + "MORNINGSTAR": 576, + "MORNING STAR": 576, + "MOSERBAER": 577, + "MOVIEMAX": 578, + "MOVINSIDE": 579, + "MPI MEDIA GROUP": 580, + "MPI MEDIA": 580, + "MPI": 580, + "MR. BONGO FILMS": 581, + "MR BONGO FILMS": 581, + "MR BONGO": 581, + "MRG (MERIDIAN)": 582, + "MRG MERIDIAN": 582, + "MRG": 582, + "MERIDIAN": 582, + "MUBI": 583, + "MUG SHOT PRODUCTIONS": 584, + "MUG SHOT": 584, + "MULTIMUSIC": 585, + "MULTI-MUSIC": 585, + "MULTI MUSIC": 585, + "MUSE": 586, + "MUSIC BOX FILMS": 587, + "MUSIC BOX": 587, + "MUSICBOX": 587, + "MUSIC BROKERS": 588, + "MUSIC THEORIES": 589, + "MUSIC VIDEO DISTRIBUTORS": 590, + "MUSIC VIDEO": 590, + "MUSTANG ENTERTAINMENT": 591, + "MUSTANG": 591, + "MVD VISUAL": 592, + "MVD": 592, + "MVD/VSC": 593, + "MVL": 594, + "MVM ENTERTAINMENT": 595, + "MVM": 595, + "MYNDFORM": 596, + "MYSTIC NIGHT PICTURES": 597, + "MYSTIC NIGHT": 597, + "NAMELESS MEDIA": 598, + "NAMELESS": 598, + "NAPALM RECORDS": 599, + "NAPALM": 599, + "NATIONAL ENTERTAINMENT MEDIA": 600, + "NATIONAL ENTERTAINMENT": 600, + "NATIONAL MEDIA": 600, + "NATIONAL FILM ARCHIVE": 601, + "NATIONAL ARCHIVE": 601, + "NATIONAL FILM": 601, + "NATIONAL GEOGRAPHIC": 602, + "NAT GEO TV": 602, + "NAT GEO": 602, + "NGO": 602, + "NAXOS": 603, + "NBCUNIVERSAL ENTERTAINMENT JAPAN": 604, + "NBC UNIVERSAL ENTERTAINMENT JAPAN": 604, + "NBCUNIVERSAL JAPAN": 604, + "NBC UNIVERSAL JAPAN": 604, + "NBC JAPAN": 604, + "NBO ENTERTAINMENT": 605, + "NBO": 605, + "NEOS": 606, + "NETFLIX": 607, + "NETWORK": 608, + "NEW BLOOD": 609, + "NEW DISC": 610, + "NEW KSM": 611, + "NEW LINE CINEMA": 612, + "NEW LINE": 612, + "NEW MOVIE TRADING CO. 
LTD": 613, + "NEW MOVIE TRADING CO LTD": 613, + "NEW MOVIE TRADING CO": 613, + "NEW MOVIE TRADING": 613, + "NEW WAVE FILMS": 614, + "NEW WAVE": 614, + "NFI": 615, + "NHK": 616, + "NIPPONART": 617, + "NIS AMERICA": 618, + "NJUTAFILMS": 619, + "NOBLE ENTERTAINMENT": 620, + "NOBLE": 620, + "NORDISK FILM": 621, + "NORDISK": 621, + "NORSK FILM": 622, + "NORSK": 622, + "NORTH AMERICAN MOTION PICTURES": 623, + "NOS AUDIOVISUAIS": 624, + "NOTORIOUS PICTURES": 625, + "NOTORIOUS": 625, + "NOVA MEDIA": 626, + "NOVA": 626, + "NOVA SALES AND DISTRIBUTION": 627, + "NOVA SALES & DISTRIBUTION": 627, + "NSM": 628, + "NSM RECORDS": 629, + "NUCLEAR BLAST": 630, + "NUCLEUS FILMS": 631, + "NUCLEUS": 631, + "OBERLIN MUSIC": 632, + "OBERLIN": 632, + "OBRAS-PRIMAS DO CINEMA": 633, + "OBRAS PRIMAS DO CINEMA": 633, + "OBRASPRIMAS DO CINEMA": 633, + "OBRAS-PRIMAS CINEMA": 633, + "OBRAS PRIMAS CINEMA": 633, + "OBRASPRIMAS CINEMA": 633, + "OBRAS-PRIMAS": 633, + "OBRAS PRIMAS": 633, + "OBRASPRIMAS": 633, + "ODEON": 634, + "OFDB FILMWORKS": 635, + "OFDB": 635, + "OLIVE FILMS": 636, + "OLIVE": 636, + "ONDINE": 637, + "ONSCREEN FILMS": 638, + "ONSCREEN": 638, + "OPENING DISTRIBUTION": 639, + "OPERA AUSTRALIA": 640, + "OPTIMUM HOME ENTERTAINMENT": 641, + "OPTIMUM ENTERTAINMENT": 641, + "OPTIMUM HOME": 641, + "OPTIMUM": 641, + "OPUS ARTE": 642, + "ORANGE STUDIO": 643, + "ORANGE": 643, + "ORLANDO EASTWOOD FILMS": 644, + "ORLANDO FILMS": 644, + "ORLANDO EASTWOOD": 644, + "ORLANDO": 644, + "ORUSTAK PICTURES": 645, + "ORUSTAK": 645, + "OSCILLOSCOPE PICTURES": 646, + "OSCILLOSCOPE": 646, + "OUTPLAY": 647, + "PALISADES TARTAN": 648, + "PAN VISION": 649, + "PANVISION": 649, + "PANAMINT CINEMA": 650, + "PANAMINT": 650, + "PANDASTORM ENTERTAINMENT": 651, + "PANDA STORM ENTERTAINMENT": 651, + "PANDASTORM": 651, + "PANDA STORM": 651, + "PANDORA FILM": 652, + "PANDORA": 652, + "PANEGYRIC": 653, + "PANORAMA": 654, + "PARADE DECK FILMS": 655, + "PARADE DECK": 655, + "PARADISE": 656, + "PARADISO FILMS": 657, + "PARADOX": 658, + "PARAMOUNT PICTURES": 659, + "PARAMOUNT": 659, + "PARIS FILMES": 660, + "PARIS FILMS": 660, + "PARIS": 660, + "PARK CIRCUS": 661, + "PARLOPHONE": 662, + "PASSION RIVER": 663, + "PATHE DISTRIBUTION": 664, + "PATHE": 664, + "PBS": 665, + "PEACE ARCH TRINITY": 666, + "PECCADILLO PICTURES": 667, + "PEPPERMINT": 668, + "PHASE 4 FILMS": 669, + "PHASE 4": 669, + "PHILHARMONIA BAROQUE": 670, + "PICTURE HOUSE ENTERTAINMENT": 671, + "PICTURE ENTERTAINMENT": 671, + "PICTURE HOUSE": 671, + "PICTURE": 671, + "PIDAX": 672, + "PINK FLOYD RECORDS": 673, + "PINK FLOYD": 673, + "PINNACLE FILMS": 674, + "PINNACLE": 674, + "PLAIN": 675, + "PLATFORM ENTERTAINMENT LIMITED": 676, + "PLATFORM ENTERTAINMENT LTD": 676, + "PLATFORM ENTERTAINMENT LTD.": 676, + "PLATFORM ENTERTAINMENT": 676, + "PLATFORM": 676, + "PLAYARTE": 677, + "PLG UK CLASSICS": 678, + "PLG UK": 678, + "PLG": 678, + "POLYBAND & TOPPIC VIDEO/WVG": 679, + "POLYBAND AND TOPPIC VIDEO/WVG": 679, + "POLYBAND & TOPPIC VIDEO WVG": 679, + "POLYBAND & TOPPIC VIDEO AND WVG": 679, + "POLYBAND & TOPPIC VIDEO & WVG": 679, + "POLYBAND AND TOPPIC VIDEO WVG": 679, + "POLYBAND AND TOPPIC VIDEO AND WVG": 679, + "POLYBAND AND TOPPIC VIDEO & WVG": 679, + "POLYBAND & TOPPIC VIDEO": 679, + "POLYBAND AND TOPPIC VIDEO": 679, + "POLYBAND & TOPPIC": 679, + "POLYBAND AND TOPPIC": 679, + "POLYBAND": 679, + "WVG": 679, + "POLYDOR": 680, + "PONY": 681, + "PONY CANYON": 682, + "POTEMKINE": 683, + "POWERHOUSE FILMS": 684, + "POWERHOUSE": 684, + "POWERSTATIOM": 685, + "PRIDE & JOY": 686, + "PRIDE AND JOY": 
686, + "PRINZ MEDIA": 687, + "PRINZ": 687, + "PRIS AUDIOVISUAIS": 688, + "PRO VIDEO": 689, + "PRO-VIDEO": 689, + "PRO-MOTION": 690, + "PRO MOTION": 690, + "PROD. JRB": 691, + "PROD JRB": 691, + "PRODISC": 692, + "PROKINO": 693, + "PROVOGUE RECORDS": 694, + "PROVOGUE": 694, + "PROWARE": 695, + "PULP VIDEO": 696, + "PULP": 696, + "PULSE VIDEO": 697, + "PULSE": 697, + "PURE AUDIO RECORDINGS": 698, + "PURE AUDIO": 698, + "PURE FLIX ENTERTAINMENT": 699, + "PURE FLIX": 699, + "PURE ENTERTAINMENT": 699, + "PYRAMIDE VIDEO": 700, + "PYRAMIDE": 700, + "QUALITY FILMS": 701, + "QUALITY": 701, + "QUARTO VALLEY RECORDS": 702, + "QUARTO VALLEY": 702, + "QUESTAR": 703, + "R SQUARED FILMS": 704, + "R SQUARED": 704, + "RAPID EYE MOVIES": 705, + "RAPID EYE": 705, + "RARO VIDEO": 706, + "RARO": 706, + "RAROVIDEO U.S.": 707, + "RAROVIDEO US": 707, + "RARO VIDEO US": 707, + "RARO VIDEO U.S.": 707, + "RARO U.S.": 707, + "RARO US": 707, + "RAVEN BANNER RELEASING": 708, + "RAVEN BANNER": 708, + "RAVEN": 708, + "RAZOR DIGITAL ENTERTAINMENT": 709, + "RAZOR DIGITAL": 709, + "RCA": 710, + "RCO LIVE": 711, + "RCO": 711, + "RCV": 712, + "REAL GONE MUSIC": 713, + "REAL GONE": 713, + "REANIMEDIA": 714, + "REANI MEDIA": 714, + "REDEMPTION": 715, + "REEL": 716, + "RELIANCE HOME VIDEO & GAMES": 717, + "RELIANCE HOME VIDEO AND GAMES": 717, + "RELIANCE HOME VIDEO": 717, + "RELIANCE VIDEO": 717, + "RELIANCE HOME": 717, + "RELIANCE": 717, + "REM CULTURE": 718, + "REMAIN IN LIGHT": 719, + "REPRISE": 720, + "RESEN": 721, + "RETROMEDIA": 722, + "REVELATION FILMS LTD.": 723, + "REVELATION FILMS LTD": 723, + "REVELATION FILMS": 723, + "REVELATION LTD.": 723, + "REVELATION LTD": 723, + "REVELATION": 723, + "REVOLVER ENTERTAINMENT": 724, + "REVOLVER": 724, + "RHINO MUSIC": 725, + "RHINO": 725, + "RHV": 726, + "RIGHT STUF": 727, + "RIMINI EDITIONS": 728, + "RISING SUN MEDIA": 729, + "RLJ ENTERTAINMENT": 730, + "RLJ": 730, + "ROADRUNNER RECORDS": 731, + "ROADSHOW ENTERTAINMENT": 732, + "ROADSHOW": 732, + "RONE": 733, + "RONIN FLIX": 734, + "ROTANA HOME ENTERTAINMENT": 735, + "ROTANA ENTERTAINMENT": 735, + "ROTANA HOME": 735, + "ROTANA": 735, + "ROUGH TRADE": 736, + "ROUNDER": 737, + "SAFFRON HILL FILMS": 738, + "SAFFRON HILL": 738, + "SAFFRON": 738, + "SAMUEL GOLDWYN FILMS": 739, + "SAMUEL GOLDWYN": 739, + "SAN FRANCISCO SYMPHONY": 740, + "SANDREW METRONOME": 741, + "SAPHRANE": 742, + "SAVOR": 743, + "SCANBOX ENTERTAINMENT": 744, + "SCANBOX": 744, + "SCENIC LABS": 745, + "SCHRÖDERMEDIA": 746, + "SCHRODERMEDIA": 746, + "SCHRODER MEDIA": 746, + "SCORPION RELEASING": 747, + "SCORPION": 747, + "SCREAM TEAM RELEASING": 748, + "SCREAM TEAM": 748, + "SCREEN MEDIA": 749, + "SCREEN": 749, + "SCREENBOUND PICTURES": 750, + "SCREENBOUND": 750, + "SCREENWAVE MEDIA": 751, + "SCREENWAVE": 751, + "SECOND RUN": 752, + "SECOND SIGHT": 753, + "SEEDSMAN GROUP": 754, + "SELECT VIDEO": 755, + "SELECTA VISION": 756, + "SENATOR": 757, + "SENTAI FILMWORKS": 758, + "SENTAI": 758, + "SEVEN7": 759, + "SEVERIN FILMS": 760, + "SEVERIN": 760, + "SEVILLE": 761, + "SEYONS ENTERTAINMENT": 762, + "SEYONS": 762, + "SF STUDIOS": 763, + "SGL ENTERTAINMENT": 764, + "SGL": 764, + "SHAMELESS": 765, + "SHAMROCK MEDIA": 766, + "SHAMROCK": 766, + "SHANGHAI EPIC MUSIC ENTERTAINMENT": 767, + "SHANGHAI EPIC ENTERTAINMENT": 767, + "SHANGHAI EPIC MUSIC": 767, + "SHANGHAI MUSIC ENTERTAINMENT": 767, + "SHANGHAI ENTERTAINMENT": 767, + "SHANGHAI MUSIC": 767, + "SHANGHAI": 767, + "SHEMAROO": 768, + "SHOCHIKU": 769, + "SHOCK": 770, + "SHOGAKU KAN": 771, + "SHOUT FACTORY": 772, + "SHOUT! 
FACTORY": 772, + "SHOUT": 772, + "SHOUT!": 772, + "SHOWBOX": 773, + "SHOWTIME ENTERTAINMENT": 774, + "SHOWTIME": 774, + "SHRIEK SHOW": 775, + "SHUDDER": 776, + "SIDONIS": 777, + "SIDONIS CALYSTA": 778, + "SIGNAL ONE ENTERTAINMENT": 779, + "SIGNAL ONE": 779, + "SIGNATURE ENTERTAINMENT": 780, + "SIGNATURE": 780, + "SILVER VISION": 781, + "SINISTER FILM": 782, + "SINISTER": 782, + "SIREN VISUAL ENTERTAINMENT": 783, + "SIREN VISUAL": 783, + "SIREN ENTERTAINMENT": 783, + "SIREN": 783, + "SKANI": 784, + "SKY DIGI": 785, + "SLASHER // VIDEO": 786, + "SLASHER / VIDEO": 786, + "SLASHER VIDEO": 786, + "SLASHER": 786, + "SLOVAK FILM INSTITUTE": 787, + "SLOVAK FILM": 787, + "SFI": 787, + "SM LIFE DESIGN GROUP": 788, + "SMOOTH PICTURES": 789, + "SMOOTH": 789, + "SNAPPER MUSIC": 790, + "SNAPPER": 790, + "SODA PICTURES": 791, + "SODA": 791, + "SONO LUMINUS": 792, + "SONY MUSIC": 793, + "SONY PICTURES": 794, + "SONY": 794, + "SONY PICTURES CLASSICS": 795, + "SONY CLASSICS": 795, + "SOUL MEDIA": 796, + "SOUL": 796, + "SOULFOOD MUSIC DISTRIBUTION": 797, + "SOULFOOD DISTRIBUTION": 797, + "SOULFOOD MUSIC": 797, + "SOULFOOD": 797, + "SOYUZ": 798, + "SPECTRUM": 799, + "SPENTZOS FILM": 800, + "SPENTZOS": 800, + "SPIRIT ENTERTAINMENT": 801, + "SPIRIT": 801, + "SPIRIT MEDIA GMBH": 802, + "SPIRIT MEDIA": 802, + "SPLENDID ENTERTAINMENT": 803, + "SPLENDID FILM": 804, + "SPO": 805, + "SQUARE ENIX": 806, + "SRI BALAJI VIDEO": 807, + "SRI BALAJI": 807, + "SRI": 807, + "SRI VIDEO": 807, + "SRS CINEMA": 808, + "SRS": 808, + "SSO RECORDINGS": 809, + "SSO": 809, + "ST2 MUSIC": 810, + "ST2": 810, + "STAR MEDIA ENTERTAINMENT": 811, + "STAR ENTERTAINMENT": 811, + "STAR MEDIA": 811, + "STAR": 811, + "STARLIGHT": 812, + "STARZ / ANCHOR BAY": 813, + "STARZ ANCHOR BAY": 813, + "STARZ": 813, + "ANCHOR BAY": 813, + "STER KINEKOR": 814, + "STERLING ENTERTAINMENT": 815, + "STERLING": 815, + "STINGRAY": 816, + "STOCKFISCH RECORDS": 817, + "STOCKFISCH": 817, + "STRAND RELEASING": 818, + "STRAND": 818, + "STUDIO 4K": 819, + "STUDIO CANAL": 820, + "STUDIO GHIBLI": 821, + "GHIBLI": 821, + "STUDIO HAMBURG ENTERPRISES": 822, + "HAMBURG ENTERPRISES": 822, + "STUDIO HAMBURG": 822, + "HAMBURG": 822, + "STUDIO S": 823, + "SUBKULTUR ENTERTAINMENT": 824, + "SUBKULTUR": 824, + "SUEVIA FILMS": 825, + "SUEVIA": 825, + "SUMMIT ENTERTAINMENT": 826, + "SUMMIT": 826, + "SUNFILM ENTERTAINMENT": 827, + "SUNFILM": 827, + "SURROUND RECORDS": 828, + "SURROUND": 828, + "SVENSK FILMINDUSTRI": 829, + "SVENSK": 829, + "SWEN FILMES": 830, + "SWEN FILMS": 830, + "SWEN": 830, + "SYNAPSE FILMS": 831, + "SYNAPSE": 831, + "SYNDICADO": 832, + "SYNERGETIC": 833, + "T- SERIES": 834, + "T-SERIES": 834, + "T SERIES": 834, + "TSERIES": 834, + "T.V.P.": 835, + "TVP": 835, + "TACET RECORDS": 836, + "TACET": 836, + "TAI SENG": 837, + "TAI SHENG": 838, + "TAKEONE": 839, + "TAKESHOBO": 840, + "TAMASA DIFFUSION": 841, + "TC ENTERTAINMENT": 842, + "TC": 842, + "TDK": 843, + "TEAM MARKETING": 844, + "TEATRO REAL": 845, + "TEMA DISTRIBUCIONES": 846, + "TEMPE DIGITAL": 847, + "TF1 VIDÉO": 848, + "TF1 VIDEO": 848, + "TF1": 848, + "THE BLU": 849, + "BLU": 849, + "THE ECSTASY OF FILMS": 850, + "THE FILM DETECTIVE": 851, + "FILM DETECTIVE": 851, + "THE JOKERS": 852, + "JOKERS": 852, + "THE ON": 853, + "ON": 853, + "THIMFILM": 854, + "THIM FILM": 854, + "THIM": 854, + "THIRD WINDOW FILMS": 855, + "THIRD WINDOW": 855, + "3RD WINDOW FILMS": 855, + "3RD WINDOW": 855, + "THUNDERBEAN ANIMATION": 856, + "THUNDERBEAN": 856, + "THUNDERBIRD RELEASING": 857, + "THUNDERBIRD": 857, + "TIBERIUS FILM": 
858, + "TIME LIFE": 859, + "TIMELESS MEDIA GROUP": 860, + "TIMELESS MEDIA": 860, + "TIMELESS GROUP": 860, + "TIMELESS": 860, + "TLA RELEASING": 861, + "TLA": 861, + "TOBIS FILM": 862, + "TOBIS": 862, + "TOEI": 863, + "TOHO": 864, + "TOKYO SHOCK": 865, + "TOKYO": 865, + "TONPOOL MEDIEN GMBH": 866, + "TONPOOL MEDIEN": 866, + "TOPICS ENTERTAINMENT": 867, + "TOPICS": 867, + "TOUCHSTONE PICTURES": 868, + "TOUCHSTONE": 868, + "TRANSMISSION FILMS": 869, + "TRANSMISSION": 869, + "TRAVEL VIDEO STORE": 870, + "TRIART": 871, + "TRIGON FILM": 872, + "TRIGON": 872, + "TRINITY HOME ENTERTAINMENT": 873, + "TRINITY ENTERTAINMENT": 873, + "TRINITY HOME": 873, + "TRINITY": 873, + "TRIPICTURES": 874, + "TRI-PICTURES": 874, + "TRI PICTURES": 874, + "TROMA": 875, + "TURBINE MEDIEN": 876, + "TURTLE RECORDS": 877, + "TURTLE": 877, + "TVA FILMS": 878, + "TVA": 878, + "TWILIGHT TIME": 879, + "TWILIGHT": 879, + "TT": 879, + "TWIN CO., LTD.": 880, + "TWIN CO, LTD.": 880, + "TWIN CO., LTD": 880, + "TWIN CO, LTD": 880, + "TWIN CO LTD": 880, + "TWIN LTD": 880, + "TWIN CO.": 880, + "TWIN CO": 880, + "TWIN": 880, + "UCA": 881, + "UDR": 882, + "UEK": 883, + "UFA/DVD": 884, + "UFA DVD": 884, + "UFADVD": 884, + "UGC PH": 885, + "ULTIMATE3DHEAVEN": 886, + "ULTRA": 887, + "UMBRELLA ENTERTAINMENT": 888, + "UMBRELLA": 888, + "UMC": 889, + "UNCORK'D ENTERTAINMENT": 890, + "UNCORKD ENTERTAINMENT": 890, + "UNCORK D ENTERTAINMENT": 890, + "UNCORK'D": 890, + "UNCORK D": 890, + "UNCORKD": 890, + "UNEARTHED FILMS": 891, + "UNEARTHED": 891, + "UNI DISC": 892, + "UNIMUNDOS": 893, + "UNITEL": 894, + "UNIVERSAL MUSIC": 895, + "UNIVERSAL SONY PICTURES HOME ENTERTAINMENT": 896, + "UNIVERSAL SONY PICTURES ENTERTAINMENT": 896, + "UNIVERSAL SONY PICTURES HOME": 896, + "UNIVERSAL SONY PICTURES": 896, + "UNIVERSAL HOME ENTERTAINMENT": 896, + "UNIVERSAL ENTERTAINMENT": 896, + "UNIVERSAL HOME": 896, + "UNIVERSAL STUDIOS": 897, + "UNIVERSAL": 897, + "UNIVERSE LASER & VIDEO CO.": 898, + "UNIVERSE LASER AND VIDEO CO.": 898, + "UNIVERSE LASER & VIDEO CO": 898, + "UNIVERSE LASER AND VIDEO CO": 898, + "UNIVERSE LASER CO.": 898, + "UNIVERSE LASER CO": 898, + "UNIVERSE LASER": 898, + "UNIVERSUM FILM": 899, + "UNIVERSUM": 899, + "UTV": 900, + "VAP": 901, + "VCI": 902, + "VENDETTA FILMS": 903, + "VENDETTA": 903, + "VERSÁTIL HOME VIDEO": 904, + "VERSÁTIL VIDEO": 904, + "VERSÁTIL HOME": 904, + "VERSÁTIL": 904, + "VERSATIL HOME VIDEO": 904, + "VERSATIL VIDEO": 904, + "VERSATIL HOME": 904, + "VERSATIL": 904, + "VERTICAL ENTERTAINMENT": 905, + "VERTICAL": 905, + "VÉRTICE 360º": 906, + "VÉRTICE 360": 906, + "VERTICE 360o": 906, + "VERTICE 360": 906, + "VERTIGO BERLIN": 907, + "VÉRTIGO FILMS": 908, + "VÉRTIGO": 908, + "VERTIGO FILMS": 908, + "VERTIGO": 908, + "VERVE PICTURES": 909, + "VIA VISION ENTERTAINMENT": 910, + "VIA VISION": 910, + "VICOL ENTERTAINMENT": 911, + "VICOL": 911, + "VICOM": 912, + "VICTOR ENTERTAINMENT": 913, + "VICTOR": 913, + "VIDEA CDE": 914, + "VIDEO FILM EXPRESS": 915, + "VIDEO FILM": 915, + "VIDEO EXPRESS": 915, + "VIDEO MUSIC, INC.": 916, + "VIDEO MUSIC, INC": 916, + "VIDEO MUSIC INC.": 916, + "VIDEO MUSIC INC": 916, + "VIDEO MUSIC": 916, + "VIDEO SERVICE CORP.": 917, + "VIDEO SERVICE CORP": 917, + "VIDEO SERVICE": 917, + "VIDEO TRAVEL": 918, + "VIDEOMAX": 919, + "VIDEO MAX": 919, + "VII PILLARS ENTERTAINMENT": 920, + "VII PILLARS": 920, + "VILLAGE FILMS": 921, + "VINEGAR SYNDROME": 922, + "VINEGAR": 922, + "VS": 922, + "VINNY MOVIES": 923, + "VINNY": 923, + "VIRGIL FILMS & ENTERTAINMENT": 924, + "VIRGIL FILMS AND ENTERTAINMENT": 924, + 
"VIRGIL ENTERTAINMENT": 924, + "VIRGIL FILMS": 924, + "VIRGIL": 924, + "VIRGIN RECORDS": 925, + "VIRGIN": 925, + "VISION FILMS": 926, + "VISION": 926, + "VISUAL ENTERTAINMENT GROUP": 927, + "VISUAL GROUP": 927, + "VISUAL ENTERTAINMENT": 927, + "VISUAL": 927, + "VIVENDI VISUAL ENTERTAINMENT": 928, + "VIVENDI VISUAL": 928, + "VIVENDI": 928, + "VIZ PICTURES": 929, + "VIZ": 929, + "VLMEDIA": 930, + "VL MEDIA": 930, + "VL": 930, + "VOLGA": 931, + "VVS FILMS": 932, + "VVS": 932, + "VZ HANDELS GMBH": 933, + "VZ HANDELS": 933, + "WARD RECORDS": 934, + "WARD": 934, + "WARNER BROS.": 935, + "WARNER BROS": 935, + "WARNER ARCHIVE": 935, + "WARNER ARCHIVE COLLECTION": 935, + "WAC": 935, + "WARNER": 935, + "WARNER MUSIC": 936, + "WEA": 937, + "WEINSTEIN COMPANY": 938, + "WEINSTEIN": 938, + "WELL GO USA": 939, + "WELL GO": 939, + "WELTKINO FILMVERLEIH": 940, + "WEST VIDEO": 941, + "WEST": 941, + "WHITE PEARL MOVIES": 942, + "WHITE PEARL": 942, + "WICKED-VISION MEDIA": 943, + "WICKED VISION MEDIA": 943, + "WICKEDVISION MEDIA": 943, + "WICKED-VISION": 943, + "WICKED VISION": 943, + "WICKEDVISION": 943, + "WIENERWORLD": 944, + "WILD BUNCH": 945, + "WILD EYE RELEASING": 946, + "WILD EYE": 946, + "WILD SIDE VIDEO": 947, + "WILD SIDE": 947, + "WME": 948, + "WOLFE VIDEO": 949, + "WOLFE": 949, + "WORD ON FIRE": 950, + "WORKS FILM GROUP": 951, + "WORLD WRESTLING": 952, + "WVG MEDIEN": 953, + "WWE STUDIOS": 954, + "WWE": 954, + "X RATED KULT": 955, + "X-RATED KULT": 955, + "X RATED CULT": 955, + "X-RATED CULT": 955, + "X RATED": 955, + "X-RATED": 955, + "XCESS": 956, + "XLRATOR": 957, + "XT VIDEO": 958, + "XT": 958, + "YAMATO VIDEO": 959, + "YAMATO": 959, + "YASH RAJ FILMS": 960, + "YASH RAJS": 960, + "ZEITGEIST FILMS": 961, + "ZEITGEIST": 961, + "ZENITH PICTURES": 962, + "ZENITH": 962, + "ZIMA": 963, + "ZYLO": 964, + "ZYX MUSIC": 965, + "ZYX": 965, }.get(distributor, 0) return distributor_id - async def prompt_user_for_id_selection(self, tmdb=None, imdb=None, tvdb=None, filename=None, tracker_name=None): + async def prompt_user_for_id_selection( + self, tmdb=None, imdb=None, tvdb=None, filename=None, tracker_name=None + ): if not tracker_name: tracker_name = "Tracker" # Fallback if tracker_name is not provided if imdb: - imdb = str(imdb).zfill(7) # Convert to string and ensure IMDb ID is 7 characters long by adding leading zeros - console.print(f"[cyan]Found IMDb ID: https://www.imdb.com/title/tt{imdb}[/cyan]") + imdb = str(imdb).zfill( + 7 + ) # Convert to string and ensure IMDb ID is 7 characters long by adding leading zeros + console.print( + f"[cyan]Found IMDb ID: https://www.imdb.com/title/tt{imdb}[/cyan]" + ) if any([tmdb, imdb, tvdb]): console.print(f"[cyan]Found the following IDs on {tracker_name}:") @@ -161,11 +2119,17 @@ async def prompt_user_for_id_selection(self, tmdb=None, imdb=None, tvdb=None, fi console.print(f"TVDb ID: {tvdb}") if filename: - console.print(f"Filename: {filename}") # Ensure filename is printed if available + console.print( + f"Filename: {filename}" + ) # Ensure filename is printed if available - selection = input(f"Do you want to use these IDs from {tracker_name}? (Y/n): ").strip().lower() + selection = ( + input(f"Do you want to use these IDs from {tracker_name}? 
(Y/n): ") + .strip() + .lower() + ) try: - if selection == '' or selection == 'y' or selection == 'yes': + if selection == "" or selection == "y" or selection == "yes": return True else: return False @@ -174,24 +2138,32 @@ async def prompt_user_for_id_selection(self, tmdb=None, imdb=None, tvdb=None, fi async def prompt_user_for_confirmation(self, message): response = input(f"{message} (Y/n): ").strip().lower() - if response == '' or response == 'y': + if response == "" or response == "y": return True return False - async def unit3d_torrent_info(self, tracker, torrent_url, search_url, meta, id=None, file_name=None): - tmdb = imdb = tvdb = description = category = infohash = mal = files = None # noqa F841 + async def unit3d_torrent_info( + self, tracker, torrent_url, search_url, meta, id=None, file_name=None + ): + tmdb = imdb = tvdb = description = category = infohash = mal = files = ( + None # noqa F841 + ) imagelist = [] # Build the params for the API request - params = {'api_token': self.config['TRACKERS'][tracker].get('api_key', '')} + params = {"api_token": self.config["TRACKERS"][tracker].get("api_key", "")} # Determine the URL based on whether we're searching by file name or ID if file_name: url = f"{search_url}?file_name={file_name}" - console.print(f"[green]Searching {tracker} by file name: [bold yellow]{file_name}[/bold yellow]") + console.print( + f"[green]Searching {tracker} by file name: [bold yellow]{file_name}[/bold yellow]" + ) elif id: url = f"{torrent_url}{id}?" - console.print(f"[green]Searching {tracker} by ID: [bold yellow]{id}[/bold yellow] via {url}") + console.print( + f"[green]Searching {tracker} by ID: [bold yellow]{id}[/bold yellow] via {url}" + ) else: console.print("[red]No ID or file name provided for search.[/red]") return None, None, None, None, None, None, None, None, None @@ -209,82 +2181,123 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, meta, id=N try: # Handle response when searching by file name (which might return a 'data' array) - data = json_response.get('data', []) + data = json_response.get("data", []) if data: - attributes = data[0].get('attributes', {}) + attributes = data[0].get("attributes", {}) # Extract data from the attributes - category = attributes.get('category') - description = attributes.get('description') - tmdb = attributes.get('tmdb_id') - tvdb = attributes.get('tvdb_id') - mal = attributes.get('mal_id') - imdb = attributes.get('imdb_id') - infohash = attributes.get('info_hash') + category = attributes.get("category") + description = attributes.get("description") + tmdb = attributes.get("tmdb_id") + tvdb = attributes.get("tvdb_id") + mal = attributes.get("mal_id") + imdb = attributes.get("imdb_id") + infohash = attributes.get("info_hash") else: # Handle response when searching by ID if id and not data: - attributes = json_response.get('attributes', {}) + attributes = json_response.get("attributes", {}) # Extract data from the attributes - category = attributes.get('category') - description = attributes.get('description') - tmdb = attributes.get('tmdb_id') - tvdb = attributes.get('tvdb_id') - mal = attributes.get('mal_id') - imdb = attributes.get('imdb_id') - infohash = attributes.get('info_hash') + category = attributes.get("category") + description = attributes.get("description") + tmdb = attributes.get("tmdb_id") + tvdb = attributes.get("tvdb_id") + mal = attributes.get("mal_id") + imdb = attributes.get("imdb_id") + infohash = attributes.get("info_hash") # Handle file name extraction - files = 
attributes.get('files', []) + files = attributes.get("files", []) if files: if len(files) == 1: - file_name = files[0]['name'] + file_name = files[0]["name"] else: - file_name = [file['name'] for file in files[:5]] # Return up to 5 filenames + file_name = [ + file["name"] for file in files[:5] + ] # Return up to 5 filenames - console.print(f"[blue]Extracted filename(s): {file_name}[/blue]") # Print the extracted filename(s) + console.print( + f"[blue]Extracted filename(s): {file_name}[/blue]" + ) # Print the extracted filename(s) # Skip the ID selection prompt if searching by ID - console.print(f"[green]Valid IDs found: TMDb: {tmdb}, IMDb: {imdb}, TVDb: {tvdb}[/green]") + console.print( + f"[green]Valid IDs found: TMDb: {tmdb}, IMDb: {imdb}, TVDb: {tvdb}[/green]" + ) if tmdb or imdb or tvdb: if not id: # Only prompt the user for ID selection if not searching by ID try: - if not await self.prompt_user_for_id_selection(tmdb, imdb, tvdb, file_name): - console.print("[yellow]User chose to skip based on IDs.[/yellow]") + if not await self.prompt_user_for_id_selection( + tmdb, imdb, tvdb, file_name + ): + console.print( + "[yellow]User chose to skip based on IDs.[/yellow]" + ) return None, None, None, None, None, None, None, None, None except (KeyboardInterrupt, EOFError): sys.exit(1) if description: bbcode = BBCODE() - description, imagelist = bbcode.clean_unit3d_description(description, torrent_url) + description, imagelist = bbcode.clean_unit3d_description( + description, torrent_url + ) console.print(f"[green]Successfully grabbed description from {tracker}") - console.print(f"[blue]Extracted description: [yellow]{description}", markup=False) + console.print( + f"[blue]Extracted description: [yellow]{description}", markup=False + ) # Allow user to edit or discard the description - if not (meta.get('blu') or meta.get('aither') or meta.get('lst') or meta.get('oe') or meta.get('tik')) or meta.get('unattended'): - console.print("[cyan]Do you want to edit, discard or keep the description?[/cyan]") - edit_choice = input("[cyan]Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: [/cyan]") + if not ( + meta.get("blu") + or meta.get("aither") + or meta.get("lst") + or meta.get("oe") + or meta.get("tik") + ) or meta.get("unattended"): + console.print( + "[cyan]Do you want to edit, discard or keep the description?[/cyan]" + ) + edit_choice = input( + "[cyan]Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: [/cyan]" + ) - if edit_choice.lower() == 'e': + if edit_choice.lower() == "e": edited_description = click.edit(description) if edited_description: description = edited_description.strip() - console.print(f"[green]Final description after editing:[/green] {description}", markup=False) - elif edit_choice.lower() == 'd': + console.print( + f"[green]Final description after editing:[/green] {description}", + markup=False, + ) + elif edit_choice.lower() == "d": description = None console.print("[yellow]Description discarded.[/yellow]") else: - console.print("[green]Keeping the original description.[/green]") + console.print( + "[green]Keeping the original description.[/green]" + ) - return tmdb, imdb, tvdb, mal, description, category, infohash, imagelist, file_name + return ( + tmdb, + imdb, + tvdb, + mal, + description, + category, + infohash, + imagelist, + file_name, + ) except Exception as e: console.print_exception() - console.print(f"[yellow]Invalid Response from {tracker} API. Error: {str(e)}[/yellow]") + console.print( + f"[yellow]Invalid Response from {tracker} API. 
Error: {str(e)}[/yellow]" + ) return None, None, None, None, None, None, None, None, None async def parseCookieFile(self, cookiefile): @@ -292,24 +2305,24 @@ async def parseCookieFile(self, cookiefile): compatible with requests.""" cookies = {} - with open(cookiefile, 'r') as fp: + with open(cookiefile, "r") as fp: for line in fp: if not line.startswith(("# ", "\n", "#\n")): - lineFields = re.split(' |\t', line.strip()) + lineFields = re.split(" |\t", line.strip()) lineFields = [x for x in lineFields if x != ""] cookies[lineFields[5]] = lineFields[6] return cookies async def ptgen(self, meta, ptgen_site="", ptgen_retry=3): ptgen = "" - url = 'https://ptgen.zhenzhen.workers.dev' - if ptgen_site != '': + url = "https://ptgen.zhenzhen.workers.dev" + if ptgen_site != "": url = ptgen_site params = {} data = {} # get douban url - if int(meta.get('imdb_id', '0')) != 0: - data['search'] = f"tt{meta['imdb_id']}" + if int(meta.get("imdb_id", "0")) != 0: + data["search"] = f"tt{meta['imdb_id']}" ptgen = requests.get(url, params=data) if ptgen.json()["error"] is not None: for retry in range(ptgen_retry): @@ -320,13 +2333,17 @@ async def ptgen(self, meta, ptgen_site="", ptgen_retry=3): except requests.exceptions.JSONDecodeError: continue try: - params['url'] = ptgen.json()['data'][0]['link'] + params["url"] = ptgen.json()["data"][0]["link"] except Exception: console.print("[red]Unable to get data from ptgen using IMDb") - params['url'] = console.input("[red]Please enter [yellow]Douban[/yellow] link: ") + params["url"] = console.input( + "[red]Please enter [yellow]Douban[/yellow] link: " + ) else: console.print("[red]No IMDb id was found.") - params['url'] = console.input("[red]Please enter [yellow]Douban[/yellow] link: ") + params["url"] = console.input( + "[red]Please enter [yellow]Douban[/yellow] link: " + ) try: ptgen = requests.get(url, params=params) if ptgen.json()["error"] is not None: @@ -335,80 +2352,81 @@ async def ptgen(self, meta, ptgen_site="", ptgen_retry=3): if ptgen.json()["error"] is None: break ptgen = ptgen.json() - meta['ptgen'] = ptgen - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: + meta["ptgen"] = ptgen + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", "w") as f: json.dump(meta, f, indent=4) f.close() - ptgen = ptgen['format'] + ptgen = ptgen["format"] if "[/img]" in ptgen: ptgen = ptgen.split("[/img]")[1] ptgen = f"[img]{meta.get('imdb_info', {}).get('cover', meta.get('cover', ''))}[/img]{ptgen}" except Exception: console.print_exception() console.print(ptgen.text) - console.print("[bold red]There was an error getting the ptgen \nUploading without ptgen") + console.print( + "[bold red]There was an error getting the ptgen \nUploading without ptgen" + ) return "" return ptgen async def filter_dupes(self, dupes, meta): - if meta['debug']: + if meta["debug"]: console.log("[cyan]Pre-filtered dupes") console.log(dupes) new_dupes = [] for each in dupes: - if meta.get('sd', 0) == 1: + if meta.get("sd", 0) == 1: remove_set = set() else: - remove_set = set({meta['resolution']}) + remove_set = set({meta["resolution"]}) search_combos = [ { - 'search': meta['hdr'], - 'search_for': {'HDR', 'PQ10'}, - 'update': {'HDR|PQ10'} + "search": meta["hdr"], + "search_for": {"HDR", "PQ10"}, + "update": {"HDR|PQ10"}, }, + {"search": meta["hdr"], "search_for": {"DV"}, "update": {"DV|DoVi"}}, { - 'search': meta['hdr'], - 'search_for': {'DV'}, - 'update': {'DV|DoVi'} + "search": meta["hdr"], + "search_not": {"DV", "DoVi", "HDR", "PQ10"}, + "update": 
{"!(DV)|(DoVi)|(HDR)|(PQ10)"}, }, { - 'search': meta['hdr'], - 'search_not': {'DV', 'DoVi', 'HDR', 'PQ10'}, - 'update': {'!(DV)|(DoVi)|(HDR)|(PQ10)'} + "search": str(meta.get("tv_pack", 0)), + "search_for": "1", + "update": {rf"{meta['season']}(?!E\d+)"}, }, { - 'search': str(meta.get('tv_pack', 0)), - 'search_for': '1', - 'update': {rf"{meta['season']}(?!E\d+)"} + "search": meta["episode"], + "search_for": meta["episode"], + "update": {meta["season"], meta["episode"]}, }, - { - 'search': meta['episode'], - 'search_for': meta['episode'], - 'update': {meta['season'], meta['episode']} - } ] search_matches = [ - { - 'if': {'REMUX', 'WEBDL', 'WEBRip', 'HDTV'}, - 'in': meta['type'] - } + {"if": {"REMUX", "WEBDL", "WEBRip", "HDTV"}, "in": meta["type"]} ] for s in search_combos: - if s.get('search_for') not in (None, ''): - if any(re.search(x, s['search'], flags=re.IGNORECASE) for x in s['search_for']): - remove_set.update(s['update']) - if s.get('search_not') not in (None, ''): - if not any(re.search(x, s['search'], flags=re.IGNORECASE) for x in s['search_not']): - remove_set.update(s['update']) + if s.get("search_for") not in (None, ""): + if any( + re.search(x, s["search"], flags=re.IGNORECASE) + for x in s["search_for"] + ): + remove_set.update(s["update"]) + if s.get("search_not") not in (None, ""): + if not any( + re.search(x, s["search"], flags=re.IGNORECASE) + for x in s["search_not"] + ): + remove_set.update(s["update"]) for sm in search_matches: - for a in sm['if']: - if a in sm['in']: + for a in sm["if"]: + if a in sm["in"]: remove_set.add(a) - search = each.lower().replace('-', '').replace(' ', '').replace('.', '') + search = each.lower().replace("-", "").replace(" ", "").replace(".", "") for x in remove_set.copy(): if "|" in x: - look_for = x.split('|') + look_for = x.split("|") for y in look_for: if y.lower() in search: if x in remove_set: @@ -421,7 +2439,10 @@ async def filter_dupes(self, dupes, meta): if not re.search(x, search, flags=re.I): allow = False else: - if re.search(x.replace("!", "", 1), search, flags=re.I) not in (None, False): + if re.search(x.replace("!", "", 1), search, flags=re.I) not in ( + None, + False, + ): allow = False if allow and each not in new_dupes: new_dupes.append(each) diff --git a/src/trackers/FL.py b/src/trackers/FL.py index 5d376c917..c62753c65 100644 --- a/src/trackers/FL.py +++ b/src/trackers/FL.py @@ -15,89 +15,114 @@ from src.console import console -class FL(): +class FL: def __init__(self, config): self.config = config - self.tracker = 'FL' - self.source_flag = 'FL' - self.username = config['TRACKERS'][self.tracker].get('username', '').strip() - self.password = config['TRACKERS'][self.tracker].get('password', '').strip() - self.fltools = config['TRACKERS'][self.tracker].get('fltools', {}) - self.uploader_name = config['TRACKERS'][self.tracker].get('uploader_name') + self.tracker = "FL" + self.source_flag = "FL" + self.username = config["TRACKERS"][self.tracker].get("username", "").strip() + self.password = config["TRACKERS"][self.tracker].get("password", "").strip() + self.fltools = config["TRACKERS"][self.tracker].get("fltools", {}) + self.uploader_name = config["TRACKERS"][self.tracker].get("uploader_name") self.signature = None self.banned_groups = [""] async def get_category_id(self, meta): has_ro_audio, has_ro_sub = await self.get_ro_tracks(meta) # 25 = 3D Movie - if meta['category'] == 'MOVIE': + if meta["category"] == "MOVIE": # 4 = Movie HD cat_id = 4 - if meta['is_disc'] == "BDMV" or meta['type'] == "REMUX": + if meta["is_disc"] == 
"BDMV" or meta["type"] == "REMUX": # 20 = BluRay cat_id = 20 - if meta['resolution'] == '2160p': + if meta["resolution"] == "2160p": # 26 = 4k Movie - BluRay cat_id = 26 - elif meta['resolution'] == '2160p': + elif meta["resolution"] == "2160p": # 6 = 4k Movie cat_id = 6 - elif meta.get('sd', 0) == 1: + elif meta.get("sd", 0) == 1: # 1 = Movie SD cat_id = 1 - if has_ro_sub and meta.get('sd', 0) == 0 and meta['resolution'] != '2160p': + if has_ro_sub and meta.get("sd", 0) == 0 and meta["resolution"] != "2160p": # 19 = Movie + RO cat_id = 19 - if meta['category'] == 'TV': + if meta["category"] == "TV": # 21 = TV HD cat_id = 21 - if meta['resolution'] == '2160p': + if meta["resolution"] == "2160p": # 27 = TV 4k cat_id = 27 - elif meta.get('sd', 0) == 1: + elif meta.get("sd", 0) == 1: # 23 = TV SD cat_id = 23 - if meta['is_disc'] == "DVD": + if meta["is_disc"] == "DVD": # 2 = DVD cat_id = 2 if has_ro_sub: # 3 = DVD + RO cat_id = 3 - if meta.get('anime', False) is True: + if meta.get("anime", False) is True: # 24 = Anime cat_id = 24 return cat_id async def edit_name(self, meta): - fl_name = meta['name'] - if 'DV' in meta.get('hdr', ''): - fl_name = fl_name.replace(' DV ', ' DoVi ') - if meta.get('type') in ('WEBDL', 'WEBRIP', 'ENCODE'): - fl_name = fl_name.replace(meta['audio'], meta['audio'].replace(' ', '', 1)) - fl_name = fl_name.replace(meta.get('aka', ''), '') - if meta.get('imdb_info'): - fl_name = fl_name.replace(meta['title'], meta['imdb_info']['aka']) - if meta['year'] != meta.get('imdb_info', {}).get('year', meta['year']) and str(meta['year']).strip() != '': - fl_name = fl_name.replace(str(meta['year']), str(meta['imdb_info']['year'])) - if meta['category'] == "TV" and meta.get('tv_pack', 0) == 0 and meta.get('episode_title_storage', '').strip() != '' and meta['episode'].strip() != '': - fl_name = fl_name.replace(meta['episode'], f"{meta['episode']} {meta['episode_title_storage']}") - if 'DD+' in meta.get('audio', '') and 'DDP' in meta['uuid']: - fl_name = fl_name.replace('DD+', 'DDP') - if 'Atmos' in meta.get('audio', '') and 'Atmos' not in meta['uuid']: - fl_name = fl_name.replace('Atmos', '') + fl_name = meta["name"] + if "DV" in meta.get("hdr", ""): + fl_name = fl_name.replace(" DV ", " DoVi ") + if meta.get("type") in ("WEBDL", "WEBRIP", "ENCODE"): + fl_name = fl_name.replace(meta["audio"], meta["audio"].replace(" ", "", 1)) + fl_name = fl_name.replace(meta.get("aka", ""), "") + if meta.get("imdb_info"): + fl_name = fl_name.replace(meta["title"], meta["imdb_info"]["aka"]) + if ( + meta["year"] != meta.get("imdb_info", {}).get("year", meta["year"]) + and str(meta["year"]).strip() != "" + ): + fl_name = fl_name.replace( + str(meta["year"]), str(meta["imdb_info"]["year"]) + ) + if ( + meta["category"] == "TV" + and meta.get("tv_pack", 0) == 0 + and meta.get("episode_title_storage", "").strip() != "" + and meta["episode"].strip() != "" + ): + fl_name = fl_name.replace( + meta["episode"], f"{meta['episode']} {meta['episode_title_storage']}" + ) + if "DD+" in meta.get("audio", "") and "DDP" in meta["uuid"]: + fl_name = fl_name.replace("DD+", "DDP") + if "Atmos" in meta.get("audio", "") and "Atmos" not in meta["uuid"]: + fl_name = fl_name.replace("Atmos", "") - fl_name = fl_name.replace('BluRay REMUX', 'Remux').replace('BluRay Remux', 'Remux').replace('Bluray Remux', 'Remux') - fl_name = fl_name.replace('PQ10', 'HDR').replace('HDR10+', 'HDR') - fl_name = fl_name.replace('DoVi HDR HEVC', 'HEVC DoVi HDR').replace('HDR HEVC', 'HEVC HDR').replace('DoVi HEVC', 'HEVC DoVi') - fl_name = 
fl_name.replace('DTS7.1', 'DTS').replace('DTS5.1', 'DTS').replace('DTS2.0', 'DTS').replace('DTS1.0', 'DTS') - fl_name = fl_name.replace('Dubbed', '').replace('Dual-Audio', '') - fl_name = ' '.join(fl_name.split()) + fl_name = ( + fl_name.replace("BluRay REMUX", "Remux") + .replace("BluRay Remux", "Remux") + .replace("Bluray Remux", "Remux") + ) + fl_name = fl_name.replace("PQ10", "HDR").replace("HDR10+", "HDR") + fl_name = ( + fl_name.replace("DoVi HDR HEVC", "HEVC DoVi HDR") + .replace("HDR HEVC", "HEVC HDR") + .replace("DoVi HEVC", "HEVC DoVi") + ) + fl_name = ( + fl_name.replace("DTS7.1", "DTS") + .replace("DTS5.1", "DTS") + .replace("DTS2.0", "DTS") + .replace("DTS1.0", "DTS") + ) + fl_name = fl_name.replace("Dubbed", "").replace("Dual-Audio", "") + fl_name = " ".join(fl_name.split()) fl_name = re.sub(r"[^0-9a-zA-ZÀ-ÿ. &+'\-\[\]]+", "", fl_name) - fl_name = fl_name.replace(' ', '.').replace('..', '.') + fl_name = fl_name.replace(" ", ".").replace("..", ".") return fl_name async def upload(self, meta, disctype): @@ -110,12 +135,14 @@ async def upload(self, meta, disctype): # Confirm the correct naming order for FL cli_ui.info(f"Filelist name: {fl_name}") - if meta.get('unattended', False) is False: + if meta.get("unattended", False) is False: fl_confirm = cli_ui.ask_yes_no("Correct?", default=False) if fl_confirm is not True: - fl_name_manually = cli_ui.ask_string("Please enter a proper name", default="") + fl_name_manually = cli_ui.ask_string( + "Please enter a proper name", default="" + ) if fl_name_manually == "": - console.print('No proper name given') + console.print("No proper name given") console.print("Aborting...") return else: @@ -123,63 +150,92 @@ async def upload(self, meta, disctype): # Torrent File Naming # Note: Don't Edit .torrent filename after creation, SubsPlease anime releases (because of their weird naming) are an exception - if meta.get('anime', True) is True and meta.get('tag', '') == '-SubsPlease': + if meta.get("anime", True) is True and meta.get("tag", "") == "-SubsPlease": torrentFileName = fl_name else: - if meta.get('isdir', False) is False: - torrentFileName = meta.get('uuid') + if meta.get("isdir", False) is False: + torrentFileName = meta.get("uuid") torrentFileName = os.path.splitext(torrentFileName)[0] else: - torrentFileName = meta.get('uuid') + torrentFileName = meta.get("uuid") # Download new .torrent from site - fl_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', newline='', encoding='utf-8').read() + fl_desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + newline="", + encoding="utf-8", + ).read() torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - if meta['bdinfo'] is not None: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + if meta["bdinfo"] is not None: + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ).read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8').read() - with open(torrent_path, 'rb') as torrentFile: + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", + "r", + encoding="utf-8", + ).read() + with open(torrent_path, "rb") as torrentFile: torrentFileName = unidecode(torrentFileName) files = { - 'file': (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent") + "file": ( + 
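+ # requests encodes a (filename, fileobj, content_type) tuple as one
+ # multipart/form-data part, so the tuple below uploads the .torrent
+ # under its unidecoded name with an explicit Content-Type.
+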
f"{torrentFileName}.torrent", + torrentFile, + "application/x-bittorent", + ) } data = { - 'name': fl_name, - 'type': cat_id, - 'descr': fl_desc.strip(), - 'nfo': mi_dump + "name": fl_name, + "type": cat_id, + "descr": fl_desc.strip(), + "nfo": mi_dump, } - if int(meta.get('imdb_id', '').replace('tt', '')) != 0: - data['imdbid'] = meta.get('imdb_id', '').replace('tt', '') - data['description'] = meta['imdb_info'].get('genres', '') - if self.uploader_name not in ("", None) and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: - data['epenis'] = self.uploader_name + if int(meta.get("imdb_id", "").replace("tt", "")) != 0: + data["imdbid"] = meta.get("imdb_id", "").replace("tt", "") + data["description"] = meta["imdb_info"].get("genres", "") + if ( + self.uploader_name not in ("", None) + and bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + is False + ): + data["epenis"] = self.uploader_name if has_ro_audio: - data['materialro'] = 'on' - if meta['is_disc'] == "BDMV" or meta['type'] == "REMUX": - data['freeleech'] = 'on' - if int(meta.get('tv_pack', '0')) != 0: - data['freeleech'] = 'on' - if int(meta.get('freeleech', '0')) != 0: - data['freeleech'] = 'on' + data["materialro"] = "on" + if meta["is_disc"] == "BDMV" or meta["type"] == "REMUX": + data["freeleech"] = "on" + if int(meta.get("tv_pack", "0")) != 0: + data["freeleech"] = "on" + if int(meta.get("freeleech", "0")) != 0: + data["freeleech"] = "on" url = "https://filelist.io/takeupload.php" # Submit - if meta['debug']: + if meta["debug"]: console.print(url) console.print(data) else: with requests.Session() as session: - cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/FL.pkl") - with open(cookiefile, 'rb') as cf: + cookiefile = os.path.abspath( + f"{meta['base_dir']}/data/cookies/FL.pkl" + ) + with open(cookiefile, "rb") as cf: session.cookies.update(pickle.load(cf)) up = session.post(url=url, data=data, files=files) torrentFile.close() # Match url to verify successful upload - match = re.match(r".*?filelist\.io/details\.php\?id=(\d+)&uploaded=(\d+)", up.url) + match = re.match( + r".*?filelist\.io/details\.php\?id=(\d+)&uploaded=(\d+)", up.url + ) if match: id = re.search(r"(id=)(\d+)", urlparse(up.url).query).group(2) await self.download_new_torrent(session, id, torrent_path) @@ -187,37 +243,43 @@ async def upload(self, meta, disctype): console.print(data) console.print("\n\n") console.print(up.text) - raise UploadException(f"Upload to FL Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') # noqa F405 + raise UploadException( + f"Upload to FL Failed: result URL {up.url} ({up.status_code}) was not expected", + "red", + ) # noqa F405 return async def search_existing(self, meta, disctype): dupes = [] with requests.Session() as session: cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/FL.pkl") - with open(cookiefile, 'rb') as cf: + with open(cookiefile, "rb") as cf: session.cookies.update(pickle.load(cf)) search_url = "https://filelist.io/browse.php" - if int(meta['imdb_id'].replace('tt', '')) != 0: + if int(meta["imdb_id"].replace("tt", "")) != 0: params = { - 'search': meta['imdb_id'], - 'cat': await self.get_category_id(meta), - 'searchin': '3' + "search": meta["imdb_id"], + "cat": await self.get_category_id(meta), + "searchin": "3", } else: params = { - 'search': meta['title'], - 'cat': await self.get_category_id(meta), - 'searchin': '0' + "search": meta["title"], + "cat": await self.get_category_id(meta), 
+ "searchin": "0", } r = session.get(search_url, params=params) await asyncio.sleep(0.5) - soup = BeautifulSoup(r.text, 'html.parser') - find = soup.find_all('a', href=True) + soup = BeautifulSoup(r.text, "html.parser") + find = soup.find_all("a", href=True) for each in find: - if each['href'].startswith('details.php?id=') and "&" not in each['href']: - dupes.append(each['title']) + if ( + each["href"].startswith("details.php?id=") + and "&" not in each["href"] + ): + dupes.append(each["title"]) return dupes @@ -227,7 +289,9 @@ async def validate_credentials(self, meta): await self.login(cookiefile) vcookie = await self.validate_cookies(meta, cookiefile) if vcookie is not True: - console.print('[red]Failed to validate cookies. Please confirm that the site is up and your passkey is valid.') + console.print( + "[red]Failed to validate cookies. Please confirm that the site is up and your passkey is valid." + ) recreate = cli_ui.ask_yes_no("Log in again and create new session?") if recreate is True: if os.path.exists(cookiefile): @@ -243,11 +307,11 @@ async def validate_cookies(self, meta, cookiefile): url = "https://filelist.io/index.php" if os.path.exists(cookiefile): with requests.Session() as session: - with open(cookiefile, 'rb') as cf: + with open(cookiefile, "rb") as cf: session.cookies.update(pickle.load(cf)) resp = session.get(url=url) - if meta['debug']: - console.print('[cyan]Cookies:') + if meta["debug"]: + console.print("[cyan]Cookies:") console.print(session.cookies.get_dict()) console.print(resp.url) if resp.text.find("Logout") != -1: @@ -261,24 +325,26 @@ async def login(self, cookiefile): with requests.Session() as session: r = session.get("https://filelist.io/login.php") await asyncio.sleep(0.5) - soup = BeautifulSoup(r.text, 'html.parser') - validator = soup.find('input', {'name': 'validator'}).get('value') + soup = BeautifulSoup(r.text, "html.parser") + validator = soup.find("input", {"name": "validator"}).get("value") data = { - 'validator': validator, - 'username': self.username, - 'password': self.password, - 'unlock': '1', + "validator": validator, + "username": self.username, + "password": self.password, + "unlock": "1", } - response = session.post('https://filelist.io/takelogin.php', data=data) + response = session.post("https://filelist.io/takelogin.php", data=data) await asyncio.sleep(0.5) - index = 'https://filelist.io/index.php' + index = "https://filelist.io/index.php" response = session.get(index) if response.text.find("Logout") != -1: - console.print('[green]Successfully logged into FL') - with open(cookiefile, 'wb') as cf: + console.print("[green]Successfully logged into FL") + with open(cookiefile, "wb") as cf: pickle.dump(session.cookies, cf) else: - console.print('[bold red]Something went wrong while trying to log into FL') + console.print( + "[bold red]Something went wrong while trying to log into FL" + ) await asyncio.sleep(1) console.print(response.url) return @@ -290,50 +356,124 @@ async def download_new_torrent(self, session, id, torrent_path): with open(torrent_path, "wb") as tor: tor.write(r.content) else: - console.print("[red]There was an issue downloading the new .torrent from FL") + console.print( + "[red]There was an issue downloading the new .torrent from FL" + ) console.print(r.text) return async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', newline='', encoding='utf-8') 
as descfile: + base = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + with open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "w", + newline="", + encoding="utf-8", + ) as descfile: from src.bbcode import BBCODE + bbcode = BBCODE() desc = base desc = bbcode.remove_spoiler(desc) desc = bbcode.convert_code_to_quote(desc) desc = bbcode.convert_comparison_to_centered(desc, 900) - desc = desc.replace('[img]', '[img]').replace('[/img]', '[/img]') + desc = desc.replace("[img]", "[img]").replace("[/img]", "[/img]") desc = re.sub(r"(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) - if meta['is_disc'] != 'BDMV': + if meta["is_disc"] != "BDMV": url = "https://up.img4k.net/api/description" data = { - 'mediainfo': open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r').read(), + "mediainfo": open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", + "r", + ).read(), } - if int(meta['imdb_id'].replace('tt', '')) != 0: - data['imdbURL'] = f"tt{meta['imdb_id']}" - screen_glob = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['filename']}-*.png") + if int(meta["imdb_id"].replace("tt", "")) != 0: + data["imdbURL"] = f"tt{meta['imdb_id']}" + screen_glob = glob.glob1( + f"{meta['base_dir']}/tmp/{meta['uuid']}", + f"{meta['filename']}-*.png", + ) files = [] for screen in screen_glob: - files.append(('images', (os.path.basename(screen), open(f"{meta['base_dir']}/tmp/{meta['uuid']}/{screen}", 'rb'), 'image/png'))) - response = requests.post(url, data=data, files=files, auth=(self.fltools['user'], self.fltools['pass'])) - final_desc = response.text.replace('\r\n', '\n') + files.append( + ( + "images", + ( + os.path.basename(screen), + open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/{screen}", + "rb", + ), + "image/png", + ), + ) + ) + response = requests.post( + url, + data=data, + files=files, + auth=(self.fltools["user"], self.fltools["pass"]), + ) + final_desc = response.text.replace("\r\n", "\n") else: # BD Description Generator - final_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_EXT.txt", 'r', encoding='utf-8').read() + final_desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_EXT.txt", + "r", + encoding="utf-8", + ).read() if final_desc.strip() != "": # Use BD_SUMMARY_EXT and bbcode format it - final_desc = final_desc.replace('[/pre][/quote]', f'[/pre][/quote]\n\n{desc}\n', 1) - final_desc = final_desc.replace('DISC INFO:', '[pre][quote=BD_Info][b][color=#FF0000]DISC INFO:[/color][/b]').replace('PLAYLIST REPORT:', '[b][color=#FF0000]PLAYLIST REPORT:[/color][/b]').replace('VIDEO:', '[b][color=#FF0000]VIDEO:[/color][/b]').replace('AUDIO:', '[b][color=#FF0000]AUDIO:[/color][/b]').replace('SUBTITLES:', '[b][color=#FF0000]SUBTITLES:[/color][/b]') + final_desc = final_desc.replace( + "[/pre][/quote]", f"[/pre][/quote]\n\n{desc}\n", 1 + ) + final_desc = ( + final_desc.replace( + "DISC INFO:", + "[pre][quote=BD_Info][b][color=#FF0000]DISC INFO:[/color][/b]", + ) + .replace( + "PLAYLIST REPORT:", + "[b][color=#FF0000]PLAYLIST REPORT:[/color][/b]", + ) + .replace("VIDEO:", "[b][color=#FF0000]VIDEO:[/color][/b]") + .replace("AUDIO:", "[b][color=#FF0000]AUDIO:[/color][/b]") + .replace( + "SUBTITLES:", "[b][color=#FF0000]SUBTITLES:[/color][/b]" + ) + ) final_desc += "[/pre][/quote]\n" # Closed bbcode tags # Upload screens and append to the end of the description url = "https://up.img4k.net/api/description" - screen_glob = 
glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['filename']}-*.png") + screen_glob = glob.glob1( + f"{meta['base_dir']}/tmp/{meta['uuid']}", + f"{meta['filename']}-*.png", + ) files = [] for screen in screen_glob: - files.append(('images', (os.path.basename(screen), open(f"{meta['base_dir']}/tmp/{meta['uuid']}/{screen}", 'rb'), 'image/png'))) - response = requests.post(url, files=files, auth=(self.fltools['user'], self.fltools['pass'])) - final_desc += response.text.replace('\r\n', '\n') + files.append( + ( + "images", + ( + os.path.basename(screen), + open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/{screen}", + "rb", + ), + "image/png", + ), + ) + ) + response = requests.post( + url, + files=files, + auth=(self.fltools["user"], self.fltools["pass"]), + ) + final_desc += response.text.replace("\r\n", "\n") descfile.write(final_desc) if self.signature is not None: @@ -342,20 +482,20 @@ async def edit_desc(self, meta): async def get_ro_tracks(self, meta): has_ro_audio = has_ro_sub = False - if meta.get('is_disc', '') != 'BDMV': - mi = meta['mediainfo'] - for track in mi['media']['track']: - if track['@type'] == "Text": - if track.get('Language') == "ro": + if meta.get("is_disc", "") != "BDMV": + mi = meta["mediainfo"] + for track in mi["media"]["track"]: + if track["@type"] == "Text": + if track.get("Language") == "ro": has_ro_sub = True - if track['@type'] == "Audio": - if track.get('Audio') == 'ro': + if track["@type"] == "Audio": + if track.get("Audio") == "ro": has_ro_audio = True else: - if "Romanian" in meta['bdinfo']['subtitles']: + if "Romanian" in meta["bdinfo"]["subtitles"]: has_ro_sub = True - for audio_track in meta['bdinfo']['audio']: - if audio_track['language'] == 'Romanian': + for audio_track in meta["bdinfo"]["audio"]: + if audio_track["language"] == "Romanian": has_ro_audio = True break return has_ro_audio, has_ro_sub diff --git a/src/trackers/FNP.py b/src/trackers/FNP.py index eb6ebaa42..ec72c5817 100644 --- a/src/trackers/FNP.py +++ b/src/trackers/FNP.py @@ -9,7 +9,7 @@ from src.console import console -class FNP(): +class FNP: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -20,116 +20,146 @@ class FNP(): def __init__(self, config): self.config = config - self.tracker = 'FNP' - self.source_flag = 'FnP' - self.upload_url = 'https://fearnopeer.com/api/torrents/upload' - self.search_url = 'https://fearnopeer.com/api/torrents/filter' + self.tracker = "FNP" + self.source_flag = "FnP" + self.upload_url = "https://fearnopeer.com/api/torrents/upload" + self.search_url = "https://fearnopeer.com/api/torrents/filter" self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = [""] pass async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') + "MOVIE": "1", + "TV": "2", + }.get(category_name, "0") return category_id async def get_type_id(self, type): type_id = { - 'DISC': '1', - 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - 'ENCODE': '3' - }.get(type, '0') + "DISC": "1", + "REMUX": "2", + "WEBDL": "4", + "WEBRIP": "5", + "HDTV": "6", + "ENCODE": "3", + }.get(type, "0") return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') + "8640p": "10", + "4320p": "1", + "2160p": "2", + 
"1440p": "3", + "1080p": "3", + "1080i": "4", + "720p": "5", + "576p": "6", + "576i": "7", + "480p": "8", + "480i": "9", + }.get(resolution, "10") return resolution_id async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) + cat_id = await self.get_cat_id(meta["category"]) + type_id = await self.get_type_id(meta["type"]) + resolution_id = await self.get_res_id(meta["resolution"]) await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + region_id = await common.unit3d_region_ids(meta.get("region")) + distributor_id = await common.unit3d_distributor_ids(meta.get("distributor")) + if ( + meta["anon"] == 0 + and bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + is False + ): anon = 0 else: anon = 1 - if meta['bdinfo'] is not None: + if meta["bdinfo"] is not None: mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + bd_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ).read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ).read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') - files = {'torrent': open_torrent} + desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + open_torrent = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", + "rb", + ) + files = {"torrent": open_torrent} data = { - 'name': meta['name'], - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb_id'].replace('tt', ''), - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, + "name": meta["name"], + "description": desc, + "mediainfo": mi_dump, + "bdinfo": bd_dump, + "category_id": cat_id, + "type_id": type_id, + "resolution_id": resolution_id, + "tmdb": meta["tmdb"], + "imdb": meta["imdb_id"].replace("tt", ""), + "tvdb": meta["tvdb_id"], + "mal": meta["mal_id"], + "igdb": 0, + "anonymous": anon, + "stream": meta["stream"], + "sd": meta["sd"], + "keywords": meta["keywords"], + "personal_release": int(meta.get("personalrelease", False)), + "internal": 0, + "featured": 0, + "free": 0, + "doubleup": 0, + "sticky": 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', 
False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 + if self.config["TRACKERS"][self.tracker].get("internal", False) is True: + if meta["tag"] != "" and ( + meta["tag"][1:] + in self.config["TRACKERS"][self.tracker].get("internal_groups", []) + ): + data["internal"] = 1 if region_id != 0: - data['region_id'] = region_id + data["region_id"] = region_id if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') + data["distributor_id"] = distributor_id + if meta.get("category") == "TV": + data["season_number"] = meta.get("season_int", "0") + data["episode_number"] = meta.get("episode_int", "0") headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + "User-Agent": f"Upload Assistant/2.1 ({platform.system()} {platform.release()})" } + params = {"api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip()} - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + if meta["debug"] is False: + response = requests.post( + url=self.upload_url, + files=files, + data=data, + headers=headers, + params=params, + ) try: console.print(response.json()) except Exception: @@ -144,25 +174,27 @@ async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" + "api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip(), + "tmdbId": meta["tmdb"], + "categories[]": await self.get_cat_id(meta["category"]), + "types[]": await self.get_type_id(meta["type"]), + "resolutions[]": await self.get_res_id(meta["resolution"]), + "name": "", } - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" + if meta.get("edition", "") != "": + params["name"] = params["name"] + f" {meta['edition']}" try: response = requests.get(url=self.search_url, params=params) response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] + for each in response["data"]: + result = [each][0]["attributes"]["name"] # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + console.print( + "[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect" + ) await asyncio.sleep(5) return dupes diff --git a/src/trackers/HDB.py b/src/trackers/HDB.py index a59c42e84..26c4dbb5d 100644 --- a/src/trackers/HDB.py +++ b/src/trackers/HDB.py @@ -14,15 +14,15 @@ from torf import Torrent -class HDB(): +class HDB: def __init__(self, config): self.config = config - self.tracker = 'HDB' - self.source_flag = 'HDBits' - self.username = config['TRACKERS']['HDB'].get('username', '').strip() - self.passkey = config['TRACKERS']['HDB'].get('passkey', '').strip() - self.rehost_images = config['TRACKERS']['HDB'].get('img_rehost', False) + self.tracker = "HDB" + self.source_flag = "HDBits" + self.username = config["TRACKERS"]["HDB"].get("username", "").strip() + self.passkey = config["TRACKERS"]["HDB"].get("passkey", "").strip() + self.rehost_images = config["TRACKERS"]["HDB"].get("img_rehost", False) self.signature = None self.banned_groups = [""] @@ -34,64 +34,69 @@ async def get_type_category_id(self, meta): # 5 = Sport # 7 = PORN # 1 = Movie - if meta['category'] == 'MOVIE': + if meta["category"] == "MOVIE": cat_id = 1 # 2 = TV - if meta['category'] == 'TV': + if meta["category"] == "TV": cat_id = 2 # 3 = Documentary - if 'documentary' in meta.get("genres", "").lower() or 'documentary' in meta.get("keywords", "").lower(): + if ( + "documentary" in meta.get("genres", "").lower() + or "documentary" in meta.get("keywords", "").lower() + ): cat_id = 3 return cat_id async def get_type_codec_id(self, meta): codecmap = { - "AVC": 1, "H.264": 1, - "HEVC": 5, "H.265": 5, + "AVC": 1, + "H.264": 1, + "HEVC": 5, + "H.265": 5, "MPEG-2": 2, "VC-1": 3, "XviD": 4, - "VP9": 6 + "VP9": 6, } - searchcodec = meta.get('video_codec', meta.get('video_encode')) + searchcodec = meta.get("video_codec", meta.get("video_encode")) codec_id = codecmap.get(searchcodec, "EXIT") return codec_id async def get_type_medium_id(self, meta): medium_id = "EXIT" # 1 = Blu-ray / HD DVD - if meta.get('is_disc', '') in ("BDMV", "HD DVD"): + if meta.get("is_disc", "") in ("BDMV", "HD DVD"): medium_id = 1 # 4 = Capture - if meta.get('type', '') == "HDTV": + if meta.get("type", "") == "HDTV": medium_id = 4 - if meta.get('has_encode_settings', False) is True: + if meta.get("has_encode_settings", False) is True: medium_id = 3 # 3 = Encode - if meta.get('type', '') in ("ENCODE", "WEBRIP"): + if meta.get("type", "") in ("ENCODE", "WEBRIP"): medium_id = 3 # 5 = Remux - if meta.get('type', '') == "REMUX": + if meta.get("type", "") == "REMUX": medium_id = 5 # 6 = WEB-DL - if meta.get('type', '') == "WEBDL": + if meta.get("type", "") == "WEBDL": medium_id = 6 return medium_id async def get_res_id(self, resolution): resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') + "8640p": "10", + "4320p": "1", + "2160p": "2", + "1440p": "3", + "1080p": "3", + "1080i": "4", + "720p": "5", + "576p": "6", + "576i": "7", + "480p": "8", + "480i": "9", + }.get(resolution, "10") return resolution_id async def get_tags(self, meta): @@ -113,86 +118,110 @@ async def get_tags(self, meta): "PMTP": 69, "MA": 77, "SHO": 76, - "BCORE": 66, "CORE": 66, + "BCORE": 66, + "CORE": 66, "CRKL": 73, "FUNI": 74, "HLMK": 71, "HTSR": 79, "CRAV": 80, - 'MAX': 88 + "MAX": 88, } - if meta.get('service') in service_dict.keys(): - tags.append(service_dict.get(meta['service'])) + if meta.get("service") in service_dict.keys(): + 
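# service_dict maps the release's streaming-service abbreviation to
+ # HDB's numeric tag id; a "MAX" release, for example, appends tag 88.
+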
tags.append(service_dict.get(meta["service"])) # Collections # Masters of Cinema, The Criterion Collection, Warner Archive Collection distributor_dict = { - "WARNER ARCHIVE": 68, "WARNER ARCHIVE COLLECTION": 68, "WAC": 68, - "CRITERION": 18, "CRITERION COLLECTION": 18, "CC": 18, - "MASTERS OF CINEMA": 19, "MOC": 19, - "KINO LORBER": 55, "KINO": 55, - "BFI VIDEO": 63, "BFI": 63, "BRITISH FILM INSTITUTE": 63, + "WARNER ARCHIVE": 68, + "WARNER ARCHIVE COLLECTION": 68, + "WAC": 68, + "CRITERION": 18, + "CRITERION COLLECTION": 18, + "CC": 18, + "MASTERS OF CINEMA": 19, + "MOC": 19, + "KINO LORBER": 55, + "KINO": 55, + "BFI VIDEO": 63, + "BFI": 63, + "BRITISH FILM INSTITUTE": 63, "STUDIO CANAL": 65, - "ARROW": 64 + "ARROW": 64, } - if meta.get('distributor') in distributor_dict.keys(): - tags.append(distributor_dict.get(meta['distributor'])) + if meta.get("distributor") in distributor_dict.keys(): + tags.append(distributor_dict.get(meta["distributor"])) # 4K Remaster, - if "IMAX" in meta.get('edition', ''): + if "IMAX" in meta.get("edition", ""): tags.append(14) - if "OPEN MATTE" in meta.get('edition', '').upper(): + if "OPEN MATTE" in meta.get("edition", "").upper(): tags.append(58) # Audio # DTS:X, Dolby Atmos, Auro-3D, Silent - if "DTS:X" in meta['audio']: + if "DTS:X" in meta["audio"]: tags.append(7) - if "Atmos" in meta['audio']: + if "Atmos" in meta["audio"]: tags.append(5) - if meta.get('silent', False) is True: - console.print('[yellow]zxx audio track found, suggesting you tag as silent') # 57 + if meta.get("silent", False) is True: + console.print( + "[yellow]zxx audio track found, suggesting you tag as silent" + ) # 57 # Video Metadata # HDR10, HDR10+, Dolby Vision, 10-bit, - if "HDR" in meta.get('hdr', ''): - if "HDR10+" in meta['hdr']: + if "HDR" in meta.get("hdr", ""): + if "HDR10+" in meta["hdr"]: tags.append(25) # HDR10+ else: tags.append(9) # HDR10 - if "DV" in meta.get('hdr', ''): + if "DV" in meta.get("hdr", ""): tags.append(6) # DV - if "HLG" in meta.get('hdr', ''): + if "HLG" in meta.get("hdr", ""): tags.append(10) # HLG return tags async def edit_name(self, meta): - hdb_name = meta['name'] - hdb_name = hdb_name.replace('H.265', 'HEVC') - if meta.get('source', '').upper() == 'WEB' and meta.get('service', '').strip() != '': - hdb_name = hdb_name.replace(f"{meta.get('service', '')} ", '', 1) - if 'DV' in meta.get('hdr', ''): - hdb_name = hdb_name.replace(' DV ', ' DoVi ') - if 'HDR' in meta.get('hdr', ''): - if 'HDR10+' not in meta['hdr']: - hdb_name = hdb_name.replace('HDR', 'HDR10') - if meta.get('type') in ('WEBDL', 'WEBRIP', 'ENCODE'): - hdb_name = hdb_name.replace(meta['audio'], meta['audio'].replace(' ', '', 1).replace('Atmos', '')) + hdb_name = meta["name"] + hdb_name = hdb_name.replace("H.265", "HEVC") + if ( + meta.get("source", "").upper() == "WEB" + and meta.get("service", "").strip() != "" + ): + hdb_name = hdb_name.replace(f"{meta.get('service', '')} ", "", 1) + if "DV" in meta.get("hdr", ""): + hdb_name = hdb_name.replace(" DV ", " DoVi ") + if "HDR" in meta.get("hdr", ""): + if "HDR10+" not in meta["hdr"]: + hdb_name = hdb_name.replace("HDR", "HDR10") + if meta.get("type") in ("WEBDL", "WEBRIP", "ENCODE"): + hdb_name = hdb_name.replace( + meta["audio"], meta["audio"].replace(" ", "", 1).replace("Atmos", "") + ) else: - hdb_name = hdb_name.replace(meta['audio'], meta['audio'].replace('Atmos', '')) - hdb_name = hdb_name.replace(meta.get('aka', ''), '') - if meta.get('imdb_info'): - hdb_name = hdb_name.replace(meta['title'], meta['imdb_info']['aka']) - if 
str(meta['year']) != str(meta.get('imdb_info', {}).get('year', meta['year'])) and str(meta['year']).strip() != '': - hdb_name = hdb_name.replace(str(meta['year']), str(meta['imdb_info']['year'])) + hdb_name = hdb_name.replace( + meta["audio"], meta["audio"].replace("Atmos", "") + ) + hdb_name = hdb_name.replace(meta.get("aka", ""), "") + if meta.get("imdb_info"): + hdb_name = hdb_name.replace(meta["title"], meta["imdb_info"]["aka"]) + if ( + str(meta["year"]) + != str(meta.get("imdb_info", {}).get("year", meta["year"])) + and str(meta["year"]).strip() != "" + ): + hdb_name = hdb_name.replace( + str(meta["year"]), str(meta["imdb_info"]["year"]) + ) # Remove Dubbed/Dual-Audio from title - hdb_name = hdb_name.replace('PQ10', 'HDR') - hdb_name = hdb_name.replace('Dubbed', '').replace('Dual-Audio', '') - hdb_name = hdb_name.replace('REMUX', 'Remux') - hdb_name = ' '.join(hdb_name.split()) + hdb_name = hdb_name.replace("PQ10", "HDR") + hdb_name = hdb_name.replace("Dubbed", "").replace("Dual-Audio", "") + hdb_name = hdb_name.replace("REMUX", "Remux") + hdb_name = " ".join(hdb_name.split()) hdb_name = re.sub(r"[^0-9a-zA-ZÀ-ÿ. :&+'\-\[\]]+", "", hdb_name) - hdb_name = hdb_name.replace(' .', '.').replace('..', '.') + hdb_name = hdb_name.replace(" .", ".").replace("..", ".") return hdb_name @@ -208,26 +237,41 @@ async def upload(self, meta, disctype): for each in (cat_id, codec_id, medium_id): if each == "EXIT": - console.print("[bold red]Something didn't map correctly, or this content is not allowed on HDB") + console.print( + "[bold red]Something didn't map correctly, or this content is not allowed on HDB" + ) return - if "Dual-Audio" in meta['audio'] and meta['is_disc'] not in ("BDMV", "HDDVD", "DVD"): + if "Dual-Audio" in meta["audio"] and meta["is_disc"] not in ( + "BDMV", + "HDDVD", + "DVD", + ): console.print("[bold red]Dual-Audio Encodes are not allowed") return # Download new .torrent from site - hdb_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() + hdb_desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" torrent = Torrent.read(torrent_path) # Check if the piece size exceeds 16 MiB and regenerate the torrent if needed if torrent.piece_size > 16777216: # 16 MiB in bytes - console.print("[red]Piece size is OVER 16M and does not work on HDB. Generating a new .torrent") + console.print( + "[red]Piece size is OVER 16M and does not work on HDB. 
Generating a new .torrent" + ) # Import Prep and regenerate the torrent with 16 MiB piece size limit from src.prep import Prep - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) - if meta['is_disc'] == 1: + prep = Prep( + screens=meta["screens"], img_host=meta["imghost"], config=self.config + ) + + if meta["is_disc"] == 1: include = [] exclude = [] else: @@ -236,7 +280,7 @@ async def upload(self, meta, disctype): # Create a new torrent with piece size explicitly set to 16 MiB new_torrent = prep.CustomTorrent( - path=Path(meta['path']), + path=Path(meta["path"]), trackers=["https://fake.tracker"], source="L4G", private=True, @@ -244,12 +288,14 @@ async def upload(self, meta, disctype): include_globs=include, # Ensure this is always a list creation_date=datetime.now(), comment="Created by L4G's Upload Assistant", - created_by="L4G's Upload Assistant" + created_by="L4G's Upload Assistant", ) # Explicitly set the piece size and update metainfo new_torrent.piece_size = 16777216 # 16 MiB in bytes - new_torrent.metainfo['info']['piece length'] = 16777216 # Ensure 'piece length' is set + new_torrent.metainfo["info"][ + "piece length" + ] = 16777216 # Ensure 'piece length' is set # Validate and write the new torrent new_torrent.validate_piece_size() @@ -257,45 +303,62 @@ async def upload(self, meta, disctype): new_torrent.write(torrent_path, overwrite=True) # Proceed with the upload process - with open(torrent_path, 'rb') as torrentFile: - if len(meta['filelist']) == 1: - torrentFileName = unidecode(os.path.basename(meta['video']).replace(' ', '.')) + with open(torrent_path, "rb") as torrentFile: + if len(meta["filelist"]) == 1: + torrentFileName = unidecode( + os.path.basename(meta["video"]).replace(" ", ".") + ) else: - torrentFileName = unidecode(os.path.basename(meta['path']).replace(' ', '.')) + torrentFileName = unidecode( + os.path.basename(meta["path"]).replace(" ", ".") + ) files = { - 'file': (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorrent") + "file": ( + f"{torrentFileName}.torrent", + torrentFile, + "application/x-bittorrent", + ) } data = { - 'name': hdb_name, - 'category': cat_id, - 'codec': codec_id, - 'medium': medium_id, - 'origin': 0, - 'descr': hdb_desc.rstrip(), - 'techinfo': '', - 'tags[]': hdb_tags, + "name": hdb_name, + "category": cat_id, + "codec": codec_id, + "medium": medium_id, + "origin": 0, + "descr": hdb_desc.rstrip(), + "techinfo": "", + "tags[]": hdb_tags, } # If internal, set 1 - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 + if self.config["TRACKERS"][self.tracker].get("internal", False) is True: + if meta["tag"] != "" and ( + meta["tag"][1:] + in self.config["TRACKERS"][self.tracker].get("internal_groups", []) + ): + data["internal"] = 1 # If not BDMV fill mediainfo - if meta.get('is_disc', '') != "BDMV": - data['techinfo'] = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8').read() + if meta.get("is_disc", "") != "BDMV": + data["techinfo"] = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", + "r", + encoding="utf-8", + ).read() # If tv, submit tvdb_id/season/episode - if meta.get('tvdb_id', 0) != 0: - data['tvdb'] = meta['tvdb_id'] - if int(meta.get('imdb_id', '').replace('tt', '')) != 0: - data['imdb'] = f"https://www.imdb.com/title/tt{meta.get('imdb_id', '').replace('tt', 
'')}/", - if meta.get('category') == 'TV': - data['tvdb_season'] = int(meta.get('season_int', 1)) - data['tvdb_episode'] = int(meta.get('episode_int', 1)) + if meta.get("tvdb_id", 0) != 0: + data["tvdb"] = meta["tvdb_id"] + if int(meta.get("imdb_id", "").replace("tt", "")) != 0: + data["imdb"] = ( + f"https://www.imdb.com/title/tt{meta.get('imdb_id', '').replace('tt', '')}/", + ) + if meta.get("category") == "TV": + data["tvdb_season"] = int(meta.get("season_int", 1)) + data["tvdb_episode"] = int(meta.get("episode_int", 1)) # aniDB url = "https://hdbits.org/upload/upload" # Submit - if meta['debug']: + if meta["debug"]: console.print(url) console.print(data) else: @@ -306,7 +369,9 @@ async def upload(self, meta, disctype): torrentFile.close() # Match url to verify successful upload - match = re.match(r".*?hdbits\.org/details\.php\?id=(\d+)&uploaded=(\d+)", up.url) + match = re.match( + r".*?hdbits\.org/details\.php\?id=(\d+)&uploaded=(\d+)", up.url + ) if match: id = re.search(r"(id=)(\d+)", urlparse(up.url).query).group(2) await self.download_new_torrent(id, torrent_path) @@ -314,7 +379,10 @@ async def upload(self, meta, disctype): console.print(data) console.print("\n\n") console.print(up.text) - raise UploadException(f"Upload to HDB Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') # noqa F405 + raise UploadException( + f"Upload to HDB Failed: result URL {up.url} ({up.status_code}) was not expected", + "red", + ) # noqa F405 return async def search_existing(self, meta, disctype): @@ -322,25 +390,27 @@ async def search_existing(self, meta, disctype): console.print("[yellow]Searching for existing torrents on site...") url = "https://hdbits.org/api/torrents" data = { - 'username': self.username, - 'passkey': self.passkey, - 'category': await self.get_type_category_id(meta), - 'codec': await self.get_type_codec_id(meta), - 'medium': await self.get_type_medium_id(meta), - 'search': meta['resolution'] + "username": self.username, + "passkey": self.passkey, + "category": await self.get_type_category_id(meta), + "codec": await self.get_type_codec_id(meta), + "medium": await self.get_type_medium_id(meta), + "search": meta["resolution"], } - if int(meta.get('imdb_id', '0').replace('tt', '0')) != 0: - data['imdb'] = {'id': meta['imdb_id']} - if int(meta.get('tvdb_id', '0')) != 0: - data['tvdb'] = {'id': meta['tvdb_id']} + if int(meta.get("imdb_id", "0").replace("tt", "0")) != 0: + data["imdb"] = {"id": meta["imdb_id"]} + if int(meta.get("tvdb_id", "0")) != 0: + data["tvdb"] = {"id": meta["tvdb_id"]} try: response = requests.get(url=url, data=json.dumps(data)) response = response.json() - for each in response['data']: - result = each['name'] + for each in response["data"]: + result = each["name"] dupes.append(result) except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your passkey is incorrect') + console.print( + "[bold red]Unable to search for existing torrents on site. Either the site is down or your passkey is incorrect" + ) await asyncio.sleep(5) return dupes @@ -349,22 +419,23 @@ async def validate_credentials(self, meta): vapi = await self.validate_api() vcookie = await self.validate_cookies(meta) if vapi is not True: - console.print('[red]Failed to validate API. Please confirm that the site is up and your passkey is valid.') + console.print( + "[red]Failed to validate API. Please confirm that the site is up and your passkey is valid." 
+ ) return False if vcookie is not True: - console.print('[red]Failed to validate cookies. Please confirm that the site is up and your passkey is valid.') + console.print( + "[red]Failed to validate cookies. Please confirm that the site is up and your passkey is valid." + ) return False return True async def validate_api(self): url = "https://hdbits.org/api/test" - data = { - 'username': self.username, - 'passkey': self.passkey - } + data = {"username": self.username, "passkey": self.passkey} try: r = requests.post(url, data=json.dumps(data)).json() - if r.get('status', 5) == 0: + if r.get("status", 5) == 0: return True return False except Exception: @@ -378,8 +449,8 @@ async def validate_cookies(self, meta): with requests.Session() as session: session.cookies.update(await common.parseCookieFile(cookiefile)) resp = session.get(url=url) - if meta['debug']: - console.print('[cyan]Cookies:') + if meta["debug"]: + console.print("[cyan]Cookies:") console.print(session.cookies.get_dict()) console.print("\n\n") console.print(resp.text) @@ -394,20 +465,13 @@ async def validate_cookies(self, meta): async def download_new_torrent(self, id, torrent_path): # Get HDB .torrent filename api_url = "https://hdbits.org/api/torrents" - data = { - 'username': self.username, - 'passkey': self.passkey, - 'id': id - } + data = {"username": self.username, "passkey": self.passkey, "id": id} r = requests.get(url=api_url, data=json.dumps(data)) - filename = r.json()['data'][0]['filename'] + filename = r.json()["data"][0]["filename"] # Download new .torrent download_url = f"https://hdbits.org/download.php/{quote(filename)}" - params = { - 'passkey': self.passkey, - 'id': id - } + params = {"passkey": self.passkey, "id": id} r = requests.get(url=download_url, params=params) with open(torrent_path, "wb") as tor: @@ -415,30 +479,51 @@ async def download_new_torrent(self, id, torrent_path): return async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as descfile: + base = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + with open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "w", + encoding="utf-8", + ) as descfile: from src.bbcode import BBCODE + # Add This line for all web-dls - if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '' and meta.get('description', None) is None: - descfile.write(f"[center][quote]This release is sourced from {meta['service_longname']}[/quote][/center]") + if ( + meta["type"] == "WEBDL" + and meta.get("service_longname", "") != "" + and meta.get("description", None) is None + ): + descfile.write( + f"[center][quote]This release is sourced from {meta['service_longname']}[/quote][/center]" + ) bbcode = BBCODE() - if meta.get('discs', []) != []: - discs = meta['discs'] - if discs[0]['type'] == "DVD": - descfile.write(f"[quote=VOB MediaInfo]{discs[0]['vob_mi']}[/quote]\n") + if meta.get("discs", []) != []: + discs = meta["discs"] + if discs[0]["type"] == "DVD": + descfile.write( + f"[quote=VOB MediaInfo]{discs[0]['vob_mi']}[/quote]\n" + ) descfile.write("\n") - if discs[0]['type'] == "BDMV": + if discs[0]["type"] == "BDMV": descfile.write(f"[quote]{discs[0]['summary'].strip()}[/quote]\n") descfile.write("\n") if len(discs) >= 2: for each in discs[1:]: - if each['type'] == "BDMV": - 
descfile.write(f"[quote={each.get('name', 'BDINFO')}]{each['summary']}[/quote]\n") + if each["type"] == "BDMV": + descfile.write( + f"[quote={each.get('name', 'BDINFO')}]{each['summary']}[/quote]\n" + ) descfile.write("\n") pass - if each['type'] == "DVD": + if each["type"] == "DVD": descfile.write(f"{each['name']}:\n") - descfile.write(f"[quote={os.path.basename(each['vob'])}][{each['vob_mi']}[/quote] [quote={os.path.basename(each['ifo'])}][{each['ifo_mi']}[/quote]\n") + descfile.write( + f"[quote={os.path.basename(each['vob'])}][{each['vob_mi']}[/quote] [quote={os.path.basename(each['ifo'])}][{each['ifo_mi']}[/quote]\n" + ) descfile.write("\n") desc = base desc = bbcode.convert_code_to_quote(desc) @@ -451,12 +536,12 @@ async def edit_desc(self, meta): hdbimg_bbcode = await self.hdbimg_upload(meta) descfile.write(f"{hdbimg_bbcode}") else: - images = meta['image_list'] + images = meta["image_list"] if len(images) > 0: descfile.write("[center]") - for each in range(len(images[:int(meta['screens'])])): - img_url = images[each]['img_url'] - web_url = images[each]['web_url'] + for each in range(len(images[: int(meta["screens"])])): + img_url = images[each]["img_url"] + web_url = images[each]["web_url"] descfile.write(f"[url={web_url}][img]{img_url}[/img][/url]") descfile.write("[/center]") if self.signature is not None: @@ -464,23 +549,27 @@ async def edit_desc(self, meta): descfile.close() async def hdbimg_upload(self, meta): - images = glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['filename']}-*.png") + images = glob.glob( + f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['filename']}-*.png" + ) url = "https://img.hdbits.org/upload_api.php" data = { - 'username': self.username, - 'passkey': self.passkey, - 'galleryoption': 1, - 'galleryname': meta['name'], - 'thumbsize': 'w300' + "username": self.username, + "passkey": self.passkey, + "galleryoption": 1, + "galleryname": meta["name"], + "thumbsize": "w300", } files = {} # Set maximum screenshots to 3 for tv singles and 6 for everthing else - hdbimg_screen_count = 3 if meta['category'] == "TV" and meta.get('tv_pack', 0) == 0 else 6 + hdbimg_screen_count = ( + 3 if meta["category"] == "TV" and meta.get("tv_pack", 0) == 0 else 6 + ) if len(images) < hdbimg_screen_count: hdbimg_screen_count = len(images) for i in range(hdbimg_screen_count): - files[f'images_files[{i}]'] = open(images[i], 'rb') + files[f"images_files[{i}]"] = open(images[i], "rb") r = requests.post(url=url, data=data, files=files) image_bbcode = r.text return image_bbcode @@ -488,25 +577,23 @@ async def hdbimg_upload(self, meta): async def get_info_from_torrent_id(self, hdb_id): hdb_imdb = hdb_name = hdb_torrenthash = None url = "https://hdbits.org/api/torrents" - data = { - "username": self.username, - "passkey": self.passkey, - "id": hdb_id - } + data = {"username": self.username, "passkey": self.passkey, "id": hdb_id} response = requests.get(url, json=data) if response.ok: try: response = response.json() - if response['data'] != []: - hdb_imdb = response['data'][0].get('imdb', {'id': None}).get('id') - hdb_tvdb = response['data'][0].get('tvdb', {'id': None}).get('id') - hdb_name = response['data'][0]['name'] - hdb_torrenthash = response['data'][0]['hash'] + if response["data"] != []: + hdb_imdb = response["data"][0].get("imdb", {"id": None}).get("id") + hdb_tvdb = response["data"][0].get("tvdb", {"id": None}).get("id") + hdb_name = response["data"][0]["name"] + hdb_torrenthash = response["data"][0]["hash"] except Exception: console.print_exception() else: - 
console.print("Failed to get info from HDB ID. Either the site is down or your credentials are invalid") + console.print( + "Failed to get info from HDB ID. Either the site is down or your credentials are invalid" + ) return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash async def search_filename(self, search_term, search_file_folder, meta): @@ -514,13 +601,15 @@ async def search_filename(self, search_term, search_file_folder, meta): url = "https://hdbits.org/api/torrents" # Handle disc case - if search_file_folder == 'folder' and meta.get('is_disc'): - bd_summary_path = os.path.join(meta['base_dir'], 'tmp', meta['uuid'], 'BD_SUMMARY_00.txt') + if search_file_folder == "folder" and meta.get("is_disc"): + bd_summary_path = os.path.join( + meta["base_dir"], "tmp", meta["uuid"], "BD_SUMMARY_00.txt" + ) bd_summary = None # Parse the BD_SUMMARY_00.txt file to extract the Disc Title try: - with open(bd_summary_path, 'r', encoding='utf-8') as file: + with open(bd_summary_path, "r", encoding="utf-8") as file: for line in file: if "Disc Title:" in line: bd_summary = line.split("Disc Title:")[1].strip() @@ -531,12 +620,16 @@ async def search_filename(self, search_term, search_file_folder, meta): "username": self.username, "passkey": self.passkey, "limit": 100, - "search": bd_summary # Using the Disc Title for search + "search": bd_summary, # Using the Disc Title for search } - console.print(f"[green]Searching HDB for disc title: [bold yellow]{bd_summary}[/bold yellow]") + console.print( + f"[green]Searching HDB for disc title: [bold yellow]{bd_summary}[/bold yellow]" + ) # console.print(f"[yellow]Using this data: {data}") else: - console.print(f"[red]Error: 'Disc Title' not found in {bd_summary_path}[/red]") + console.print( + f"[red]Error: 'Disc Title' not found in {bd_summary_path}[/red]" + ) return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash, hdb_id except FileNotFoundError: @@ -548,9 +641,11 @@ async def search_filename(self, search_term, search_file_folder, meta): "username": self.username, "passkey": self.passkey, "limit": 100, - "file_in_torrent": os.path.basename(search_term) + "file_in_torrent": os.path.basename(search_term), } - console.print(f"[green]Searching HDB for file: [bold yellow]{os.path.basename(search_term)}[/bold yellow]") + console.print( + f"[green]Searching HDB for file: [bold yellow]{os.path.basename(search_term)}[/bold yellow]" + ) # console.print(f"[yellow]Using this data: {data}") response = requests.get(url, json=data) @@ -560,26 +655,36 @@ async def search_filename(self, search_term, search_file_folder, meta): response_json = response.json() # console.print(f"[green]HDB API response: {response_json}[/green]") # Log the entire response for debugging - if 'data' not in response_json: - console.print(f"[red]Error: 'data' key not found in HDB API response. Full response: {response_json}[/red]") + if "data" not in response_json: + console.print( + f"[red]Error: 'data' key not found in HDB API response. 
Full response: {response_json}[/red]" + ) return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash, hdb_id - if response_json['data'] != []: - for each in response_json['data']: - hdb_imdb = each.get('imdb', {'id': None}).get('id') - hdb_tvdb = each.get('tvdb', {'id': None}).get('id') - hdb_name = each['name'] - hdb_torrenthash = each['hash'] - hdb_id = each['id'] - console.print(f'[bold green]Matched release with HDB ID: [yellow]https://hdbits.org/details.php?id={hdb_id}[/yellow][/bold green]') + if response_json["data"] != []: + for each in response_json["data"]: + hdb_imdb = each.get("imdb", {"id": None}).get("id") + hdb_tvdb = each.get("tvdb", {"id": None}).get("id") + hdb_name = each["name"] + hdb_torrenthash = each["hash"] + hdb_id = each["id"] + console.print( + f"[bold green]Matched release with HDB ID: [yellow]https://hdbits.org/details.php?id={hdb_id}[/yellow][/bold green]" + ) return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash, hdb_id else: - console.print('[yellow]No data found in the HDB API response[/yellow]') + console.print( + "[yellow]No data found in the HDB API response[/yellow]" + ) except Exception as e: console.print_exception() - console.print(f"[red]Failed to parse HDB API response. Error: {str(e)}[/red]") + console.print( + f"[red]Failed to parse HDB API response. Error: {str(e)}[/red]" + ) else: - console.print(f"[red]Failed to get info from HDB. Status code: {response.status_code}, Reason: {response.reason}[/red]") + console.print( + f"[red]Failed to get info from HDB. Status code: {response.status_code}, Reason: {response.reason}[/red]" + ) - console.print('[yellow]Could not find a matching release on HDB[/yellow]') + console.print("[yellow]Could not find a matching release on HDB[/yellow]") return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash, hdb_id diff --git a/src/trackers/HDT.py b/src/trackers/HDT.py index a1117b67d..cb0bbae73 100644 --- a/src/trackers/HDT.py +++ b/src/trackers/HDT.py @@ -12,31 +12,31 @@ from src.console import console -class HDT(): +class HDT: def __init__(self, config): self.config = config - self.tracker = 'HDT' - self.source_flag = 'hd-torrents.org' - self.username = config['TRACKERS'][self.tracker].get('username', '').strip() - self.password = config['TRACKERS'][self.tracker].get('password', '').strip() + self.tracker = "HDT" + self.source_flag = "hd-torrents.org" + self.username = config["TRACKERS"][self.tracker].get("username", "").strip() + self.password = config["TRACKERS"][self.tracker].get("password", "").strip() self.signature = None self.banned_groups = [""] async def get_category_id(self, meta): - if meta['category'] == 'MOVIE': + if meta["category"] == "MOVIE": # BDMV - if meta.get('is_disc', '') == "BDMV" or meta.get('type', '') == "DISC": - if meta['resolution'] == '2160p': + if meta.get("is_disc", "") == "BDMV" or meta.get("type", "") == "DISC": + if meta["resolution"] == "2160p": # 70 = Movie/UHD/Blu-Ray cat_id = 70 - if meta['resolution'] in ('1080p', '1080i'): + if meta["resolution"] in ("1080p", "1080i"): # 1 = Movie/Blu-Ray cat_id = 1 # REMUX - if meta.get('type', '') == 'REMUX': - if meta.get('uhd', '') == 'UHD' and meta['resolution'] == '2160p': + if meta.get("type", "") == "REMUX": + if meta.get("uhd", "") == "UHD" and meta["resolution"] == "2160p": # 71 = Movie/UHD/Remux cat_id = 71 else: @@ -44,30 +44,30 @@ async def get_category_id(self, meta): cat_id = 2 # REST OF THE STUFF - if meta.get('type', '') not in ("DISC", "REMUX"): - if meta['resolution'] == '2160p': + if meta.get("type", "") not in ("DISC", "REMUX"): + 
if meta["resolution"] == "2160p": # 64 = Movie/2160p cat_id = 64 - elif meta['resolution'] in ('1080p', '1080i'): + elif meta["resolution"] in ("1080p", "1080i"): # 5 = Movie/1080p/i cat_id = 5 - elif meta['resolution'] == '720p': + elif meta["resolution"] == "720p": # 3 = Movie/720p cat_id = 3 - if meta['category'] == 'TV': + if meta["category"] == "TV": # BDMV - if meta.get('is_disc', '') == "BDMV" or meta.get('type', '') == "DISC": - if meta['resolution'] == '2160p': + if meta.get("is_disc", "") == "BDMV" or meta.get("type", "") == "DISC": + if meta["resolution"] == "2160p": # 72 = TV Show/UHD/Blu-ray cat_id = 72 - if meta['resolution'] in ('1080p', '1080i'): + if meta["resolution"] in ("1080p", "1080i"): # 59 = TV Show/Blu-ray cat_id = 59 # REMUX - if meta.get('type', '') == 'REMUX': - if meta.get('uhd', '') == 'UHD' and meta['resolution'] == '2160p': + if meta.get("type", "") == "REMUX": + if meta.get("uhd", "") == "UHD" and meta["resolution"] == "2160p": # 73 = TV Show/UHD/Remux cat_id = 73 else: @@ -75,31 +75,39 @@ async def get_category_id(self, meta): cat_id = 60 # REST OF THE STUFF - if meta.get('type', '') not in ("DISC", "REMUX"): - if meta['resolution'] == '2160p': + if meta.get("type", "") not in ("DISC", "REMUX"): + if meta["resolution"] == "2160p": # 65 = TV Show/2160p cat_id = 65 - elif meta['resolution'] in ('1080p', '1080i'): + elif meta["resolution"] in ("1080p", "1080i"): # 30 = TV Show/1080p/i cat_id = 30 - elif meta['resolution'] == '720p': + elif meta["resolution"] == "720p": # 38 = TV Show/720p cat_id = 38 return cat_id async def edit_name(self, meta): - hdt_name = meta['name'] - if meta['category'] == "TV" and meta.get('tv_pack', 0) == 0 and meta.get('episode_title_storage', '').strip() != '': - hdt_name = hdt_name.replace(meta['episode'], f"{meta['episode']} {meta['episode_title_storage']}") - if meta.get('type') in ('WEBDL', 'WEBRIP', 'ENCODE'): - hdt_name = hdt_name.replace(meta['audio'], meta['audio'].replace(' ', '', 1)) - if 'DV' in meta.get('hdr', ''): - hdt_name = hdt_name.replace(' DV ', ' DoVi ') - - hdt_name = ' '.join(hdt_name.split()) + hdt_name = meta["name"] + if ( + meta["category"] == "TV" + and meta.get("tv_pack", 0) == 0 + and meta.get("episode_title_storage", "").strip() != "" + ): + hdt_name = hdt_name.replace( + meta["episode"], f"{meta['episode']} {meta['episode_title_storage']}" + ) + if meta.get("type") in ("WEBDL", "WEBRIP", "ENCODE"): + hdt_name = hdt_name.replace( + meta["audio"], meta["audio"].replace(" ", "", 1) + ) + if "DV" in meta.get("hdr", ""): + hdt_name = hdt_name.replace(" DV ", " DoVi ") + + hdt_name = " ".join(hdt_name.split()) hdt_name = re.sub(r"[^0-9a-zA-ZÀ-ÿ. 
&+'\-\[\]]+", "", hdt_name) - hdt_name = hdt_name.replace(':', '').replace('..', ' ').replace(' ', ' ') + hdt_name = hdt_name.replace(":", "").replace("..", " ").replace(" ", " ") return hdt_name async def upload(self, meta, disctype): @@ -111,85 +119,113 @@ async def upload(self, meta, disctype): # Confirm the correct naming order for HDT cli_ui.info(f"HDT name: {hdt_name}") - if meta.get('unattended', False) is False: + if meta.get("unattended", False) is False: hdt_confirm = cli_ui.ask_yes_no("Correct?", default=False) if hdt_confirm is not True: - hdt_name_manually = cli_ui.ask_string("Please enter a proper name", default="") + hdt_name_manually = cli_ui.ask_string( + "Please enter a proper name", default="" + ) if hdt_name_manually == "": - console.print('No proper name given') + console.print("No proper name given") console.print("Aborting...") return else: hdt_name = hdt_name_manually # Upload - hdt_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', newline='', encoding='utf-8').read() + hdt_desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + newline="", + encoding="utf-8", + ).read() torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - with open(torrent_path, 'rb') as torrentFile: + with open(torrent_path, "rb") as torrentFile: torrentFileName = unidecode(hdt_name) files = { - 'torrent': (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent") - } - data = { - 'filename': hdt_name, - 'category': cat_id, - 'info': hdt_desc.strip() + "torrent": ( + f"{torrentFileName}.torrent", + torrentFile, + "application/x-bittorent", + ) } + data = {"filename": hdt_name, "category": cat_id, "info": hdt_desc.strip()} # 3D - if "3D" in meta.get('3d', ''): - data['3d'] = 'true' + if "3D" in meta.get("3d", ""): + data["3d"] = "true" # HDR - if "HDR" in meta.get('hdr', ''): - if "HDR10+" in meta['hdr']: - data['HDR10'] = 'true' - data['HDR10Plus'] = 'true' + if "HDR" in meta.get("hdr", ""): + if "HDR10+" in meta["hdr"]: + data["HDR10"] = "true" + data["HDR10Plus"] = "true" else: - data['HDR10'] = 'true' - if "DV" in meta.get('hdr', ''): - data['DolbyVision'] = 'true' + data["HDR10"] = "true" + if "DV" in meta.get("hdr", ""): + data["DolbyVision"] = "true" # IMDB - if int(meta.get('imdb_id', '').replace('tt', '')) != 0: - data['infosite'] = f"https://www.imdb.com/title/tt{meta['imdb_id']}/" + if int(meta.get("imdb_id", "").replace("tt", "")) != 0: + data["infosite"] = f"https://www.imdb.com/title/tt{meta['imdb_id']}/" # Full Season Pack - if int(meta.get('tv_pack', '0')) != 0: - data['season'] = 'true' + if int(meta.get("tv_pack", "0")) != 0: + data["season"] = "true" else: - data['season'] = 'false' + data["season"] = "false" # Anonymous check - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: - data['anonymous'] = 'false' + if ( + meta["anon"] == 0 + and bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + is False + ): + data["anonymous"] = "false" else: - data['anonymous'] = 'true' + data["anonymous"] = "true" # Send url = "https://hd-torrents.org/upload.php" - if meta['debug']: + if meta["debug"]: console.print(url) console.print(data) else: with requests.Session() as session: - cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/HDT.txt") + cookiefile = os.path.abspath( + f"{meta['base_dir']}/data/cookies/HDT.txt" + ) 
session.cookies.update(await common.parseCookieFile(cookiefile)) up = session.post(url=url, data=data, files=files) torrentFile.close() # Match url to verify successful upload - search = re.search(r"download\.php\?id\=([a-z0-9]+)", up.text).group(1) + search = re.search( + r"download\.php\?id\=([a-z0-9]+)", up.text + ) if search: # modding existing torrent for adding to client instead of downloading torrent from site. - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS']['HDT'].get('my_announce_url'), "https://hd-torrents.org/details.php?id=" + search) + await common.add_tracker_torrent( + meta, + self.tracker, + self.source_flag, + self.config["TRACKERS"]["HDT"].get("my_announce_url"), + "https://hd-torrents.org/details.php?id=" + search.group(1), + ) else: console.print(data) console.print("\n\n") console.print(up.text) - raise UploadException(f"Upload to HDT Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') # noqa F405 + raise UploadException( + f"Upload to HDT Failed: result URL {up.url} ({up.status_code}) was not expected", + "red", + ) # noqa F405 return async def search_existing(self, meta, disctype): @@ -201,28 +237,28 @@ async def search_existing(self, meta, disctype): search_url = "https://hd-torrents.org/torrents.php" csrfToken = await self.get_csrfToken(session, search_url) - if int(meta['imdb_id'].replace('tt', '')) != 0: + if int(meta["imdb_id"].replace("tt", "")) != 0: params = { - 'csrfToken': csrfToken, - 'search': meta['imdb_id'], - 'active': '0', - 'options': '2', - 'category[]': await self.get_category_id(meta) + "csrfToken": csrfToken, + "search": meta["imdb_id"], + "active": "0", + "options": "2", + "category[]": await self.get_category_id(meta), } else: params = { - 'csrfToken': csrfToken, - 'search': meta['title'], - 'category[]': await self.get_category_id(meta), - 'options': '3' + "csrfToken": csrfToken, + "search": meta["title"], + "category[]": await self.get_category_id(meta), + "options": "3", } r = session.get(search_url, params=params) await asyncio.sleep(0.5) - soup = BeautifulSoup(r.text, 'html.parser') - find = soup.find_all('a', href=True) + soup = BeautifulSoup(r.text, "html.parser") + find = soup.find_all("a", href=True) for each in find: - if each['href'].startswith('details.php?id='): + if each["href"].startswith("details.php?id="): dupes.append(each.text) return dupes @@ -231,7 +267,9 @@ async def validate_credentials(self, meta): cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/HDT.txt") vcookie = await self.validate_cookies(meta, cookiefile) if vcookie is not True: - console.print('[red]Failed to validate cookies. Please confirm that the site is up or export a fresh cookie file from the site') + console.print( + "[red]Failed to validate cookies.
Please confirm that the site is up or export a fresh cookie file from the site" + ) return False return True @@ -243,8 +281,8 @@ async def validate_cookies(self, meta, cookiefile): with requests.Session() as session: session.cookies.update(await common.parseCookieFile(cookiefile)) res = session.get(url=url) - if meta['debug']: - console.print('[cyan]Cookies:') + if meta["debug"]: + console.print("[cyan]Cookies:") console.print(session.cookies.get_dict()) console.print(res.url) if res.text.find("Logout") != -1: @@ -286,36 +324,64 @@ async def login(self, cookiefile): async def get_csrfToken(self, session, url): r = session.get(url) await asyncio.sleep(0.5) - soup = BeautifulSoup(r.text, 'html.parser') - csrfToken = soup.find('input', {'name': 'csrfToken'}).get('value') + soup = BeautifulSoup(r.text, "html.parser") + csrfToken = soup.find("input", {"name": "csrfToken"}).get("value") return csrfToken async def edit_desc(self, meta): # base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', newline='', encoding='utf-8') as descfile: - if meta['is_disc'] != 'BDMV': + with open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "w", + newline="", + encoding="utf-8", + ) as descfile: + if meta["is_disc"] != "BDMV": # Beautify MediaInfo for HDT using custom template - video = meta['filelist'][0] - mi_template = os.path.abspath(f"{meta['base_dir']}/data/templates/MEDIAINFO.txt") + video = meta["filelist"][0] + mi_template = os.path.abspath( + f"{meta['base_dir']}/data/templates/MEDIAINFO.txt" + ) if os.path.exists(mi_template): - media_info = MediaInfo.parse(video, output="STRING", full=False, mediainfo_options={"inform": f"file://{mi_template}"}) - descfile.write(f"""[left][font=consolas]\n{media_info}\n[/font][/left]\n""") + media_info = MediaInfo.parse( + video, + output="STRING", + full=False, + mediainfo_options={"inform": f"file://{mi_template}"}, + ) + descfile.write( + f"""[left][font=consolas]\n{media_info}\n[/font][/left]\n""" + ) else: console.print("[bold red]Couldn't find the MediaInfo template") console.print("[green]Using normal MediaInfo for the description.") - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8') as MI: - descfile.write(f"""[left][font=consolas]\n{MI.read()}\n[/font][/left]\n\n""") + with open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", + "r", + encoding="utf-8", + ) as MI: + descfile.write( + f"""[left][font=consolas]\n{MI.read()}\n[/font][/left]\n\n""" + ) else: - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8') as BD_SUMMARY: - descfile.write(f"""[left][font=consolas]\n{BD_SUMMARY.read()}\n[/font][/left]\n\n""") + with open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ) as BD_SUMMARY: + descfile.write( + f"""[left][font=consolas]\n{BD_SUMMARY.read()}\n[/font][/left]\n\n""" + ) # Add Screenshots - images = meta['image_list'] + images = meta["image_list"] if len(images) > 0: for each in range(min(2, len(images))): - img_url = images[each]['img_url'] - raw_url = images[each]['raw_url'] - descfile.write(f' ') + img_url = images[each]["img_url"] + raw_url = images[each]["raw_url"] + descfile.write( + f' ' + ) descfile.close() diff --git a/src/trackers/HP.py b/src/trackers/HP.py index 76acbb837..0312ed86a 100644 --- a/src/trackers/HP.py +++ b/src/trackers/HP.py @@ 
-9,7 +9,7 @@ from src.console import console -class HP(): +class HP: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -20,116 +20,146 @@ class HP(): def __init__(self, config): self.config = config - self.tracker = 'HP' - self.source_flag = 'Hidden-Palace' - self.upload_url = 'https://hidden-palace.net/api/torrents/upload' - self.search_url = 'https://hidden-palace.net/api/torrents/filter' + self.tracker = "HP" + self.source_flag = "Hidden-Palace" + self.upload_url = "https://hidden-palace.net/api/torrents/upload" + self.search_url = "https://hidden-palace.net/api/torrents/filter" self.signature = None self.banned_groups = [""] pass async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') + "MOVIE": "1", + "TV": "2", + }.get(category_name, "0") return category_id async def get_type_id(self, type): type_id = { - 'DISC': '1', - 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - 'ENCODE': '3' - }.get(type, '0') + "DISC": "1", + "REMUX": "2", + "WEBDL": "4", + "WEBRIP": "5", + "HDTV": "6", + "ENCODE": "3", + }.get(type, "0") return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') + "8640p": "10", + "4320p": "1", + "2160p": "2", + "1440p": "3", + "1080p": "3", + "1080i": "4", + "720p": "5", + "576p": "6", + "576i": "7", + "480p": "8", + "480i": "9", + }.get(resolution, "10") return resolution_id async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) + cat_id = await self.get_cat_id(meta["category"]) + type_id = await self.get_type_id(meta["type"]) + resolution_id = await self.get_res_id(meta["resolution"]) await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + region_id = await common.unit3d_region_ids(meta.get("region")) + distributor_id = await common.unit3d_distributor_ids(meta.get("distributor")) + if ( + meta["anon"] == 0 + and bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + is False + ): anon = 0 else: anon = 1 - if meta['bdinfo'] is not None: + if meta["bdinfo"] is not None: mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + bd_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ).read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ).read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') - files = {'torrent': open_torrent} + desc = open( + 
f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + open_torrent = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", + "rb", + ) + files = {"torrent": open_torrent} data = { - 'name': meta['name'], - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb_id'].replace('tt', ''), - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, + "name": meta["name"], + "description": desc, + "mediainfo": mi_dump, + "bdinfo": bd_dump, + "category_id": cat_id, + "type_id": type_id, + "resolution_id": resolution_id, + "tmdb": meta["tmdb"], + "imdb": meta["imdb_id"].replace("tt", ""), + "tvdb": meta["tvdb_id"], + "mal": meta["mal_id"], + "igdb": 0, + "anonymous": anon, + "stream": meta["stream"], + "sd": meta["sd"], + "keywords": meta["keywords"], + "personal_release": int(meta.get("personalrelease", False)), + "internal": 0, + "featured": 0, + "free": 0, + "doubleup": 0, + "sticky": 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 + if self.config["TRACKERS"][self.tracker].get("internal", False) is True: + if meta["tag"] != "" and ( + meta["tag"][1:] + in self.config["TRACKERS"][self.tracker].get("internal_groups", []) + ): + data["internal"] = 1 if region_id != 0: - data['region_id'] = region_id + data["region_id"] = region_id if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') + data["distributor_id"] = distributor_id + if meta.get("category") == "TV": + data["season_number"] = meta.get("season_int", "0") + data["episode_number"] = meta.get("episode_int", "0") headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + "User-Agent": f"Upload Assistant/2.1 ({platform.system()} {platform.release()})" } + params = {"api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip()} - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + if meta["debug"] is False: + response = requests.post( + url=self.upload_url, + files=files, + data=data, + headers=headers, + params=params, + ) try: console.print(response.json()) except Exception: @@ -144,28 +174,32 @@ async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" + "api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip(), + "tmdbId": meta["tmdb"], + 
"categories[]": await self.get_cat_id(meta["category"]), + "types[]": await self.get_type_id(meta["type"]), + "resolutions[]": await self.get_res_id(meta["resolution"]), + "name": "", } - if meta['category'] == 'TV': - params['name'] = params['name'] + f"{meta.get('season', '')}{meta.get('episode', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + meta['edition'] + if meta["category"] == "TV": + params["name"] = ( + params["name"] + f"{meta.get('season', '')}{meta.get('episode', '')}" + ) + if meta.get("edition", "") != "": + params["name"] = params["name"] + meta["edition"] try: response = requests.get(url=self.search_url, params=params) response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] + for each in response["data"]: + result = [each][0]["attributes"]["name"] # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + console.print( + "[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect" + ) await asyncio.sleep(5) return dupes diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index c1a5298f2..d98c4737e 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -11,7 +11,7 @@ from src.console import console -class HUNO(): +class HUNO: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -19,88 +19,126 @@ class HUNO(): Set type/category IDs Upload """ + def __init__(self, config): self.config = config - self.tracker = 'HUNO' - self.source_flag = 'HUNO' - self.search_url = 'https://hawke.uno/api/torrents/filter' - self.upload_url = 'https://hawke.uno/api/torrents/upload' + self.tracker = "HUNO" + self.source_flag = "HUNO" + self.search_url = "https://hawke.uno/api/torrents/filter" + self.upload_url = "https://hawke.uno/api/torrents/upload" self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" - self.banned_groups = ["4K4U, Bearfish, BiTOR, BONE, D3FiL3R, d3g, DTR, ELiTE, EVO, eztv, EzzRips, FGT, HashMiner, HETeam, HEVCBay, HiQVE, HR-DR, iFT, ION265, iVy, JATT, Joy, LAMA, m3th, MeGusta, MRN, Musafirboy, OEPlus, Pahe.in, PHOCiS, PSA, RARBG, RMTeam, ShieldBearer, SiQ, TBD, Telly, TSP, VXT, WKS, YAWNiX, YIFY, YTS"] + self.banned_groups = [ + "4K4U, Bearfish, BiTOR, BONE, D3FiL3R, d3g, DTR, ELiTE, EVO, eztv, EzzRips, FGT, HashMiner, HETeam, HEVCBay, HiQVE, HR-DR, iFT, ION265, iVy, JATT, Joy, LAMA, m3th, MeGusta, MRN, Musafirboy, OEPlus, Pahe.in, PHOCiS, PSA, RARBG, RMTeam, ShieldBearer, SiQ, TBD, Telly, TSP, VXT, WKS, YAWNiX, YIFY, YTS" + ] pass async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.unit3d_edit_desc(meta, self.tracker, self.signature) await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) + cat_id = await self.get_cat_id(meta["category"]) type_id = await self.get_type_id(meta) - resolution_id = await self.get_res_id(meta['resolution']) - if meta['anon'] == 0 and bool(str2bool(self.config['TRACKERS']['HUNO'].get('anon', "False"))) is False: + resolution_id = await self.get_res_id(meta["resolution"]) + if ( + meta["anon"] == 0 + and bool(str2bool(self.config["TRACKERS"]["HUNO"].get("anon", "False"))) + is False + ): anon = 0 else: anon = 1 # adding 
logic to check if it's an encode or webrip and not HEVC as only HEVC encodes and webrips are allowed - if meta['video_codec'] != "HEVC" and (meta['type'] == "ENCODE" or meta['type'] == "WEBRIP"): - console.print('[bold red]Only x265/HEVC encodes are allowed') + if meta["video_codec"] != "HEVC" and ( + meta["type"] == "ENCODE" or meta["type"] == "WEBRIP" + ): + console.print("[bold red]Only x265/HEVC encodes are allowed") return - if meta['bdinfo'] is not None: + if meta["bdinfo"] is not None: mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + bd_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ).read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ).read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[HUNO]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[HUNO]{meta['clean_name']}.torrent", 'rb') - files = {'torrent': open_torrent} + desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[HUNO]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + open_torrent = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[HUNO]{meta['clean_name']}.torrent", + "rb", + ) + files = {"torrent": open_torrent} data = { - 'name': await self.get_name(meta), - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb_id'].replace('tt', ''), - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': await self.is_plex_friendly(meta), - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'season_pack': meta.get('tv_pack', 0), + "name": await self.get_name(meta), + "description": desc, + "mediainfo": mi_dump, + "bdinfo": bd_dump, + "category_id": cat_id, + "type_id": type_id, + "resolution_id": resolution_id, + "tmdb": meta["tmdb"], + "imdb": meta["imdb_id"].replace("tt", ""), + "tvdb": meta["tvdb_id"], + "mal": meta["mal_id"], + "igdb": 0, + "anonymous": anon, + "stream": await self.is_plex_friendly(meta), + "sd": meta["sd"], + "keywords": meta["keywords"], + "season_pack": meta.get("tv_pack", 0), # 'featured' : 0, # 'free' : 0, # 'double_up' : 0, # 'sticky' : 0, } - tracker_config = self.config['TRACKERS'][self.tracker] + tracker_config = self.config["TRACKERS"][self.tracker] - if 'internal' in tracker_config: - if tracker_config['internal'] and meta['tag'] and meta['tag'][1:] in tracker_config.get('internal_groups', []): - data['internal'] = 1 + if "internal" in tracker_config: + if ( + tracker_config["internal"] + and meta["tag"] + and meta["tag"][1:] in tracker_config.get("internal_groups", []) + ): + data["internal"] = 1 else: - data['internal'] = 0 + data["internal"] = 0 headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': tracker_config['api_key'].strip() + "User-Agent": f"Upload Assistant/2.1 ({platform.system()} {platform.release()})" } + params = {"api_token": tracker_config["api_key"].strip()} - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + if meta["debug"] is False: + response = requests.post( +
url=self.upload_url, + files=files, + data=data, + headers=headers, + params=params, + ) try: console.print(response.json()) # adding torrent link to comment of torrent file - t_id = response.json()['data'].split(".")[1].split("/")[3] - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://hawke.uno/torrents/" + t_id) + t_id = response.json()["data"].split(".")[1].split("/")[3] + await common.add_tracker_torrent( + meta, + self.tracker, + self.source_flag, + self.config["TRACKERS"][self.tracker].get("announce_url"), + "https://hawke.uno/torrents/" + t_id, + ) except Exception: console.print("It may have uploaded, go check") return @@ -110,22 +148,30 @@ async def upload(self, meta, disctype): open_torrent.close() def get_audio(self, meta): - channels = meta.get('channels', "") - codec = meta.get('audio', "").replace("DD+", "DDP").replace("EX", "").replace("Dual-Audio", "").replace(channels, "") - dual = "Dual-Audio" in meta.get('audio', "") + channels = meta.get("channels", "") + codec = ( + meta.get("audio", "") + .replace("DD+", "DDP") + .replace("EX", "") + .replace("Dual-Audio", "") + .replace(channels, "") + ) + dual = "Dual-Audio" in meta.get("audio", "") language = "" if dual: language = "DUAL" - elif 'mediainfo' in meta: - language = next(x for x in meta["mediainfo"]["media"]["track"] if x["@type"] == "Audio").get('Language_String', "English") - language = re.sub(r'\(.+\)', '', language) + elif "mediainfo" in meta: + language = next( + x for x in meta["mediainfo"]["media"]["track"] if x["@type"] == "Audio" + ).get("Language_String", "English") + language = re.sub(r"\(.+\)", "", language) if language == "zxx": language = "Silent" - return f'{codec} {channels} {language}' + return f"{codec} {channels} {language}" def get_basename(self, meta): - path = next(iter(meta['filelist']), meta['path']) + path = next(iter(meta["filelist"]), meta["path"]) return os.path.basename(path) async def get_name(self, meta): @@ -133,50 +179,54 @@ async def get_name(self, meta): # It was much easier to build the name from scratch than to alter the existing name. 
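+ # For orientation, with hypothetical inputs (title="Example", year=2020, + # resolution="1080p", service="AMZN", video_encode="x264", hdr="SDR", + # audio="DDP 5.1 English", tag="- GRP", everything else empty), the movie + # WEB-DL template below collapses to: + # "Example (2020) (1080p AMZN WEB-DL x264 SDR DDP 5.1 English - GRP)"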
basename = self.get_basename(meta) - hc = meta.get('hardcoded-subs') - type = meta.get('type', "") - title = meta.get('title', "") - alt_title = meta.get('aka', "") # noqa F841 - year = meta.get('year', "") - resolution = meta.get('resolution', "") + hc = meta.get("hardcoded-subs") + type = meta.get("type", "") + title = meta.get("title", "") + alt_title = meta.get("aka", "") # noqa F841 + year = meta.get("year", "") + resolution = meta.get("resolution", "") audio = self.get_audio(meta) - service = meta.get('service', "") - season = meta.get('season', "") - episode = meta.get('episode', "") - repack = meta.get('repack', "") + service = meta.get("service", "") + season = meta.get("season", "") + episode = meta.get("episode", "") + repack = meta.get("repack", "") if repack.strip(): repack = f"[{repack}]" - three_d = meta.get('3D', "") - tag = meta.get('tag', "").replace("-", "- ") + three_d = meta.get("3D", "") + tag = meta.get("tag", "").replace("-", "- ") if tag == "": tag = "- NOGRP" - source = meta.get('source', "") - uhd = meta.get('uhd', "") - hdr = meta.get('hdr', "") + source = meta.get("source", "") + uhd = meta.get("uhd", "") + hdr = meta.get("hdr", "") if not hdr.strip(): hdr = "SDR" - distributor = meta.get('distributor', "") # noqa F841 - video_codec = meta.get('video_codec', "") - video_encode = meta.get('video_encode', "").replace(".", "") - if 'x265' in basename: - video_encode = video_encode.replace('H', 'x') - region = meta.get('region', "") - dvd_size = meta.get('dvd_size', "") - edition = meta.get('edition', "") + distributor = meta.get("distributor", "") # noqa F841 + video_codec = meta.get("video_codec", "") + video_encode = meta.get("video_encode", "").replace(".", "") + if "x265" in basename: + video_encode = video_encode.replace("H", "x") + region = meta.get("region", "") + dvd_size = meta.get("dvd_size", "") + edition = meta.get("edition", "") hybrid = "Hybrid" if "HYBRID" in basename.upper() else "" - search_year = meta.get('search_year', "") + search_year = meta.get("search_year", "") if not str(search_year).strip(): search_year = year - scale = "DS4K" if "DS4K" in basename.upper() else "RM4K" if "RM4K" in basename.upper() else "" + scale = ( + "DS4K" + if "DS4K" in basename.upper() + else "RM4K" if "RM4K" in basename.upper() else "" + ) # YAY NAMING FUN - if meta['category'] == "MOVIE": # MOVIE SPECIFIC + if meta["category"] == "MOVIE": # MOVIE SPECIFIC if type == "DISC": # Disk - if meta['is_disc'] == 'BDMV': + if meta["is_disc"] == "BDMV": name = f"{title} ({year}) {three_d} {edition} ({resolution} {region} {uhd} {source} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" - elif meta['is_disc'] == 'DVD': + elif meta["is_disc"] == "DVD": name = f"{title} ({year}) {edition} ({resolution} {dvd_size} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" - elif meta['is_disc'] == 'HDDVD': + elif meta["is_disc"] == "HDDVD": name = f"{title} ({year}) {edition} ({resolution} {source} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" elif type == "REMUX" and source == "BluRay": # BluRay Remux name = f"{title} ({year}) {three_d} {edition} ({resolution} {uhd} {source} {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" @@ -188,13 +238,13 @@ async def get_name(self, meta): name = f"{title} ({year}) {edition} ({resolution} {scale} {uhd} {service} WEB-DL {hybrid} {video_encode} {hdr} {audio} {tag}) {repack}" elif type == "HDTV": # HDTV name = f"{title} ({year}) {edition} ({resolution} HDTV {hybrid} {video_encode} {audio} {tag}) {repack}" - elif 
meta['category'] == "TV": # TV SPECIFIC + elif meta["category"] == "TV": # TV SPECIFIC if type == "DISC": # Disk - if meta['is_disc'] == 'BDMV': + if meta["is_disc"] == "BDMV": name = f"{title} ({search_year}) {season}{episode} {three_d} {edition} ({resolution} {region} {uhd} {source} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" - if meta['is_disc'] == 'DVD': + if meta["is_disc"] == "DVD": name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} {dvd_size} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" - elif meta['is_disc'] == 'HDDVD': + elif meta["is_disc"] == "HDDVD": name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} {source} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" elif type == "REMUX" and source == "BluRay": # BluRay Remux name = f"{title} ({search_year}) {season}{episode} {three_d} {edition} ({resolution} {uhd} {source} {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" # SOURCE @@ -208,44 +258,44 @@ async def get_name(self, meta): name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} HDTV {hybrid} {video_encode} {audio} {tag}) {repack}" if hc: - name = re.sub(r'((\([0-9]{4}\)))', r'\1 Ensubbed', name) - return ' '.join(name.split()).replace(": ", " - ") + name = re.sub(r"((\([0-9]{4}\)))", r"\1 Ensubbed", name) + return " ".join(name.split()).replace(": ", " - ") async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') + "MOVIE": "1", + "TV": "2", + }.get(category_name, "0") return category_id async def get_type_id(self, meta): basename = self.get_basename(meta) - type = meta['type'] + type = meta["type"] - if type == 'REMUX': - return '2' - elif type in ('WEBDL', 'WEBRIP'): - return '15' if 'x265' in basename else '3' - elif type in ('ENCODE', 'HDTV'): - return '15' - elif type == 'DISC': - return '1' + if type == "REMUX": + return "2" + elif type in ("WEBDL", "WEBRIP"): + return "15" if "x265" in basename else "3" + elif type in ("ENCODE", "HDTV"): + return "15" + elif type == "DISC": + return "1" else: - return '0' + return "0" async def get_res_id(self, resolution): resolution_id = { - 'Other': '10', - '4320p': '1', - '2160p': '2', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') + "Other": "10", + "4320p": "1", + "2160p": "2", + "1080p": "3", + "1080i": "4", + "720p": "5", + "576p": "6", + "576i": "7", + "480p": "8", + "480i": "9", + }.get(resolution, "10") return resolution_id async def is_plex_friendly(self, meta): @@ -261,27 +311,29 @@ async def search_existing(self, meta, disctype): console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token': self.config['TRACKERS']['HUNO']['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" + "api_token": self.config["TRACKERS"]["HUNO"]["api_key"].strip(), + "tmdbId": meta["tmdb"], + "categories[]": await self.get_cat_id(meta["category"]), + "types[]": await self.get_type_id(meta), + "resolutions[]": await self.get_res_id(meta["resolution"]), + "name": "", } - if meta['category'] == 'TV': - params['name'] = f"{meta.get('season', '')}{meta.get('episode', '')}" - if meta.get('edition', "") != "": - params['name'] + meta['edition'] + if meta["category"] == "TV": + params["name"] = f"{meta.get('season', 
'')}{meta.get('episode', '')}" + if meta.get("edition", "") != "": + params["name"] += meta["edition"] try: response = requests.get(url=self.search_url, params=params) response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] + for each in response["data"]: + result = [each][0]["attributes"]["name"] # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + console.print( + "[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect" + ) await asyncio.sleep(5) return dupes diff --git a/src/trackers/JPTV.py b/src/trackers/JPTV.py index 76e8e78f9..20dc6013e 100644 --- a/src/trackers/JPTV.py +++ b/src/trackers/JPTV.py @@ -9,7 +9,7 @@ from src.console import console -class JPTV(): +class JPTV: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -20,123 +20,149 @@ class JPTV(): def __init__(self, config): self.config = config - self.tracker = 'JPTV' - self.source_flag = 'jptv.club' - self.upload_url = 'https://jptv.club/api/torrents/upload' - self.search_url = 'https://jptv.club/api/torrents/filter' + self.tracker = "JPTV" + self.source_flag = "jptv.club" + self.upload_url = "https://jptv.club/api/torrents/upload" + self.search_url = "https://jptv.club/api/torrents/filter" self.signature = None self.banned_groups = [""] pass async def get_cat_id(self, meta): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(meta['category'], '0') - if meta['anime']: + "MOVIE": "1", + "TV": "2", + }.get(meta["category"], "0") + if meta["anime"]: category_id = { - 'MOVIE': '7', - 'TV': '9', - }.get(meta['category'], '0') + "MOVIE": "7", + "TV": "9", + }.get(meta["category"], "0") return category_id async def get_type_id(self, type): type_id = { - 'DISC': '16', - 'REMUX': '18', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - 'ENCODE': '3' - }.get(type, '0') + "DISC": "16", + "REMUX": "18", + "WEBDL": "4", + "WEBRIP": "5", + "HDTV": "6", + "ENCODE": "3", + }.get(type, "0") return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') + "8640p": "10", + "4320p": "1", + "2160p": "2", + "1440p": "3", + "1080p": "3", + "1080i": "4", + "720p": "5", + "576p": "6", + "576i": "7", + "480p": "8", + "480i": "9", + }.get(resolution, "10") return resolution_id async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) + type_id = await self.get_type_id(meta["type"]) + resolution_id = await self.get_res_id(meta["resolution"]) await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + region_id = await common.unit3d_region_ids(meta.get("region")) + distributor_id = await common.unit3d_distributor_ids(meta.get("distributor")) jptv_name = await self.edit_name(meta) - if meta['anon'] == 0 and
bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + if ( + meta["anon"] == 0 + and bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + is False + ): anon = 0 else: anon = 1 - if meta['bdinfo'] is not None: + if meta["bdinfo"] is not None: mi_dump = "" - for each in meta['discs']: - mi_dump = mi_dump + each['summary'].strip() + "\n\n" + for each in meta["discs"]: + mi_dump = mi_dump + each["summary"].strip() + "\n\n" else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ).read() # bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') - files = {'torrent': open_torrent} + desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + open_torrent = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", + "rb", + ) + files = {"torrent": open_torrent} data = { - 'name': jptv_name, - 'description': desc, - 'mediainfo': mi_dump, + "name": jptv_name, + "description": desc, + "mediainfo": mi_dump, # 'bdinfo' : bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb_id'].replace('tt', ''), - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, + "category_id": cat_id, + "type_id": type_id, + "resolution_id": resolution_id, + "tmdb": meta["tmdb"], + "imdb": meta["imdb_id"].replace("tt", ""), + "tvdb": meta["tvdb_id"], + "mal": meta["mal_id"], + "igdb": 0, + "anonymous": anon, + "stream": meta["stream"], + "sd": meta["sd"], + "keywords": meta["keywords"], + "personal_release": int(meta.get("personalrelease", False)), + "internal": 0, + "featured": 0, + "free": 0, + "doubleup": 0, + "sticky": 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 + if self.config["TRACKERS"][self.tracker].get("internal", False) is True: + if meta["tag"] != "" and ( + meta["tag"][1:] + in self.config["TRACKERS"][self.tracker].get("internal_groups", []) + ): + data["internal"] = 1 if region_id != 0: - data['region_id'] = region_id + data["region_id"] = region_id if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') + data["distributor_id"] = distributor_id + if meta.get("category") == "TV": + data["season_number"] = meta.get("season_int", "0") + data["episode_number"] = meta.get("episode_int", "0") headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + "User-Agent": f"Upload Assistant/2.1 ({platform.system()} 
{platform.release()})" } + params = {"api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip()} - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + if meta["debug"] is False: + response = requests.post( + url=self.upload_url, + files=files, + data=data, + headers=headers, + params=params, + ) try: console.print(response.json()) except Exception: @@ -151,50 +177,57 @@ async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdb': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" + "api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip(), + "tmdb": meta["tmdb"], + "categories[]": await self.get_cat_id(meta), + "types[]": await self.get_type_id(meta["type"]), + "resolutions[]": await self.get_res_id(meta["resolution"]), + "name": "", } - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" - if meta['debug']: + if meta.get("edition", "") != "": + params["name"] = params["name"] + f" {meta['edition']}" + if meta["debug"]: console.log("[cyan]Dupe Search Parameters") console.log(params) try: response = requests.get(url=self.search_url, params=params) response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] + for each in response["data"]: + result = [each][0]["attributes"]["name"] # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + console.print( + "[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect" + ) await asyncio.sleep(5) return dupes async def edit_name(self, meta): - name = meta.get('name') - aka = meta.get('aka') - original_title = meta.get('original_title') - year = str(meta.get('year')) # noqa F841 - audio = meta.get('audio') - source = meta.get('source') # noqa F841 - is_disc = meta.get('is_disc') # noqa F841 - if aka != '': + name = meta.get("name") + aka = meta.get("aka") + original_title = meta.get("original_title") + year = str(meta.get("year")) # noqa F841 + audio = meta.get("audio") + source = meta.get("source") # noqa F841 + is_disc = meta.get("is_disc") # noqa F841 + if aka != "": # ugly fix to remove the extra space in the title - aka = aka + ' ' + aka = aka + " " name = name.replace(aka, f'{original_title} {chr(int("202A", 16))}') - elif aka == '': - if meta.get('title') != original_title: + elif aka == "": + if meta.get("title") != original_title: # name = f'{name[:name.find(year)]}/ {original_title} {chr(int("202A", 16))}{name[name.find(year):]}' - name = name.replace(meta['title'], f"{original_title} {chr(int('202A', 16))} {meta['title']}") - if 'AAC' in audio: - name = name.replace(audio.strip().replace(" ", " "), audio.replace(" ", "")) + name = name.replace( + meta["title"], + f"{original_title} {chr(int('202A', 16))} {meta['title']}", + ) + if "AAC" in audio: + name = name.replace( + audio.strip().replace(" ", " "), audio.replace(" ", "") + ) name = name.replace("DD+ ", "DD+") return name diff --git a/src/trackers/LCD.py b/src/trackers/LCD.py index 5b7397d3d..b14419b0a 100644 --- a/src/trackers/LCD.py +++ b/src/trackers/LCD.py @@ -9,7 +9,7 @@ from src.console import console -class LCD(): +class LCD: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -17,13 +17,14 @@ class LCD(): Set type/category IDs Upload """ + def __init__(self, config): self.config = config - self.tracker = 'LCD' - self.source_flag = 'LOCADORA' - self.search_url = 'https://locadora.cc/api/torrents/filter' - self.torrent_url = 'https://locadora.cc/api/torrents/' - self.upload_url = 'https://locadora.cc/api/torrents/upload' + self.tracker = "LCD" + self.source_flag = "LOCADORA" + self.search_url = "https://locadora.cc/api/torrents/filter" + self.torrent_url = "https://locadora.cc/api/torrents/" + self.upload_url = "https://locadora.cc/api/torrents/upload" self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = [""] pass @@ -32,71 +33,103 @@ async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, self.tracker, self.signature) - cat_id = await self.get_cat_id(meta['category'], meta.get('edition', ''), meta) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + cat_id = await self.get_cat_id(meta["category"], meta.get("edition", ""), meta) + type_id = await self.get_type_id(meta["type"]) + resolution_id = await self.get_res_id(meta["resolution"]) + region_id = await common.unit3d_region_ids(meta.get("region")) + distributor_id = await common.unit3d_distributor_ids(meta.get("distributor")) name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', 
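
Review note on edit_name above: chr(int("202A", 16)) is U+202A, the LEFT-TO-RIGHT EMBEDDING control character, presumably inserted to pin rendering direction around non-Latin original titles; note the embedding is never closed with U+202C (POP DIRECTIONAL FORMATTING), so it can affect everything rendered after it. A named constant states the intent (helper name is illustrative):

    LEFT_TO_RIGHT_EMBEDDING = "\u202a"  # same character as chr(int("202A", 16))

    def prefix_original_title(name: str, title: str, original_title: str) -> str:
        return name.replace(title, f"{original_title} {LEFT_TO_RIGHT_EMBEDDING} {title}")
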
"False")))) is False: + if ( + meta["anon"] == 0 + and bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + is False + ): anon = 0 else: anon = 1 - if meta['bdinfo'] is not None: + if meta["bdinfo"] is not None: mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + bd_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ).read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ).read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[LCD]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[LCD]{meta['clean_name']}.torrent", 'rb') - files = {'torrent': ("placeholder.torrent", open_torrent, "application/x-bittorrent")} + desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[LCD]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + open_torrent = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[LCD]{meta['clean_name']}.torrent", + "rb", + ) + files = { + "torrent": ("placeholder.torrent", open_torrent, "application/x-bittorrent") + } data = { - 'name': name, - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb_id'].replace('tt', ''), - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, + "name": name, + "description": desc, + "mediainfo": mi_dump, + "bdinfo": bd_dump, + "category_id": cat_id, + "type_id": type_id, + "resolution_id": resolution_id, + "tmdb": meta["tmdb"], + "imdb": meta["imdb_id"].replace("tt", ""), + "tvdb": meta["tvdb_id"], + "mal": meta["mal_id"], + "igdb": 0, + "anonymous": anon, + "stream": meta["stream"], + "sd": meta["sd"], + "keywords": meta["keywords"], + "personal_release": int(meta.get("personalrelease", False)), + "internal": 0, + "featured": 0, + "free": 0, + "doubleup": 0, + "sticky": 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 + if self.config["TRACKERS"][self.tracker].get("internal", False) is True: + if meta["tag"] != "" and ( + meta["tag"][1:] + in self.config["TRACKERS"][self.tracker].get("internal_groups", []) + ): + data["internal"] = 1 if region_id != 0: - data['region_id'] = region_id + data["region_id"] = region_id if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') + data["distributor_id"] = distributor_id + if meta.get("category") == "TV": + data["season_number"] = meta.get("season_int", "0") + data["episode_number"] = meta.get("episode_int", "0") headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': 
self.config['TRACKERS'][self.tracker]['api_key'].strip() + "User-Agent": f"Upload Assistant/2.1 ({platform.system()} {platform.release()})" } + params = {"api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip()} - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + if meta["debug"] is False: + response = requests.post( + url=self.upload_url, + files=files, + data=data, + headers=headers, + params=params, + ) try: console.print(response.json()) except Exception: @@ -108,74 +141,92 @@ async def upload(self, meta, disctype): open_torrent.close() async def get_cat_id(self, category_name, edition, meta): - category_id = { - 'MOVIE': '1', - 'TV': '2', - 'ANIMES': '6' - }.get(category_name, '0') - if meta['anime'] is True and category_id == '2': - category_id = '6' + category_id = {"MOVIE": "1", "TV": "2", "ANIMES": "6"}.get(category_name, "0") + if meta["anime"] is True and category_id == "2": + category_id = "6" return category_id async def get_type_id(self, type): type_id = { - 'DISC': '1', - 'REMUX': '2', - 'ENCODE': '3', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6' - }.get(type, '0') + "DISC": "1", + "REMUX": "2", + "ENCODE": "3", + "WEBDL": "4", + "WEBRIP": "5", + "HDTV": "6", + }.get(type, "0") return type_id async def get_res_id(self, resolution): resolution_id = { # '8640p':'10', - '4320p': '1', - '2160p': '2', + "4320p": "1", + "2160p": "2", # '1440p' : '2', - '1080p': '3', - '1080i': '34', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9', - 'Other': '10', - }.get(resolution, '10') + "1080p": "3", + "1080i": "34", + "720p": "5", + "576p": "6", + "576i": "7", + "480p": "8", + "480i": "9", + "Other": "10", + }.get(resolution, "10") return resolution_id async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Buscando por duplicatas no tracker...") params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category'], meta.get('edition', ''), meta), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" + "api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip(), + "tmdbId": meta["tmdb"], + "categories[]": await self.get_cat_id( + meta["category"], meta.get("edition", ""), meta + ), + "types[]": await self.get_type_id(meta["type"]), + "resolutions[]": await self.get_res_id(meta["resolution"]), + "name": "", } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" + if meta["category"] == "TV": + params["name"] = ( + params["name"] + f" {meta.get('season', '')}{meta.get('episode', '')}" + ) + if meta.get("edition", "") != "": + params["name"] = params["name"] + f" {meta['edition']}" try: response = requests.get(url=self.search_url, params=params) response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] + for each in response["data"]: + result = [each][0]["attributes"]["name"] # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) except Exception: - console.print('[bold red]Não foi possivel buscar no tracker torrents duplicados. 
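
Review note on get_type_id above: the parameter named type shadows the Python builtin, and the mapping dict is rebuilt on every call. A sketch with a module-level table and a non-shadowing name (LCD's values shown):

    LCD_TYPE_IDS = {
        "DISC": "1", "REMUX": "2", "ENCODE": "3",
        "WEBDL": "4", "WEBRIP": "5", "HDTV": "6",
    }

    def get_type_id(release_type: str) -> str:
        return LCD_TYPE_IDS.get(release_type, "0")
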
O tracker está offline ou sua api está incorreta') + console.print( + "[bold red]Não foi possivel buscar no tracker torrents duplicados. O tracker está offline ou sua api está incorreta" + ) await asyncio.sleep(5) return dupes async def edit_name(self, meta): - name = meta['uuid'].replace('.mkv', '').replace('.mp4', '').replace(".", " ").replace("DDP2 0", "DDP2.0").replace("DDP5 1", "DDP5.1").replace("H 264", "H.264").replace("H 265", "H.264").replace("DD+7 1", "DD+7.1").replace("AAC2 0", "AAC2.0").replace('DD5 1', 'DD5.1').replace('DD2 0', 'DD2.0').replace('TrueHD 7 1', 'TrueHD 7.1').replace('DTS-HD MA 7 1', 'DTS-HD MA 7.1').replace('-C A A', '-C.A.A'), + name = ( + meta["uuid"] + .replace(".mkv", "") + .replace(".mp4", "") + .replace(".", " ") + .replace("DDP2 0", "DDP2.0") + .replace("DDP5 1", "DDP5.1") + .replace("H 264", "H.264") + .replace("H 265", "H.264") + .replace("DD+7 1", "DD+7.1") + .replace("AAC2 0", "AAC2.0") + .replace("DD5 1", "DD5.1") + .replace("DD2 0", "DD2.0") + .replace("TrueHD 7 1", "TrueHD 7.1") + .replace("DTS-HD MA 7 1", "DTS-HD MA 7.1") + .replace("-C A A", "-C.A.A"), + ) return name diff --git a/src/trackers/LST.py b/src/trackers/LST.py index 83fc5e1b3..dac8678f7 100644 --- a/src/trackers/LST.py +++ b/src/trackers/LST.py @@ -9,7 +9,7 @@ from src.console import console -class LST(): +class LST: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -20,133 +20,211 @@ class LST(): def __init__(self, config): self.config = config - self.tracker = 'LST' - self.source_flag = 'LST.GG' - self.upload_url = 'https://lst.gg/api/torrents/upload' - self.search_url = 'https://lst.gg/api/torrents/filter' - self.torrent_url = 'https://lst.gg/api/torrents/' + self.tracker = "LST" + self.source_flag = "LST.GG" + self.upload_url = "https://lst.gg/api/torrents/upload" + self.search_url = "https://lst.gg/api/torrents/filter" + self.torrent_url = "https://lst.gg/api/torrents/" self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" - self.banned_groups = ['aXXo', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'DNL', 'FaNGDiNG0', 'HD2DVD', 'HDTime', 'ION10', 'iPlanet', 'KiNGDOM', - 'mHD', 'mSD', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'OFT', 'PRODJi', 'SANTi', 'STUTTERSHIT', 'ViSION', 'VXT', 'WAF', - 'x0r', 'YIFY', 'Sicario', 'RARBG', 'MeGusta', 'TSP', 'TSPxL', 'GalaxyTV', 'TGALAXY', 'TORRENTGALAXY'] + self.banned_groups = [ + "aXXo", + "BRrip", + "CM8", + "CrEwSaDe", + "CTFOH", + "DNL", + "FaNGDiNG0", + "HD2DVD", + "HDTime", + "ION10", + "iPlanet", + "KiNGDOM", + "mHD", + "mSD", + "nHD", + "nikt0", + "nSD", + "NhaNc3", + "OFT", + "PRODJi", + "SANTi", + "STUTTERSHIT", + "ViSION", + "VXT", + "WAF", + "x0r", + "YIFY", + "Sicario", + "RARBG", + "MeGusta", + "TSP", + "TSPxL", + "GalaxyTV", + "TGALAXY", + "TORRENTGALAXY", + ] pass async def get_cat_id(self, category_name, keywords, service): category_id = { - 'MOVIE': '1', - 'TV': '2', - 'Anime': '6', - }.get(category_name, '0') - if category_name == 'TV' and 'anime' in keywords: - category_id = '6' - elif category_name == 'TV' and 'hentai' in service: - category_id = '8' + "MOVIE": "1", + "TV": "2", + "Anime": "6", + }.get(category_name, "0") + if category_name == "TV" and "anime" in keywords: + category_id = "6" + elif category_name == "TV" and "hentai" in service: + category_id = "8" return category_id async def get_type_id(self, type): type_id = { - 'DISC': '1', - 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - 'ENCODE': '3' - }.get(type, '0') + 
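
Review note: two pre-existing bugs in LCD.edit_name survive the reformat, and Black actually makes the first easier to spot. The trailing comma after .replace("-C A A", "-C.A.A") turns name into a one-element tuple (now visibly wrapped as name = (...,)), and "H 265" is normalized to "H.264" rather than "H.265". A corrected sketch:

    def edit_name(uuid: str) -> str:
        return (
            uuid.replace(".mkv", "").replace(".mp4", "").replace(".", " ")
            .replace("DDP2 0", "DDP2.0").replace("DDP5 1", "DDP5.1")
            .replace("H 264", "H.264")
            .replace("H 265", "H.265")  # was mapped to "H.264"
            .replace("DD+7 1", "DD+7.1").replace("AAC2 0", "AAC2.0")
            .replace("DD5 1", "DD5.1").replace("DD2 0", "DD2.0")
            .replace("TrueHD 7 1", "TrueHD 7.1")
            .replace("DTS-HD MA 7 1", "DTS-HD MA 7.1")
            .replace("-C A A", "-C.A.A")
        )  # no trailing comma, so this returns a str rather than a tuple
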
"DISC": "1", + "REMUX": "2", + "WEBDL": "4", + "WEBRIP": "5", + "HDTV": "6", + "ENCODE": "3", + }.get(type, "0") return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') + "8640p": "10", + "4320p": "1", + "2160p": "2", + "1440p": "3", + "1080p": "3", + "1080i": "4", + "720p": "5", + "576p": "6", + "576i": "7", + "480p": "8", + "480i": "9", + }.get(resolution, "10") return resolution_id async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category'], meta.get('keywords', ''), meta.get('service', '')) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - modq = await self.get_flag(meta, 'modq') - draft = await self.get_flag(meta, 'draft') + cat_id = await self.get_cat_id( + meta["category"], meta.get("keywords", ""), meta.get("service", "") + ) + type_id = await self.get_type_id(meta["type"]) + resolution_id = await self.get_res_id(meta["resolution"]) + modq = await self.get_flag(meta, "modq") + draft = await self.get_flag(meta, "draft") await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + region_id = await common.unit3d_region_ids(meta.get("region")) + distributor_id = await common.unit3d_distributor_ids(meta.get("distributor")) + if ( + meta["anon"] == 0 + and bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + is False + ): anon = 0 else: anon = 1 - if meta['bdinfo'] is not None: + if meta["bdinfo"] is not None: mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + bd_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ).read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ).read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - if meta.get('service') == "hentai": - desc = "[center]" + "[img]" + str(meta['poster']) + "[/img][/center]" + "\n[center]" + "https://www.themoviedb.org/tv/" + str(meta['tmdb']) + "\nhttps://myanimelist.net/anime/" + str(meta['mal']) + "[/center]" + desc + desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + if meta.get("service") == "hentai": + desc = ( + "[center]" + + "[img]" + + str(meta["poster"]) + + "[/img][/center]" + + "\n[center]" + + "https://www.themoviedb.org/tv/" + + str(meta["tmdb"]) + + "\nhttps://myanimelist.net/anime/" + + str(meta["mal"]) + + "[/center]" + + desc + ) - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') - files = {'torrent': open_torrent} + open_torrent = open( + 
f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", + "rb", + ) + files = {"torrent": open_torrent} data = { - 'name': meta['name'], - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb_id'].replace('tt', ''), - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - 'mod_queue_opt_in': modq, - 'draft_queue_opt_in': draft, + "name": meta["name"], + "description": desc, + "mediainfo": mi_dump, + "bdinfo": bd_dump, + "category_id": cat_id, + "type_id": type_id, + "resolution_id": resolution_id, + "tmdb": meta["tmdb"], + "imdb": meta["imdb_id"].replace("tt", ""), + "tvdb": meta["tvdb_id"], + "mal": meta["mal_id"], + "igdb": 0, + "anonymous": anon, + "stream": meta["stream"], + "sd": meta["sd"], + "keywords": meta["keywords"], + "personal_release": int(meta.get("personalrelease", False)), + "internal": 0, + "featured": 0, + "free": 0, + "doubleup": 0, + "sticky": 0, + "mod_queue_opt_in": modq, + "draft_queue_opt_in": draft, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 + if self.config["TRACKERS"][self.tracker].get("internal", False) is True: + if meta["tag"] != "" and ( + meta["tag"][1:] + in self.config["TRACKERS"][self.tracker].get("internal_groups", []) + ): + data["internal"] = 1 if region_id != 0: - data['region_id'] = region_id + data["region_id"] = region_id if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') + data["distributor_id"] = distributor_id + if meta.get("category") == "TV": + data["season_number"] = meta.get("season_int", "0") + data["episode_number"] = meta.get("episode_int", "0") headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + "User-Agent": f"Upload Assistant/2.1 ({platform.system()} {platform.release()})" } + params = {"api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip()} - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + if meta["debug"] is False: + response = requests.post( + url=self.upload_url, + files=files, + data=data, + headers=headers, + params=params, + ) try: console.print(response.json()) except Exception: @@ -158,7 +236,7 @@ async def upload(self, meta, disctype): open_torrent.close() async def get_flag(self, meta, flag_name): - config_flag = self.config['TRACKERS'][self.tracker].get(flag_name) + config_flag = self.config["TRACKERS"][self.tracker].get(flag_name) if config_flag is not None: return 1 if config_flag else 0 @@ -168,27 +246,33 @@ async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': 
meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category'], meta.get('keywords', ''), meta.get('service', '')), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" + "api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip(), + "tmdbId": meta["tmdb"], + "categories[]": await self.get_cat_id( + meta["category"], meta.get("keywords", ""), meta.get("service", "") + ), + "types[]": await self.get_type_id(meta["type"]), + "resolutions[]": await self.get_res_id(meta["resolution"]), + "name": "", } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" + if meta["category"] == "TV": + params["name"] = ( + params["name"] + f" {meta.get('season', '')}{meta.get('episode', '')}" + ) + if meta.get("edition", "") != "": + params["name"] = params["name"] + f" {meta['edition']}" try: response = requests.get(url=self.search_url, params=params) response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] + for each in response["data"]: + result = [each][0]["attributes"]["name"] # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + console.print( + "[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect" + ) await asyncio.sleep(5) return dupes diff --git a/src/trackers/LT.py b/src/trackers/LT.py index c6e0e4be1..66dd5aac6 100644 --- a/src/trackers/LT.py +++ b/src/trackers/LT.py @@ -9,7 +9,7 @@ from src.console import console -class LT(): +class LT: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -20,139 +20,179 @@ class LT(): def __init__(self, config): self.config = config - self.tracker = 'LT' + self.tracker = "LT" self.source_flag = 'Lat-Team "Poder Latino"' - self.upload_url = 'https://lat-team.com/api/torrents/upload' - self.search_url = 'https://lat-team.com/api/torrents/filter' - self.signature = '' + self.upload_url = "https://lat-team.com/api/torrents/upload" + self.search_url = "https://lat-team.com/api/torrents/filter" + self.signature = "" self.banned_groups = [""] pass async def get_cat_id(self, category_name, meta): category_id = { - 'MOVIE': '1', - 'TV': '2', - 'ANIME': '5', - 'TELENOVELAS': '8', - 'Doramas & Turcas': '20', - }.get(category_name, '0') + "MOVIE": "1", + "TV": "2", + "ANIME": "5", + "TELENOVELAS": "8", + "Doramas & Turcas": "20", + }.get(category_name, "0") # if is anime - if meta['anime'] is True and category_id == '2': - category_id = '5' + if meta["anime"] is True and category_id == "2": + category_id = "5" # elif is telenovela - elif category_id == '2' and ("telenovela" in meta['keywords'] or "telenovela" in meta['overview']): - category_id = '8' + elif category_id == "2" and ( + "telenovela" in meta["keywords"] or "telenovela" in meta["overview"] + ): + category_id = "8" # if is TURCAS o Doramas # elif meta["original_language"] in ['ja', 'ko', 'tr'] and category_id == '2' and 'Drama' in meta['genres'] : - # category_id = '20' + # category_id = '20' return category_id async def get_type_id(self, type): type_id = { - 'DISC': '1', - 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - 
'ENCODE': '3' - }.get(type, '0') + "DISC": "1", + "REMUX": "2", + "WEBDL": "4", + "WEBRIP": "5", + "HDTV": "6", + "ENCODE": "3", + }.get(type, "0") return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') + "8640p": "10", + "4320p": "1", + "2160p": "2", + "1440p": "3", + "1080p": "3", + "1080i": "4", + "720p": "5", + "576p": "6", + "576i": "7", + "480p": "8", + "480i": "9", + }.get(resolution, "10") return resolution_id async def edit_name(self, meta): - lt_name = meta['name'].replace('Dubbed', '').replace('Dual-Audio', '').replace(' ', ' ').strip() + lt_name = ( + meta["name"] + .replace("Dubbed", "") + .replace("Dual-Audio", "") + .replace(" ", " ") + .strip() + ) # Check if audio Spanish exists, if not append [SUBS] at the end - if meta['type'] != 'DISC': # DISC don't have mediainfo - audio_language_list = meta['mediainfo']['media']['track'][0].get('Audio_Language_List', '') - if 'Spanish' not in audio_language_list and '[SUBS]' not in lt_name: - if not meta['tag']: + if meta["type"] != "DISC": # DISC don't have mediainfo + audio_language_list = meta["mediainfo"]["media"]["track"][0].get( + "Audio_Language_List", "" + ) + if "Spanish" not in audio_language_list and "[SUBS]" not in lt_name: + if not meta["tag"]: lt_name += " [SUBS]" else: - lt_name = lt_name.replace(meta['tag'], f" [SUBS]{meta['tag']}") + lt_name = lt_name.replace(meta["tag"], f" [SUBS]{meta['tag']}") return lt_name async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category'], meta) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) + cat_id = await self.get_cat_id(meta["category"], meta) + type_id = await self.get_type_id(meta["type"]) + resolution_id = await self.get_res_id(meta["resolution"]) await common.unit3d_edit_desc(meta, self.tracker, self.signature) # region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + distributor_id = await common.unit3d_distributor_ids(meta.get("distributor")) lt_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + if ( + meta["anon"] == 0 + and bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + is False + ): anon = 0 else: anon = 1 - if meta['bdinfo'] is not None: + if meta["bdinfo"] is not None: mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + bd_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ).read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ).read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') - files = {'torrent': open_torrent} + desc = open( + 
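
Review note on LT.edit_name above: the [SUBS] decision reads Audio_Language_List from track[0] of the MediaInfo JSON, which assumes the General track comes first and carries that field. The predicate, factored out as a sketch under that same assumption:

    def needs_subs_tag(mediainfo: dict, lt_name: str) -> bool:
        general = mediainfo["media"]["track"][0]  # assumed to be the General track
        audio_languages = general.get("Audio_Language_List", "")
        return "Spanish" not in audio_languages and "[SUBS]" not in lt_name
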
f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + open_torrent = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", + "rb", + ) + files = {"torrent": open_torrent} data = { - 'name': lt_name, - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb_id'].replace('tt', ''), - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, + "name": lt_name, + "description": desc, + "mediainfo": mi_dump, + "bdinfo": bd_dump, + "category_id": cat_id, + "type_id": type_id, + "resolution_id": resolution_id, + "tmdb": meta["tmdb"], + "imdb": meta["imdb_id"].replace("tt", ""), + "tvdb": meta["tvdb_id"], + "mal": meta["mal_id"], + "igdb": 0, + "anonymous": anon, + "stream": meta["stream"], + "sd": meta["sd"], + "keywords": meta["keywords"], + "personal_release": int(meta.get("personalrelease", False)), + "internal": 0, + "featured": 0, + "free": 0, + "doubleup": 0, + "sticky": 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 + if self.config["TRACKERS"][self.tracker].get("internal", False) is True: + if meta["tag"] != "" and ( + meta["tag"][1:] + in self.config["TRACKERS"][self.tracker].get("internal_groups", []) + ): + data["internal"] = 1 if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = int(meta.get('season_int', '0')) - data['episode_number'] = int(meta.get('episode_int', '0')) + data["distributor_id"] = distributor_id + if meta.get("category") == "TV": + data["season_number"] = int(meta.get("season_int", "0")) + data["episode_number"] = int(meta.get("episode_int", "0")) headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + "User-Agent": f"Upload Assistant/2.1 ({platform.system()} {platform.release()})" } + params = {"api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip()} - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + if meta["debug"] is False: + response = requests.post( + url=self.upload_url, + files=files, + data=data, + headers=headers, + params=params, + ) try: console.print(response.json()) except Exception: @@ -167,27 +207,31 @@ async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category'], meta), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" + "api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip(), + "tmdbId": meta["tmdb"], + "categories[]": await self.get_cat_id(meta["category"], meta), + "types[]": await 
self.get_type_id(meta["type"]), + "resolutions[]": await self.get_res_id(meta["resolution"]), + "name": "", } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" + if meta["category"] == "TV": + params["name"] = ( + params["name"] + f" {meta.get('season', '')}{meta.get('episode', '')}" + ) + if meta.get("edition", "") != "": + params["name"] = params["name"] + f" {meta['edition']}" try: response = requests.get(url=self.search_url, params=params) response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] + for each in response["data"]: + result = [each][0]["attributes"]["name"] # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + console.print( + "[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect" + ) await asyncio.sleep(5) return dupes diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index 9e6ea24f8..97b159dc7 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -14,7 +14,7 @@ from datetime import datetime -class MTV(): +class MTV: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -25,15 +25,40 @@ class MTV(): def __init__(self, config): self.config = config - self.tracker = 'MTV' - self.source_flag = 'MTV' - self.upload_url = 'https://www.morethantv.me/upload.php' - self.forum_link = 'https://www.morethantv.me/wiki.php?action=article&id=73' - self.search_url = 'https://www.morethantv.me/api/torznab' + self.tracker = "MTV" + self.source_flag = "MTV" + self.upload_url = "https://www.morethantv.me/upload.php" + self.forum_link = "https://www.morethantv.me/wiki.php?action=article&id=73" + self.search_url = "https://www.morethantv.me/api/torznab" self.banned_groups = [ - 'aXXo', 'BRrip', 'CM8', 'CrEwSaDe', 'DNL', 'FaNGDiNG0', 'FRDS', 'HD2DVD', 'HDTime', 'iPlanet', - 'KiNGDOM', 'Leffe', 'mHD', 'mSD', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'PRODJi', 'RDN', 'SANTi', - 'STUTTERSHIT', 'TERMiNAL', 'ViSION', 'WAF', 'x0r', 'YIFY', ['EVO', 'WEB-DL Only'] + "aXXo", + "BRrip", + "CM8", + "CrEwSaDe", + "DNL", + "FaNGDiNG0", + "FRDS", + "HD2DVD", + "HDTime", + "iPlanet", + "KiNGDOM", + "Leffe", + "mHD", + "mSD", + "nHD", + "nikt0", + "nSD", + "NhaNc3", + "PRODJi", + "RDN", + "SANTi", + "STUTTERSHIT", + "TERMiNAL", + "ViSION", + "WAF", + "x0r", + "YIFY", + ["EVO", "WEB-DL Only"], ] pass @@ -45,22 +70,31 @@ async def upload(self, meta, disctype): await self.upload_with_retry(meta, cookiefile, common) async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): - approved_image_hosts = ['ptpimg', 'imgbox'] + approved_image_hosts = ["ptpimg", "imgbox"] # Check if the images are already hosted on an approved image host - if all(any(host in image['raw_url'] for host in approved_image_hosts) for image in meta['image_list']): - console.print("[green]Images are already hosted on an approved image host. Skipping re-upload.") - image_list = meta['image_list'] # Use the existing images + if all( + any(host in image["raw_url"] for host in approved_image_hosts) + for image in meta["image_list"] + ): + console.print( + "[green]Images are already hosted on an approved image host. Skipping re-upload." 
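
Review note on the approved-host gate above: all(any(...)) passes only when every screenshot's raw_url mentions at least one approved host; since this is substring matching, any URL containing "ptpimg" or "imgbox" anywhere qualifies. The check factored out:

    APPROVED_IMAGE_HOSTS = ("ptpimg", "imgbox")

    def all_on_approved_hosts(image_list: list[dict]) -> bool:
        return all(
            any(host in image["raw_url"] for host in APPROVED_IMAGE_HOSTS)
            for image in image_list
        )
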
+ ) + image_list = meta["image_list"] # Use the existing images else: # Proceed with the retry logic if images are not hosted on an approved image host while img_host_index <= len(approved_image_hosts): # Call handle_image_upload and pass the updated meta with the current image host index - image_list, retry_mode = await self.handle_image_upload(meta, img_host_index, approved_image_hosts) + image_list, retry_mode = await self.handle_image_upload( + meta, img_host_index, approved_image_hosts + ) # If retry_mode is True, switch to the next host if retry_mode: - console.print(f"[yellow]Switching to the next image host. Current index: {img_host_index}") + console.print( + f"[yellow]Switching to the next image host. Current index: {img_host_index}" + ) img_host_index += 1 continue @@ -69,7 +103,9 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): break if image_list is None: - console.print("[red]All image hosts failed. Please check your configuration.") + console.print( + "[red]All image hosts failed. Please check your configuration." + ) return # Proceed with the rest of the upload process @@ -78,25 +114,36 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): torrent = Torrent.read(torrent_path) if torrent.piece_size > 8388608: # 8 MiB in bytes - console.print("[red]Piece size is OVER 8M and does not work on MTV. Generating a new .torrent") + console.print( + "[red]Piece size is OVER 8M and does not work on MTV. Generating a new .torrent" + ) # Override the max_piece_size to 8 MiB - meta['max_piece_size'] = '8' # 8 MiB, to ensure the new torrent adheres to this limit + meta["max_piece_size"] = ( + "8" # 8 MiB, to ensure the new torrent adheres to this limit + ) # Determine include and exclude patterns based on whether it's a disc or not - if meta['is_disc']: - include = [] # Adjust as needed for disc-specific inclusions, make sure it's a list - exclude = [] # Adjust as needed for disc-specific exclusions, make sure it's a list + if meta["is_disc"]: + include = ( + [] + ) # Adjust as needed for disc-specific inclusions, make sure it's a list + exclude = ( + [] + ) # Adjust as needed for disc-specific exclusions, make sure it's a list else: include = ["*.mkv", "*.mp4", "*.ts"] exclude = ["*.*", "*sample.mkv", "!sample*.*"] # Create a new torrent with piece size explicitly set to 8 MiB from src.prep import Prep - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) + + prep = Prep( + screens=meta["screens"], img_host=meta["imghost"], config=self.config + ) new_torrent = prep.CustomTorrent( meta=meta, - path=Path(meta['path']), + path=Path(meta["path"]), trackers=["https://fake.tracker"], source="L4G", private=True, @@ -104,20 +151,24 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): include_globs=include, # Ensure this is always a list creation_date=datetime.now(), comment="Created by L4G's Upload Assistant", - created_by="L4G's Upload Assistant" + created_by="L4G's Upload Assistant", ) # Validate and write the new torrent new_torrent.validate_piece_size() new_torrent.generate(callback=prep.torf_cb, interval=5) - new_torrent.write(f"{meta['base_dir']}/tmp/{meta['uuid']}/MTV.torrent", overwrite=True) + new_torrent.write( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MTV.torrent", overwrite=True + ) torrent_filename = "MTV" - await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_filename) + await common.edit_torrent( + meta, self.tracker, 
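
Review note on the piece-size branch above: MTV caps pieces at 8 MiB (8388608 bytes), so oversized torrents are rebuilt with max_piece_size forced to 8. The guard itself, using torf as the hunk does (the path is a placeholder):

    from torf import Torrent

    MTV_MAX_PIECE_SIZE = 8 * 1024 * 1024  # 8 MiB

    torrent = Torrent.read("BASE.torrent")  # placeholder path
    if torrent.piece_size > MTV_MAX_PIECE_SIZE:
        print("Piece size over 8 MiB; regenerate with max_piece_size = 8")
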
self.source_flag, torrent_filename=torrent_filename + ) cat_id = await self.get_cat_id(meta) - resolution_id = await self.get_res_id(meta['resolution']) + resolution_id = await self.get_res_id(meta["resolution"]) source_id = await self.get_source_id(meta) origin_id = await self.get_origin_id(meta) des_tags = await self.get_tags(meta) @@ -127,42 +178,51 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): group_desc = await self.edit_group_desc(meta) mtv_name = await self.edit_name(meta) - anon = 1 if meta['anon'] != 0 or bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) else 0 + anon = ( + 1 + if meta["anon"] != 0 + or bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + else 0 + ) - desc_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt" - desc = open(desc_path, 'r', encoding='utf-8').read() + desc_path = ( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt" + ) + desc = open(desc_path, "r", encoding="utf-8").read() torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - with open(torrent_file_path, 'rb') as f: + with open(torrent_file_path, "rb") as f: tfile = f.read() - files = { - 'file_input': (f"{meta['name']}.torrent", tfile) - } + files = {"file_input": (f"{meta['name']}.torrent", tfile)} data = { - 'image': '', - 'title': mtv_name, - 'category': cat_id, - 'Resolution': resolution_id, - 'source': source_id, - 'origin': origin_id, - 'taglist': des_tags, - 'desc': desc, - 'groupDesc': group_desc, - 'ignoredupes': '1', - 'genre_tags': '---', - 'autocomplete_toggle': 'on', - 'fontfont': '-1', - 'fontsize': '-1', - 'auth': await self.get_auth(cookiefile), - 'anonymous': anon, - 'submit': 'true', + "image": "", + "title": mtv_name, + "category": cat_id, + "Resolution": resolution_id, + "source": source_id, + "origin": origin_id, + "taglist": des_tags, + "desc": desc, + "groupDesc": group_desc, + "ignoredupes": "1", + "genre_tags": "---", + "autocomplete_toggle": "on", + "fontfont": "-1", + "fontsize": "-1", + "auth": await self.get_auth(cookiefile), + "anonymous": anon, + "submit": "true", } - if not meta['debug']: + if not meta["debug"]: with requests.Session() as session: - with open(cookiefile, 'rb') as cf: + with open(cookiefile, "rb") as cf: session.cookies.update(pickle.load(cf)) response = session.post(url=self.upload_url, data=data, files=files) try: @@ -170,9 +230,13 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): console.print(response.url) else: if "authkey.php" in response.url: - console.print("[red]No DL link in response, It may have uploaded, check manually.") + console.print( + "[red]No DL link in response, It may have uploaded, check manually." + ) else: - console.print("[red]Upload Failed. It doesn't look like you are logged in.") + console.print( + "[red]Upload Failed. It doesn't look like you are logged in." 
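
Review note on the session handling above: cookies captured at login are pickled to disk and rehydrated into a fresh requests.Session for each upload. The load half as a helper (cookiefile path as in the hunk):

    import pickle
    import requests

    def load_session(cookiefile: str) -> requests.Session:
        session = requests.Session()
        with open(cookiefile, "rb") as cf:
            session.cookies.update(pickle.load(cf))
        return session
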
+ ) except Exception: console.print("[red]It may have uploaded, check manually.") print(traceback.print_exc()) @@ -181,70 +245,108 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): console.print(data) return - async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts=None): + async def handle_image_upload( + self, meta, img_host_index=1, approved_image_hosts=None + ): if approved_image_hosts is None: - approved_image_hosts = ['ptpimg', 'imgbox'] + approved_image_hosts = ["ptpimg", "imgbox"] - current_img_host_key = f'img_host_{img_host_index}' - current_img_host = self.config.get('DEFAULT', {}).get(current_img_host_key) + current_img_host_key = f"img_host_{img_host_index}" + current_img_host = self.config.get("DEFAULT", {}).get(current_img_host_key) if not current_img_host or current_img_host not in approved_image_hosts: - console.print("[red]Your preferred image host is not supported at MTV, re-uploading to an allowed image host.") + console.print( + "[red]Your preferred image host is not supported at MTV, re-uploading to an allowed image host." + ) retry_mode = True # Ensure retry_mode is set to True when switching hosts - meta['imghost'] = approved_image_hosts[0] # Switch to the first approved host + meta["imghost"] = approved_image_hosts[ + 0 + ] # Switch to the first approved host else: - meta['imghost'] = current_img_host - retry_mode = False # Start with retry_mode False unless we know we need to switch + meta["imghost"] = current_img_host + retry_mode = ( + False # Start with retry_mode False unless we know we need to switch + ) from src.prep import Prep - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) + + prep = Prep( + screens=meta["screens"], img_host=meta["imghost"], config=self.config + ) # Screenshot and upload process - prep.screenshots(Path(meta['path']), meta['name'], meta['uuid'], meta['base_dir'], meta) + prep.screenshots( + Path(meta["path"]), meta["name"], meta["uuid"], meta["base_dir"], meta + ) return_dict = {} # Call upload_screens with the appropriate retry_mode prep.upload_screens( meta, - screens=meta['screens'], + screens=meta["screens"], img_host_num=img_host_index, i=0, - total_screens=meta['screens'], + total_screens=meta["screens"], custom_img_list=[], # This remains to handle any custom logic in the original function return_dict=return_dict, - retry_mode=retry_mode # Honor the retry_mode flag passed in + retry_mode=retry_mode, # Honor the retry_mode flag passed in ) # Update meta['image_list'] with uploaded images - meta['image_list'] = return_dict.get('image_list', []) + meta["image_list"] = return_dict.get("image_list", []) # Ensure images are from approved hosts - if not all(any(x in image['raw_url'] for x in approved_image_hosts) for image in meta['image_list']): - console.print("[red]Unsupported image host detected, please use one of the approved image hosts") - return meta['image_list'], True # Trigger retry_mode if switching hosts + if not all( + any(x in image["raw_url"] for x in approved_image_hosts) + for image in meta["image_list"] + ): + console.print( + "[red]Unsupported image host detected, please use one of the approved image hosts" + ) + return meta["image_list"], True # Trigger retry_mode if switching hosts - return meta['image_list'], False # No need to retry, successful upload + return meta["image_list"], False # No need to retry, successful upload async def edit_desc(self, meta): - base = 
open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as desc: + base = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + with open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "w", + encoding="utf-8", + ) as desc: # adding bd_dump to description if it exits and adding empty string to mediainfo - if meta['bdinfo'] is not None: + if meta["bdinfo"] is not None: mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + bd_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ).read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8').read()[:-65].strip() + mi_dump = ( + open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", + "r", + encoding="utf-8", + ) + .read()[:-65] + .strip() + ) bd_dump = None if bd_dump: desc.write("[mediainfo]" + bd_dump + "[/mediainfo]\n\n") elif mi_dump: desc.write("[mediainfo]" + mi_dump + "[/mediainfo]\n\n") - images = meta['image_list'] + images = meta["image_list"] if len(images) > 0: desc.write("[spoiler=Screenshots]") for each in range(len(images)): - raw_url = images[each]['raw_url'] - img_url = images[each]['img_url'] + raw_url = images[each]["raw_url"] + img_url = images[each]["img_url"] desc.write(f"[url={raw_url}][img=250]{img_url}[/img][/url]") desc.write("[/spoiler]") desc.write(f"\n\n{base}") @@ -253,183 +355,226 @@ async def edit_desc(self, meta): async def edit_group_desc(self, meta): description = "" - if meta['imdb_id'] not in ("0", "", None): + if meta["imdb_id"] not in ("0", "", None): description += f"https://www.imdb.com/title/tt{meta['imdb_id']}" - if meta['tmdb'] != 0: + if meta["tmdb"] != 0: description += f"\nhttps://www.themoviedb.org/{str(meta['category'].lower())}/{str(meta['tmdb'])}" - if meta['tvdb_id'] != 0: + if meta["tvdb_id"] != 0: description += f"\nhttps://www.thetvdb.com/?id={str(meta['tvdb_id'])}" - if meta['tvmaze_id'] != 0: + if meta["tvmaze_id"] != 0: description += f"\nhttps://www.tvmaze.com/shows/{str(meta['tvmaze_id'])}" - if meta['mal_id'] != 0: + if meta["mal_id"] != 0: description += f"\nhttps://myanimelist.net/anime/{str(meta['mal_id'])}" return description async def edit_name(self, meta): - mtv_name = meta['uuid'] + mtv_name = meta["uuid"] # Try to use original filename if possible - if meta['source'].lower().replace('-', '') in mtv_name.replace('-', '').lower(): - if not meta['isdir']: + if meta["source"].lower().replace("-", "") in mtv_name.replace("-", "").lower(): + if not meta["isdir"]: mtv_name = os.path.splitext(mtv_name)[0] else: - mtv_name = meta['name'] - if meta.get('type') in ('WEBDL', 'WEBRIP', 'ENCODE') and "DD" in meta['audio']: - mtv_name = mtv_name.replace(meta['audio'], meta['audio'].replace(' ', '', 1)) - mtv_name = mtv_name.replace(meta.get('aka', ''), '') - if meta['category'] == "TV" and meta.get('tv_pack', 0) == 0 and meta.get('episode_title_storage', '').strip() != '' and meta['episode'].strip() != '': - mtv_name = mtv_name.replace(meta['episode'], f"{meta['episode']} {meta['episode_title_storage']}") - if 'DD+' in meta.get('audio', '') and 'DDP' in meta['uuid']: - mtv_name = mtv_name.replace('DD+', 'DDP') - mtv_name = mtv_name.replace('Dubbed', 
'').replace('Dual-Audio', 'DUAL') + mtv_name = meta["name"] + if ( + meta.get("type") in ("WEBDL", "WEBRIP", "ENCODE") + and "DD" in meta["audio"] + ): + mtv_name = mtv_name.replace( + meta["audio"], meta["audio"].replace(" ", "", 1) + ) + mtv_name = mtv_name.replace(meta.get("aka", ""), "") + if ( + meta["category"] == "TV" + and meta.get("tv_pack", 0) == 0 + and meta.get("episode_title_storage", "").strip() != "" + and meta["episode"].strip() != "" + ): + mtv_name = mtv_name.replace( + meta["episode"], + f"{meta['episode']} {meta['episode_title_storage']}", + ) + if "DD+" in meta.get("audio", "") and "DDP" in meta["uuid"]: + mtv_name = mtv_name.replace("DD+", "DDP") + mtv_name = mtv_name.replace("Dubbed", "").replace("Dual-Audio", "DUAL") # Add -NoGrp if missing tag - if meta['tag'] == "": + if meta["tag"] == "": mtv_name = f"{mtv_name}-NoGrp" - mtv_name = ' '.join(mtv_name.split()) + mtv_name = " ".join(mtv_name.split()) mtv_name = re.sub(r"[^0-9a-zA-ZÀ-ÿ. &+'\-\[\]]+", "", mtv_name) - mtv_name = mtv_name.replace(' ', '.').replace('..', '.') + mtv_name = mtv_name.replace(" ", ".").replace("..", ".") return mtv_name async def get_res_id(self, resolution): resolution_id = { - '8640p': '0', - '4320p': '4000', - '2160p': '2160', - '1440p': '1440', - '1080p': '1080', - '1080i': '1080', - '720p': '720', - '576p': '0', - '576i': '0', - '480p': '480', - '480i': '480' - }.get(resolution, '10') + "8640p": "0", + "4320p": "4000", + "2160p": "2160", + "1440p": "1440", + "1080p": "1080", + "1080i": "1080", + "720p": "720", + "576p": "0", + "576i": "0", + "480p": "480", + "480i": "480", + }.get(resolution, "10") return resolution_id async def get_cat_id(self, meta): - if meta['category'] == "MOVIE": - if meta['sd'] == 1: + if meta["category"] == "MOVIE": + if meta["sd"] == 1: return 2 else: return 1 - if meta['category'] == "TV": - if meta['tv_pack'] == 1: - if meta['sd'] == 1: + if meta["category"] == "TV": + if meta["tv_pack"] == 1: + if meta["sd"] == 1: return 6 else: return 5 else: - if meta['sd'] == 1: + if meta["sd"] == 1: return 4 else: return 3 async def get_source_id(self, meta): - if meta['is_disc'] == 'DVD': - return '1' - elif meta['is_disc'] == 'BDMV' or meta['type'] == "REMUX": - return '7' + if meta["is_disc"] == "DVD": + return "1" + elif meta["is_disc"] == "BDMV" or meta["type"] == "REMUX": + return "7" else: type_id = { - 'DISC': '1', - 'WEBDL': '9', - 'WEBRIP': '10', - 'HDTV': '1', - 'SDTV': '2', - 'TVRIP': '3', - 'DVD': '4', - 'DVDRIP': '5', - 'BDRIP': '8', - 'VHS': '6', - 'MIXED': '11', - 'Unknown': '12', - 'ENCODE': '7' - }.get(meta['type'], '0') + "DISC": "1", + "WEBDL": "9", + "WEBRIP": "10", + "HDTV": "1", + "SDTV": "2", + "TVRIP": "3", + "DVD": "4", + "DVDRIP": "5", + "BDRIP": "8", + "VHS": "6", + "MIXED": "11", + "Unknown": "12", + "ENCODE": "7", + }.get(meta["type"], "0") return type_id async def get_origin_id(self, meta): - if meta['personalrelease']: - return '4' - elif meta['scene']: - return '2' + if meta["personalrelease"]: + return "4" + elif meta["scene"]: + return "2" # returning P2P else: - return '3' + return "3" async def get_tags(self, meta): tags = [] # Genres - tags.extend([x.strip(', ').lower().replace(' ', '.') for x in meta['genres'].split(',')]) + tags.extend( + [x.strip(", ").lower().replace(" ", ".") for x in meta["genres"].split(",")] + ) # Resolution - tags.append(meta['resolution'].lower()) - if meta['sd'] == 1: - tags.append('sd') - elif meta['resolution'] in ['2160p', '4320p']: - tags.append('uhd') + tags.append(meta["resolution"].lower()) + if 
meta["sd"] == 1: + tags.append("sd") + elif meta["resolution"] in ["2160p", "4320p"]: + tags.append("uhd") else: - tags.append('hd') + tags.append("hd") # Streaming Service - if str(meta['service_longname']) != "": + if str(meta["service_longname"]) != "": tags.append(f"{meta['service_longname'].lower().replace(' ', '.')}.source") # Release Type/Source - for each in ['remux', 'WEB.DL', 'WEBRip', 'HDTV', 'BluRay', 'DVD', 'HDDVD']: - if (each.lower().replace('.', '') in meta['type'].lower()) or (each.lower().replace('-', '') in meta['source']): + for each in ["remux", "WEB.DL", "WEBRip", "HDTV", "BluRay", "DVD", "HDDVD"]: + if (each.lower().replace(".", "") in meta["type"].lower()) or ( + each.lower().replace("-", "") in meta["source"] + ): tags.append(each) # series tags - if meta['category'] == "TV": - if meta.get('tv_pack', 0) == 0: + if meta["category"] == "TV": + if meta.get("tv_pack", 0) == 0: # Episodes - if meta['sd'] == 1: - tags.extend(['episode.release', 'sd.episode']) + if meta["sd"] == 1: + tags.extend(["episode.release", "sd.episode"]) else: - tags.extend(['episode.release', 'hd.episode']) + tags.extend(["episode.release", "hd.episode"]) else: # Seasons - if meta['sd'] == 1: - tags.append('sd.season') + if meta["sd"] == 1: + tags.append("sd.season") else: - tags.append('hd.season') + tags.append("hd.season") # movie tags - if meta['category'] == 'MOVIE': - if meta['sd'] == 1: - tags.append('sd.movie') + if meta["category"] == "MOVIE": + if meta["sd"] == 1: + tags.append("sd.movie") else: - tags.append('hd.movie') + tags.append("hd.movie") # Audio tags audio_tag = "" - for each in ['dd', 'ddp', 'aac', 'truehd', 'mp3', 'mp2', 'dts', 'dts.hd', 'dts.x']: - if each in meta['audio'].replace('+', 'p').replace('-', '.').replace(':', '.').replace(' ', '.').lower(): - audio_tag = f'{each}.audio' + for each in [ + "dd", + "ddp", + "aac", + "truehd", + "mp3", + "mp2", + "dts", + "dts.hd", + "dts.x", + ]: + if ( + each + in meta["audio"] + .replace("+", "p") + .replace("-", ".") + .replace(":", ".") + .replace(" ", ".") + .lower() + ): + audio_tag = f"{each}.audio" tags.append(audio_tag) - if 'atmos' in meta['audio'].lower(): - tags.append('atmos.audio') + if "atmos" in meta["audio"].lower(): + tags.append("atmos.audio") # Video tags - tags.append(meta.get('video_codec').replace('AVC', 'h264').replace('HEVC', 'h265').replace('-', '')) + tags.append( + meta.get("video_codec") + .replace("AVC", "h264") + .replace("HEVC", "h265") + .replace("-", "") + ) # Group Tags - if meta['tag'] != "": + if meta["tag"] != "": tags.append(f"{meta['tag'][1:].replace(' ', '.')}.release") else: - tags.append('NOGRP.release') + tags.append("NOGRP.release") # Scene/P2P - if meta['scene']: - tags.append('scene.group.release') + if meta["scene"]: + tags.append("scene.group.release") else: - tags.append('p2p.group.release') + tags.append("p2p.group.release") # Has subtitles - if meta.get('is_disc', '') != "BDMV": - if any(track.get('@type', '') == "Text" for track in meta['mediainfo']['media']['track']): - tags.append('subtitles') + if meta.get("is_disc", "") != "BDMV": + if any( + track.get("@type", "") == "Text" + for track in meta["mediainfo"]["media"]["track"] + ): + tags.append("subtitles") else: - if len(meta['bdinfo']['subtitles']) >= 1: - tags.append('subtitles') + if len(meta["bdinfo"]["subtitles"]) >= 1: + tags.append("subtitles") - tags = ' '.join(tags) + tags = " ".join(tags) return tags async def validate_credentials(self, meta): @@ -438,7 +583,9 @@ async def validate_credentials(self, meta): await 
self.login(cookiefile) vcookie = await self.validate_cookies(meta, cookiefile) if vcookie is not True: - console.print('[red]Failed to validate cookies. Please confirm that the site is up and your username and password is valid.') + console.print( + "[red]Failed to validate cookies. Please confirm that the site is up and your username and password is valid." + ) recreate = cli_ui.ask_yes_no("Log in again and create new session?") if recreate is True: if os.path.exists(cookiefile): @@ -450,13 +597,15 @@ async def validate_credentials(self, meta): return False vapi = await self.validate_api() if vapi is not True: - console.print('[red]Failed to validate API. Please confirm that the site is up and your API key is valid.') + console.print( + "[red]Failed to validate API. Please confirm that the site is up and your API key is valid." + ) return True async def validate_api(self): url = self.search_url params = { - 'apikey': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + "apikey": self.config["TRACKERS"][self.tracker]["api_key"].strip(), } try: r = requests.get(url, params=params) @@ -472,11 +621,11 @@ async def validate_cookies(self, meta, cookiefile): url = "https://www.morethantv.me/index.php" if os.path.exists(cookiefile): with requests.Session() as session: - with open(cookiefile, 'rb') as cf: + with open(cookiefile, "rb") as cf: session.cookies.update(pickle.load(cf)) resp = session.get(url=url) - if meta['debug']: - console.log('[cyan]Validate Cookies:') + if meta["debug"]: + console.log("[cyan]Validate Cookies:") console.log(session.cookies.get_dict()) console.log(resp.url) if resp.text.find("Logout") != -1: @@ -490,22 +639,22 @@ async def get_auth(self, cookiefile): url = "https://www.morethantv.me/index.php" if os.path.exists(cookiefile): with requests.Session() as session: - with open(cookiefile, 'rb') as cf: + with open(cookiefile, "rb") as cf: session.cookies.update(pickle.load(cf)) resp = session.get(url=url) - auth = resp.text.rsplit('authkey=', 1)[1][:32] + auth = resp.text.rsplit("authkey=", 1)[1][:32] return auth async def login(self, cookiefile): with requests.Session() as session: - url = 'https://www.morethantv.me/login' + url = "https://www.morethantv.me/login" payload = { - 'username': self.config['TRACKERS'][self.tracker].get('username'), - 'password': self.config['TRACKERS'][self.tracker].get('password'), - 'keeploggedin': 1, - 'cinfo': '1920|1080|24|0', - 'submit': 'login', - 'iplocked': 1, + "username": self.config["TRACKERS"][self.tracker].get("username"), + "password": self.config["TRACKERS"][self.tracker].get("password"), + "keeploggedin": 1, + "cinfo": "1920|1080|24|0", + "submit": "login", + "iplocked": 1, # 'ssl' : 'yes' } res = session.get(url="https://www.morethantv.me/login") @@ -515,27 +664,33 @@ async def login(self, cookiefile): resp = session.post(url=url, data=payload) # handle 2fa - if resp.url.endswith('twofactor/login'): - otp_uri = self.config['TRACKERS'][self.tracker].get('otp_uri') + if resp.url.endswith("twofactor/login"): + otp_uri = self.config["TRACKERS"][self.tracker].get("otp_uri") if otp_uri: import pyotp + mfa_code = pyotp.parse_uri(otp_uri).now() else: - mfa_code = console.input('[yellow]MTV 2FA Code: ') + mfa_code = console.input("[yellow]MTV 2FA Code: ") two_factor_payload = { - 'token': resp.text.rsplit('name="token" value="', 1)[1][:48], - 'code': mfa_code, - 'submit': 'login' + "token": resp.text.rsplit('name="token" value="', 1)[1][:48], + "code": mfa_code, + "submit": "login", } - resp = 
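
Review note on the 2FA branch above: pyotp.parse_uri accepts an otpauth:// provisioning URI and returns a TOTP object whose .now() yields the current code. A self-contained example with a dummy secret:

    import pyotp

    otp_uri = "otpauth://totp/MTV:user?secret=JBSWY3DPEHPK3PXP&issuer=MTV"
    print(pyotp.parse_uri(otp_uri).now())  # six digits, rotates every 30 seconds
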
session.post(url="https://www.morethantv.me/twofactor/login", data=two_factor_payload) + resp = session.post( + url="https://www.morethantv.me/twofactor/login", + data=two_factor_payload, + ) # checking if logged in - if 'authkey=' in resp.text: - console.print('[green]Successfully logged in to MTV') - with open(cookiefile, 'wb') as cf: + if "authkey=" in resp.text: + console.print("[green]Successfully logged in to MTV") + with open(cookiefile, "wb") as cf: pickle.dump(session.cookies, cf) else: - console.print('[bold red]Something went wrong while trying to log into MTV') + console.print( + "[bold red]Something went wrong while trying to log into MTV" + ) await asyncio.sleep(1) console.print(resp.url) return @@ -544,35 +699,39 @@ async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 't': 'search', - 'apikey': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'q': "" + "t": "search", + "apikey": self.config["TRACKERS"][self.tracker]["api_key"].strip(), + "q": "", } - if meta['imdb_id'] not in ("0", "", None): - params['imdbid'] = "tt" + meta['imdb_id'] - elif meta['tmdb'] != "0": - params['tmdbid'] = meta['tmdb'] - elif meta['tvdb_id'] != 0: - params['tvdbid'] = meta['tvdb_id'] + if meta["imdb_id"] not in ("0", "", None): + params["imdbid"] = "tt" + meta["imdb_id"] + elif meta["tmdb"] != "0": + params["tmdbid"] = meta["tmdb"] + elif meta["tvdb_id"] != 0: + params["tvdbid"] = meta["tvdb_id"] else: - params['q'] = meta['title'].replace(': ', ' ').replace('’', '').replace("'", '') + params["q"] = ( + meta["title"].replace(": ", " ").replace("’", "").replace("'", "") + ) try: rr = requests.get(url=self.search_url, params=params) if rr is not None: # process search results response_xml = xml.etree.ElementTree.fromstring(rr.text) - for each in response_xml.find('channel').findall('item'): - result = each.find('title').text + for each in response_xml.find("channel").findall("item"): + result = each.find("title").text dupes.append(result) else: - if 'status_message' in rr: + if "status_message" in rr: console.print(f"[yellow]{rr.get('status_message')}") await asyncio.sleep(5) else: console.print("[red]Site Seems to be down or not responding to API") except Exception: - console.print("[red]Unable to search for existing torrents on site. Most likely the site is down.") + console.print( + "[red]Unable to search for existing torrents on site. Most likely the site is down." 
+ ) dupes.append("FAILED SEARCH") print(traceback.print_exc()) await asyncio.sleep(5) diff --git a/src/trackers/NBL.py b/src/trackers/NBL.py index 3711c54c2..93a911185 100644 --- a/src/trackers/NBL.py +++ b/src/trackers/NBL.py @@ -8,7 +8,7 @@ from src.console import console -class NBL(): +class NBL: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -16,24 +16,88 @@ class NBL(): Set type/category IDs Upload """ + def __init__(self, config): self.config = config - self.tracker = 'NBL' - self.source_flag = 'NBL' - self.upload_url = 'https://nebulance.io/upload.php' - self.search_url = 'https://nebulance.io/api.php' - self.api_key = self.config['TRACKERS'][self.tracker]['api_key'].strip() - self.banned_groups = ['0neshot', '3LTON', '4yEo', '[Oj]', 'AFG', 'AkihitoSubs', 'AniHLS', 'Anime Time', 'AnimeRG', 'AniURL', 'ASW', 'BakedFish', - 'bonkai77', 'Cleo', 'DeadFish', 'DeeJayAhmed', 'ELiTE', 'EMBER', 'eSc', 'EVO', 'FGT', 'FUM', 'GERMini', 'HAiKU', 'Hi10', 'ION10', - 'JacobSwaggedUp', 'JIVE', 'Judas', 'LOAD', 'MeGusta', 'Mr.Deadpool', 'mSD', 'NemDiggers', 'neoHEVC', 'NhaNc3', 'NOIVTC', - 'PlaySD', 'playXD', 'project-gxs', 'PSA', 'QaS', 'Ranger', 'RAPiDCOWS', 'Raze', 'Reaktor', 'REsuRRecTioN', 'RMTeam', 'ROBOTS', - 'SpaceFish', 'SPASM', 'SSA', 'Telly', 'Tenrai-Sensei', 'TM', 'Trix', 'URANiME', 'VipapkStudios', 'ViSiON', 'Wardevil', 'xRed', - 'XS', 'YakuboEncodes', 'YuiSubs', 'ZKBL', 'ZmN', 'ZMNT'] + self.tracker = "NBL" + self.source_flag = "NBL" + self.upload_url = "https://nebulance.io/upload.php" + self.search_url = "https://nebulance.io/api.php" + self.api_key = self.config["TRACKERS"][self.tracker]["api_key"].strip() + self.banned_groups = [ + "0neshot", + "3LTON", + "4yEo", + "[Oj]", + "AFG", + "AkihitoSubs", + "AniHLS", + "Anime Time", + "AnimeRG", + "AniURL", + "ASW", + "BakedFish", + "bonkai77", + "Cleo", + "DeadFish", + "DeeJayAhmed", + "ELiTE", + "EMBER", + "eSc", + "EVO", + "FGT", + "FUM", + "GERMini", + "HAiKU", + "Hi10", + "ION10", + "JacobSwaggedUp", + "JIVE", + "Judas", + "LOAD", + "MeGusta", + "Mr.Deadpool", + "mSD", + "NemDiggers", + "neoHEVC", + "NhaNc3", + "NOIVTC", + "PlaySD", + "playXD", + "project-gxs", + "PSA", + "QaS", + "Ranger", + "RAPiDCOWS", + "Raze", + "Reaktor", + "REsuRRecTioN", + "RMTeam", + "ROBOTS", + "SpaceFish", + "SPASM", + "SSA", + "Telly", + "Tenrai-Sensei", + "TM", + "Trix", + "URANiME", + "VipapkStudios", + "ViSiON", + "Wardevil", + "xRed", + "XS", + "YakuboEncodes", + "YuiSubs", + "ZKBL", + "ZmN", + "ZMNT", + ] pass async def get_cat_id(self, meta): - if meta.get('tv_pack', 0) == 1: + if meta.get("tv_pack", 0) == 1: cat_id = 3 else: cat_id = 1 @@ -44,32 +108,47 @@ async def edit_desc(self, meta): return async def upload(self, meta, disctype): - if meta['category'] != 'TV': + if meta["category"] != "TV": console.print("[red]Only TV Is allowed at NBL") return common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - if meta['bdinfo'] is not None: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + if meta["bdinfo"] is not None: + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ).read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read()[:-65].strip() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') - files = {'file_input': open_torrent} + mi_dump = ( + open( + 
f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ) + .read()[:-65] + .strip() + ) + open_torrent = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", + "rb", + ) + files = {"file_input": open_torrent} data = { - 'api_key': self.api_key, - 'tvmazeid': int(meta.get('tvmaze_id', 0)), - 'mediainfo': mi_dump, - 'category': await self.get_cat_id(meta), - 'ignoredupes': 'on' + "api_key": self.api_key, + "tvmazeid": int(meta.get("tvmaze_id", 0)), + "mediainfo": mi_dump, + "category": await self.get_cat_id(meta), + "ignoredupes": "on", } - if meta['debug'] is False: + if meta["debug"] is False: response = requests.post(url=self.upload_url, files=files, data=data) try: if response.ok: response = response.json() - console.print(response.get('message', response)) + console.print(response.get("message", response)) else: console.print(response) console.print(response.text) @@ -85,40 +164,45 @@ async def upload(self, meta, disctype): async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") - if int(meta.get('tvmaze_id', 0)) != 0: - search_term = {'tvmaze': int(meta['tvmaze_id'])} - elif int(meta.get('imdb_id', '0').replace('tt', '')) == 0: - search_term = {'imdb': meta.get('imdb_id', '0').replace('tt', '')} + if int(meta.get("tvmaze_id", 0)) != 0: + search_term = {"tvmaze": int(meta["tvmaze_id"])} + elif int(meta.get("imdb_id", "0").replace("tt", "")) == 0: + search_term = {"imdb": meta.get("imdb_id", "0").replace("tt", "")} else: - search_term = {'series': meta['title']} + search_term = {"series": meta["title"]} json = { - 'jsonrpc': '2.0', - 'id': 1, - 'method': 'getTorrents', - 'params': [ - self.api_key, - search_term - ] + "jsonrpc": "2.0", + "id": 1, + "method": "getTorrents", + "params": [self.api_key, search_term], } try: response = requests.get(url=self.search_url, json=json) response = response.json() - for each in response['result']['items']: - if meta['resolution'] in each['tags']: - if meta.get('tv_pack', 0) == 1: - if each['cat'] == "Season" and int(guessit(each['rls_name']).get('season', '1')) == int(meta.get('season_int')): - dupes.append(each['rls_name']) - elif int(guessit(each['rls_name']).get('episode', '0')) == int(meta.get('episode_int')): - dupes.append(each['rls_name']) + for each in response["result"]["items"]: + if meta["resolution"] in each["tags"]: + if meta.get("tv_pack", 0) == 1: + if each["cat"] == "Season" and int( + guessit(each["rls_name"]).get("season", "1") + ) == int(meta.get("season_int")): + dupes.append(each["rls_name"]) + elif int(guessit(each["rls_name"]).get("episode", "0")) == int( + meta.get("episode_int") + ): + dupes.append(each["rls_name"]) except requests.exceptions.JSONDecodeError: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + console.print( + "[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect" + ) await asyncio.sleep(5) except KeyError as e: console.print(response) console.print("\n\n\n") - if e.args[0] == 'result': + if e.args[0] == "result": console.print(f"Search Term: {search_term}") - console.print('[red]NBL API Returned an unexpected response, please manually check for dupes') + console.print( + "[red]NBL API Returned an unexpected response, please manually check for dupes" + ) dupes.append("ERROR: PLEASE CHECK FOR EXISTING RELEASES MANUALLY") await asyncio.sleep(5) else: diff --git a/src/trackers/OE.py b/src/trackers/OE.py index ec332dc97..14a0d5378 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -9,7 +9,7 @@ from src.console import console -class OE(): +class OE: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -17,89 +17,242 @@ class OE(): Set type/category IDs Upload """ + def __init__(self, config): self.config = config - self.tracker = 'OE' - self.source_flag = 'OE' - self.search_url = 'https://onlyencodes.cc/api/torrents/filter' - self.upload_url = 'https://onlyencodes.cc/api/torrents/upload' - self.torrent_url = 'https://onlyencodes.cc/api/torrents/' + self.tracker = "OE" + self.source_flag = "OE" + self.search_url = "https://onlyencodes.cc/api/torrents/filter" + self.upload_url = "https://onlyencodes.cc/api/torrents/upload" + self.torrent_url = "https://onlyencodes.cc/api/torrents/" self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" - self.banned_groups = ['0neshot', '3LT0N', '4K4U', '4yEo', '$andra', '[Oj]', 'AFG', 'AkihitoSubs', 'AniHLS', 'Anime Time', 'AnimeRG', 'AniURL', - 'AOC', 'AR', 'AROMA', 'ASW', 'aXXo', 'BakedFish', 'BiTOR', 'BRrip', 'bonkai', 'Cleo', 'CM8', 'C4K', 'CrEwSaDe', 'core', - 'd3g', 'DDR', 'DeadFish', 'DeeJayAhmed', 'DNL', 'ELiTE', 'EMBER', 'eSc', 'EVO', 'EZTV', 'FaNGDiNG0', 'FGT', 'fenix', 'FUM', - 'FRDS', 'FROZEN', 'GalaxyTV', 'GalaxyRG', 'GalaxyRG265', 'GERMini', 'Grym', 'GrymLegacy', 'HAiKU', 'HD2DVD', 'HDTime', - 'Hi10', 'HiQVE', 'ION10', 'iPlanet', 'JacobSwaggedUp', 'JIVE', 'Judas', 'KiNGDOM', 'LAMA', 'Leffe', 'LiGaS', 'LOAD', - 'LycanHD', 'MeGusta', 'MezRips', 'mHD', 'Mr.Deadpool', 'mSD', 'NemDiggers', 'neoHEVC', 'NeXus', 'nHD', 'nikt0', - 'nSD', 'NhaNc3', 'NOIVTC', 'pahe.in', 'PlaySD', 'playXD', 'PRODJi', 'ProRes', 'project-gxs', 'PSA', 'QaS', 'Ranger', - 'RAPiDCOWS', 'RARBG', 'Raze', 'RCDiVX', 'RDN', 'Reaktor', 'REsuRRecTioN', 'RMTeam', 'ROBOTS', 'rubix', 'SANTi', - 'SHUTTERSHIT', 'SpaceFish', 'SPASM', 'SSA', 'TBS', 'Telly', 'Tenrai-Sensei', 'TERMiNAL', 'TGx', 'TM', 'topaz', 'TSP', - 'TSPxL', 'URANiME', 'UTR', 'VipapkSudios', 'ViSION', 'WAF', 'Wardevil', 'x0r', 'xRed', 'XS', 'YakuboEncodes', 'YIFY', - 'YTS', 'YuiSubs', 'ZKBL', 'ZmN', 'ZMNT'] + self.banned_groups = [ + "0neshot", + "3LT0N", + "4K4U", + "4yEo", + "$andra", + "[Oj]", + "AFG", + "AkihitoSubs", + "AniHLS", + "Anime Time", + "AnimeRG", + "AniURL", + "AOC", + "AR", + "AROMA", + "ASW", + "aXXo", + "BakedFish", + "BiTOR", + "BRrip", + "bonkai", + "Cleo", + "CM8", + "C4K", + "CrEwSaDe", + "core", + "d3g", + "DDR", + "DeadFish", + "DeeJayAhmed", + "DNL", + "ELiTE", + "EMBER", + "eSc", + "EVO", + "EZTV", + "FaNGDiNG0", + "FGT", + "fenix", + "FUM", + "FRDS", + "FROZEN", + "GalaxyTV", + "GalaxyRG", + "GalaxyRG265", + "GERMini", + "Grym", + "GrymLegacy", + "HAiKU", + "HD2DVD", + "HDTime", + "Hi10", + "HiQVE", + "ION10", + "iPlanet", + "JacobSwaggedUp", + "JIVE", + "Judas", + "KiNGDOM", + "LAMA", + "Leffe", + "LiGaS", + 
"LOAD", + "LycanHD", + "MeGusta", + "MezRips", + "mHD", + "Mr.Deadpool", + "mSD", + "NemDiggers", + "neoHEVC", + "NeXus", + "nHD", + "nikt0", + "nSD", + "NhaNc3", + "NOIVTC", + "pahe.in", + "PlaySD", + "playXD", + "PRODJi", + "ProRes", + "project-gxs", + "PSA", + "QaS", + "Ranger", + "RAPiDCOWS", + "RARBG", + "Raze", + "RCDiVX", + "RDN", + "Reaktor", + "REsuRRecTioN", + "RMTeam", + "ROBOTS", + "rubix", + "SANTi", + "SHUTTERSHIT", + "SpaceFish", + "SPASM", + "SSA", + "TBS", + "Telly", + "Tenrai-Sensei", + "TERMiNAL", + "TGx", + "TM", + "topaz", + "TSP", + "TSPxL", + "URANiME", + "UTR", + "VipapkSudios", + "ViSION", + "WAF", + "Wardevil", + "x0r", + "xRed", + "XS", + "YakuboEncodes", + "YIFY", + "YTS", + "YuiSubs", + "ZKBL", + "ZmN", + "ZMNT", + ] pass async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, self.tracker, self.signature) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('video_codec'), meta.get('category', "")) - resolution_id = await self.get_res_id(meta['resolution']) + cat_id = await self.get_cat_id(meta["category"]) + type_id = await self.get_type_id( + meta["type"], + meta.get("tv_pack", 0), + meta.get("video_codec"), + meta.get("category", ""), + ) + resolution_id = await self.get_res_id(meta["resolution"]) oe_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + if ( + meta["anon"] == 0 + and bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + is False + ): anon = 0 else: anon = 1 - if meta['bdinfo'] is not None: + if meta["bdinfo"] is not None: mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + bd_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ).read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ).read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') - files = {'torrent': open_torrent} + desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + open_torrent = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", + "rb", + ) + files = {"torrent": open_torrent} data = { - 'name': oe_name, - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb_id'].replace('tt', ''), - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, + "name": oe_name, + "description": desc, + "mediainfo": mi_dump, + "bdinfo": bd_dump, + "category_id": cat_id, + "type_id": 
type_id, + "resolution_id": resolution_id, + "tmdb": meta["tmdb"], + "imdb": meta["imdb_id"].replace("tt", ""), + "tvdb": meta["tvdb_id"], + "mal": meta["mal_id"], + "igdb": 0, + "anonymous": anon, + "stream": meta["stream"], + "sd": meta["sd"], + "keywords": meta["keywords"], + "personal_release": int(meta.get("personalrelease", False)), + "internal": 0, + "featured": 0, + "free": 0, + "doubleup": 0, + "sticky": 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 + if self.config["TRACKERS"][self.tracker].get("internal", False) is True: + if meta["tag"] != "" and ( + meta["tag"][1:] + in self.config["TRACKERS"][self.tracker].get("internal_groups", []) + ): + data["internal"] = 1 - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') + if meta.get("category") == "TV": + data["season_number"] = meta.get("season_int", "0") + data["episode_number"] = meta.get("episode_int", "0") headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + "User-Agent": f"Upload Assistant/2.1 ({platform.system()} {platform.release()})" } + params = {"api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip()} - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + if meta["debug"] is False: + response = requests.post( + url=self.upload_url, + files=files, + data=data, + headers=headers, + params=params, + ) try: console.print(response.json()) @@ -113,83 +266,90 @@ async def upload(self, meta, disctype): open_torrent.close() async def edit_name(self, meta): - oe_name = meta.get('name') + oe_name = meta.get("name") return oe_name async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') + "MOVIE": "1", + "TV": "2", + }.get(category_name, "0") return category_id async def get_type_id(self, type, tv_pack, video_codec, category): type_id = { - 'DISC': '19', - 'REMUX': '20', - 'WEBDL': '21', - }.get(type, '0') + "DISC": "19", + "REMUX": "20", + "WEBDL": "21", + }.get(type, "0") if type == "WEBRIP": if video_codec == "HEVC": # x265 Encode - type_id = '10' - if video_codec == 'AV1': + type_id = "10" + if video_codec == "AV1": # AV1 Encode - type_id = '14' - if video_codec == 'AVC': + type_id = "14" + if video_codec == "AVC": # x264 Encode - type_id = '15' + type_id = "15" if type == "ENCODE": if video_codec == "HEVC": # x265 Encode - type_id = '10' - if video_codec == 'AV1': + type_id = "10" + if video_codec == "AV1": # AV1 Encode - type_id = '14' - if video_codec == 'AVC': + type_id = "14" + if video_codec == "AVC": # x264 Encode - type_id = '15' + type_id = "15" return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') + "8640p": "10", + "4320p": "1", + "2160p": "2", + "1440p": "3", + "1080p": "3", + "1080i": "4", + "720p": "5", + "576p": "6", + "576i": "7", + "480p": "8", + "480i": "9", + }.get(resolution, "10") return resolution_id async def search_existing(self, meta, 
disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('sd', 0), meta.get('category', "")), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" + "api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip(), + "tmdbId": meta["tmdb"], + "categories[]": await self.get_cat_id(meta["category"]), + "types[]": await self.get_type_id( + meta["type"], + meta.get("tv_pack", 0), + meta.get("sd", 0), + meta.get("category", ""), + ), + "resolutions[]": await self.get_res_id(meta["resolution"]), + "name": "", } - if meta['category'] == 'TV': - params['name'] = f"{meta.get('season', '')}{meta.get('episode', '')}" - if meta.get('edition', "") != "": - params['name'] + meta['edition'] + if meta["category"] == "TV": + params["name"] = f"{meta.get('season', '')}{meta.get('episode', '')}" + if meta.get("edition", "") != "": + params["name"] += meta["edition"] try: response = requests.get(url=self.search_url, params=params) response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] + for each in response["data"]: + result = [each][0]["attributes"]["name"] dupes.append(result) except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + console.print( + "[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect" + ) await asyncio.sleep(5) return dupes diff --git a/src/trackers/OTW.py b/src/trackers/OTW.py index 766ebd767..717e1041d 100644 --- a/src/trackers/OTW.py +++ b/src/trackers/OTW.py @@ -9,7 +9,7 @@ from src.console import console -class OTW(): +class OTW: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -20,116 +20,146 @@ class OTW(): def __init__(self, config): self.config = config - self.tracker = 'OTW' - self.source_flag = 'OLD' - self.upload_url = 'https://oldtoons.world/api/torrents/upload' - self.search_url = 'https://oldtoons.world/api/torrents/filter' + self.tracker = "OTW" + self.source_flag = "OLD" + self.upload_url = "https://oldtoons.world/api/torrents/upload" + self.search_url = "https://oldtoons.world/api/torrents/filter" self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = [""] pass async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') + "MOVIE": "1", + "TV": "2", + }.get(category_name, "0") return category_id async def get_type_id(self, type): type_id = { - 'DISC': '1', - 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - 'ENCODE': '3' - }.get(type, '0') + "DISC": "1", + "REMUX": "2", + "WEBDL": "4", + "WEBRIP": "5", + "HDTV": "6", + "ENCODE": "3", + }.get(type, "0") return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') + "8640p": "10", + "4320p": "1", + "2160p": "2", + "1440p": "3", + "1080p": "3", + "1080i": "4", + "720p": "5", + "576p": "6", + "576i": "7", + "480p": "8", + "480i": "9", +
}.get(resolution, "10") return resolution_id async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) + cat_id = await self.get_cat_id(meta["category"]) + type_id = await self.get_type_id(meta["type"]) + resolution_id = await self.get_res_id(meta["resolution"]) await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + region_id = await common.unit3d_region_ids(meta.get("region")) + distributor_id = await common.unit3d_distributor_ids(meta.get("distributor")) + if ( + meta["anon"] == 0 + and bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + is False + ): anon = 0 else: anon = 1 - if meta['bdinfo'] is not None: + if meta["bdinfo"] is not None: mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + bd_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ).read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ).read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') - files = {'torrent': open_torrent} + desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + open_torrent = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", + "rb", + ) + files = {"torrent": open_torrent} data = { - 'name': meta['name'], - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb_id'].replace('tt', ''), - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, + "name": meta["name"], + "description": desc, + "mediainfo": mi_dump, + "bdinfo": bd_dump, + "category_id": cat_id, + "type_id": type_id, + "resolution_id": resolution_id, + "tmdb": meta["tmdb"], + "imdb": meta["imdb_id"].replace("tt", ""), + "tvdb": meta["tvdb_id"], + "mal": meta["mal_id"], + "igdb": 0, + "anonymous": anon, + "stream": meta["stream"], + "sd": meta["sd"], + "keywords": meta["keywords"], + "personal_release": int(meta.get("personalrelease", False)), + "internal": 0, + "featured": 0, + "free": 0, + "doubleup": 0, + "sticky": 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', 
[])): - data['internal'] = 1 + if self.config["TRACKERS"][self.tracker].get("internal", False) is True: + if meta["tag"] != "" and ( + meta["tag"][1:] + in self.config["TRACKERS"][self.tracker].get("internal_groups", []) + ): + data["internal"] = 1 if region_id != 0: - data['region_id'] = region_id + data["region_id"] = region_id if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') + data["distributor_id"] = distributor_id + if meta.get("category") == "TV": + data["season_number"] = meta.get("season_int", "0") + data["episode_number"] = meta.get("episode_int", "0") headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + "User-Agent": f"Upload Assistant/2.1 ({platform.system()} {platform.release()})" } + params = {"api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip()} - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + if meta["debug"] is False: + response = requests.post( + url=self.upload_url, + files=files, + data=data, + headers=headers, + params=params, + ) try: console.print(response.json()) except Exception: @@ -144,25 +174,27 @@ async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" + "api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip(), + "tmdbId": meta["tmdb"], + "categories[]": await self.get_cat_id(meta["category"]), + "types[]": await self.get_type_id(meta["type"]), + "resolutions[]": await self.get_res_id(meta["resolution"]), + "name": "", } - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" + if meta.get("edition", "") != "": + params["name"] = params["name"] + f" {meta['edition']}" try: response = requests.get(url=self.search_url, params=params) response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] + for each in response["data"]: + result = [each][0]["attributes"]["name"] # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + console.print( + "[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect" + ) await asyncio.sleep(5) return dupes diff --git a/src/trackers/PSS.py b/src/trackers/PSS.py index 0f0fde007..69adfecbb 100644 --- a/src/trackers/PSS.py +++ b/src/trackers/PSS.py @@ -9,7 +9,7 @@ from src.console import console -class PSS(): +class PSS: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -20,118 +20,212 @@ class PSS(): def __init__(self, config): self.config = config - self.tracker = 'PSS' - self.source_flag = 'PSS' - self.upload_url = 'https://privatesilverscreen.cc/api/torrents/upload' - self.search_url = 'https://privatesilverscreen.cc/api/torrents/filter' - self.signature = '\n[center][url=https://privatesilverscreen.cc/pages/1]Please Seed[/url][/center]' - self.banned_groups = ['4K4U', 'AROMA', 'd3g', 'edge2020', 'EMBER', 'EVO', 'FGT', 'NeXus', 'ION10', 'iVy', 'Judas', 'LAMA', 'MeGusta', 'nikt0', 'OEPlus', 'OFT', 'OsC', 'PYC', - 'QxR', 'Ralphy', 'RARBG', 'SAMPA', 'Sicario', 'Silence', 'STUTTERSHIT', 'Tigole', 'TSP', 'TSPxL', 'Will1869', 'x0r', 'YIFY', 'core', 'ZMNT', - 'msd', 'nikt0', 'aXXo', 'BRrip', 'CM8', 'CrEwSaDe', 'DNL', 'FaNGDiNG0', 'FRDS', 'HD2DVD', 'HDTime', 'Leffe', 'mHD', 'mSD', 'nHD', 'nSD', 'NhaNc3', 'PRODJi', - 'RDN', 'SANTi', 'ViSION', 'WAF', 'YTS', 'FROZEN', 'UTR', 'Grym', 'GrymLegacy', 'CK4', 'ProRes', 'MezRips', 'GalaxyRG', 'RCDiVX', 'LycanHD'] + self.tracker = "PSS" + self.source_flag = "PSS" + self.upload_url = "https://privatesilverscreen.cc/api/torrents/upload" + self.search_url = "https://privatesilverscreen.cc/api/torrents/filter" + self.signature = "\n[center][url=https://privatesilverscreen.cc/pages/1]Please Seed[/url][/center]" + self.banned_groups = [ + "4K4U", + "AROMA", + "d3g", + "edge2020", + "EMBER", + "EVO", + "FGT", + "NeXus", + "ION10", + "iVy", + "Judas", + "LAMA", + "MeGusta", + "nikt0", + "OEPlus", + "OFT", + "OsC", + "PYC", + "QxR", + "Ralphy", + "RARBG", + "SAMPA", + "Sicario", + "Silence", + "STUTTERSHIT", + "Tigole", + "TSP", + "TSPxL", + "Will1869", + "x0r", + "YIFY", + "core", + "ZMNT", + "msd", + "nikt0", + "aXXo", + "BRrip", + "CM8", + "CrEwSaDe", + "DNL", + "FaNGDiNG0", + "FRDS", + "HD2DVD", + "HDTime", + "Leffe", + "mHD", + "mSD", + "nHD", + "nSD", + "NhaNc3", + "PRODJi", + "RDN", + "SANTi", + "ViSION", + "WAF", + "YTS", + "FROZEN", + "UTR", + "Grym", + "GrymLegacy", + "CK4", + "ProRes", + "MezRips", + "GalaxyRG", + "RCDiVX", + "LycanHD", + ] pass async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') + "MOVIE": "1", + "TV": "2", + }.get(category_name, "0") return category_id async def get_type_id(self, type): type_id = { - 'DISC': '1', - 'REMUX': '2', - 'ENCODE': '3', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - }.get(type, '0') + "DISC": "1", + "REMUX": "2", + "ENCODE": "3", + "WEBDL": "4", + "WEBRIP": "5", + "HDTV": "6", + }.get(type, "0") return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') + "8640p": "10", + "4320p": "1", + "2160p": "2", + "1080p": "3", + "1080i": "4", + "720p": "5", + "576p": "6", + "576i": "7", + "480p": "8", + "480i": "9", + }.get(resolution, "10") return resolution_id async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) - 
type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) + cat_id = await self.get_cat_id(meta["category"]) + type_id = await self.get_type_id(meta["type"]) + resolution_id = await self.get_res_id(meta["resolution"]) await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + region_id = await common.unit3d_region_ids(meta.get("region")) + distributor_id = await common.unit3d_distributor_ids(meta.get("distributor")) + if ( + meta["anon"] == 0 + and bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + is False + ): anon = 0 else: anon = 1 - if meta['bdinfo'] is not None: + if meta["bdinfo"] is not None: mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + bd_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ).read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ).read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') - files = {'torrent': open_torrent} + desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + open_torrent = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", + "rb", + ) + files = {"torrent": open_torrent} data = { - 'name': meta['name'], - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb_id'].replace('tt', ''), - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, + "name": meta["name"], + "description": desc, + "mediainfo": mi_dump, + "bdinfo": bd_dump, + "category_id": cat_id, + "type_id": type_id, + "resolution_id": resolution_id, + "tmdb": meta["tmdb"], + "imdb": meta["imdb_id"].replace("tt", ""), + "tvdb": meta["tvdb_id"], + "mal": meta["mal_id"], + "igdb": 0, + "anonymous": anon, + "stream": meta["stream"], + "sd": meta["sd"], + "keywords": meta["keywords"], + "personal_release": int(meta.get("personalrelease", False)), + "internal": 0, + "featured": 0, + "free": 0, + "doubleup": 0, + "sticky": 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 + if self.config["TRACKERS"][self.tracker].get("internal", False) is True: + if meta["tag"] != "" and ( + meta["tag"][1:] + in self.config["TRACKERS"][self.tracker].get("internal_groups", []) + ): + 
data["internal"] = 1 if region_id != 0: - data['region_id'] = region_id + data["region_id"] = region_id if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') + data["distributor_id"] = distributor_id + if meta.get("category") == "TV": + data["season_number"] = meta.get("season_int", "0") + data["episode_number"] = meta.get("episode_int", "0") headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + "User-Agent": f"Upload Assistant/2.1 ({platform.system()} {platform.release()})" } + params = {"api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip()} - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + if meta["debug"] is False: + response = requests.post( + url=self.upload_url, + files=files, + data=data, + headers=headers, + params=params, + ) try: console.print(response.json()) except Exception: @@ -146,25 +240,27 @@ async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" + "api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip(), + "tmdbId": meta["tmdb"], + "categories[]": await self.get_cat_id(meta["category"]), + "types[]": await self.get_type_id(meta["type"]), + "resolutions[]": await self.get_res_id(meta["resolution"]), + "name": "", } - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" + if meta.get("edition", "") != "": + params["name"] = params["name"] + f" {meta['edition']}" try: response = requests.get(url=self.search_url, params=params) response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] + for each in response["data"]: + result = [each][0]["attributes"]["name"] # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + console.print( + "[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect" + ) await asyncio.sleep(5) return dupes diff --git a/src/trackers/PTER.py b/src/trackers/PTER.py index 88fea3a80..f6ef3daa9 100644 --- a/src/trackers/PTER.py +++ b/src/trackers/PTER.py @@ -14,17 +14,17 @@ from src.console import console -class PTER(): +class PTER: def __init__(self, config): self.config = config - self.tracker = 'PTER' - self.source_flag = 'PTER' - self.passkey = str(config['TRACKERS']['PTER'].get('passkey', '')).strip() - self.username = config['TRACKERS']['PTER'].get('username', '').strip() - self.password = config['TRACKERS']['PTER'].get('password', '').strip() - self.rehost_images = config['TRACKERS']['PTER'].get('img_rehost', False) - self.ptgen_api = config['TRACKERS']['PTER'].get('ptgen_api').strip() + self.tracker = "PTER" + self.source_flag = "PTER" + self.passkey = str(config["TRACKERS"]["PTER"].get("passkey", "")).strip() + self.username = config["TRACKERS"]["PTER"].get("username", "").strip() + self.password = config["TRACKERS"]["PTER"].get("password", "").strip() + self.rehost_images = config["TRACKERS"]["PTER"].get("img_rehost", False) + self.ptgen_api = config["TRACKERS"]["PTER"].get("ptgen_api").strip() self.ptgen_retry = 3 self.signature = None @@ -33,7 +33,9 @@ def __init__(self, config): async def validate_credentials(self, meta): vcookie = await self.validate_cookies(meta) if vcookie is not True: - console.print('[red]Failed to validate cookies. Please confirm that the site is up and your passkey is valid.') + console.print( + "[red]Failed to validate cookies. Please confirm that the site is up and your passkey is valid." + ) return False return True @@ -46,12 +48,17 @@ async def validate_cookies(self, meta): session.cookies.update(await common.parseCookieFile(cookiefile)) resp = session.get(url=url) - if meta['debug']: - console.print('[cyan]Cookies:') + if meta["debug"]: + console.print("[cyan]Cookies:") console.print(session.cookies.get_dict()) console.print("\n\n") console.print(resp.text) - if resp.text.find("""""") != -1: + if ( + resp.text.find( + """""" + ) + != -1 + ): return True else: return False @@ -66,19 +73,19 @@ async def search_existing(self, meta, disctype): if os.path.exists(cookiefile): with requests.Session() as session: session.cookies.update(await common.parseCookieFile(cookiefile)) - if int(meta['imdb_id'].replace('tt', '')) != 0: + if int(meta["imdb_id"].replace("tt", "")) != 0: imdb = f"tt{meta['imdb_id']}" else: imdb = "" source = await self.get_type_medium_id(meta) search_url = f"https://pterclub.com/torrents.php?search={imdb}&incldead=0&search_mode=0&source{source}=1" r = session.get(search_url) - soup = BeautifulSoup(r.text, 'lxml') - rows = soup.select('table.torrents > tr:has(table.torrentname)') + soup = BeautifulSoup(r.text, "lxml") + rows = soup.select("table.torrents > tr:has(table.torrentname)") for row in rows: text = row.select_one('a[href^="details.php?id="]') if text is not None: - release = text.attrs['title'] + release = text.attrs["title"] if release: dupes.append(release) else: @@ -89,16 +96,22 @@ async def search_existing(self, meta, disctype): async def get_type_category_id(self, meta): cat_id = "EXIT" - if meta['category'] == 'MOVIE': + if meta["category"] == "MOVIE": cat_id = 401 - if meta['category'] == 'TV': + if meta["category"] == "TV": cat_id = 404 - if 'documentary' in meta.get("genres", "").lower() or 'documentary' in meta.get("keywords", "").lower(): + if ( + "documentary" in meta.get("genres", "").lower() + or "documentary" in 
meta.get("keywords", "").lower() + ): cat_id = 402 - if 'Animation' in meta.get("genres", "").lower() or 'Animation' in meta.get("keywords", "").lower(): + if ( + "Animation" in meta.get("genres", "").lower() + or "Animation" in meta.get("keywords", "").lower() + ): cat_id = 403 return cat_id @@ -107,12 +120,28 @@ async def get_area_id(self, meta): area_id = 8 area_map = { # To do - "中国大陆": 1, "中国香港": 2, "中国台湾": 3, "美国": 4, "日本": 6, "韩国": 5, - "印度": 7, "法国": 4, "意大利": 4, "德国": 4, "西班牙": 4, "葡萄牙": 4, - "英国": 4, "阿根廷": 8, "澳大利亚": 4, "比利时": 4, - "巴西": 8, "加拿大": 4, "瑞士": 4, "智利": 8, + "中国大陆": 1, + "中国香港": 2, + "中国台湾": 3, + "美国": 4, + "日本": 6, + "韩国": 5, + "印度": 7, + "法国": 4, + "意大利": 4, + "德国": 4, + "西班牙": 4, + "葡萄牙": 4, + "英国": 4, + "阿根廷": 8, + "澳大利亚": 4, + "比利时": 4, + "巴西": 8, + "加拿大": 4, + "瑞士": 4, + "智利": 8, } - regions = meta['ptgen'].get("region", []) + regions = meta["ptgen"].get("region", []) for area in area_map.keys(): if area in regions: return area_map[area] @@ -121,66 +150,81 @@ async def get_area_id(self, meta): async def get_type_medium_id(self, meta): medium_id = "EXIT" # 1 = UHD Discs - if meta.get('is_disc', '') in ("BDMV", "HD DVD"): - if meta['resolution'] == '2160p': + if meta.get("is_disc", "") in ("BDMV", "HD DVD"): + if meta["resolution"] == "2160p": medium_id = 1 else: medium_id = 2 # BD Discs - if meta.get('is_disc', '') == "DVD": + if meta.get("is_disc", "") == "DVD": medium_id = 7 # 4 = HDTV - if meta.get('type', '') == "HDTV": + if meta.get("type", "") == "HDTV": medium_id = 4 # 6 = Encode - if meta.get('type', '') in ("ENCODE", "WEBRIP"): + if meta.get("type", "") in ("ENCODE", "WEBRIP"): medium_id = 6 # 3 = Remux - if meta.get('type', '') == "REMUX": + if meta.get("type", "") == "REMUX": medium_id = 3 # 5 = WEB-DL - if meta.get('type', '') == "WEBDL": + if meta.get("type", "") == "WEBDL": medium_id = 5 return medium_id async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as descfile: + base = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + with open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "w", + encoding="utf-8", + ) as descfile: from src.bbcode import BBCODE from src.trackers.COMMON import COMMON + common = COMMON(config=self.config) - if int(meta.get('imdb_id', '0').replace('tt', '')) != 0: + if int(meta.get("imdb_id", "0").replace("tt", "")) != 0: ptgen = await common.ptgen(meta, self.ptgen_api, self.ptgen_retry) - if ptgen.strip() != '': + if ptgen.strip() != "": descfile.write(ptgen) bbcode = BBCODE() - if meta.get('discs', []) != []: - discs = meta['discs'] + if meta.get("discs", []) != []: + discs = meta["discs"] for each in discs: - if each['type'] == "BDMV": + if each["type"] == "BDMV": descfile.write(f"[hide=BDInfo]{each['summary']}[/hide]\n") descfile.write("\n") pass - if each['type'] == "DVD": + if each["type"] == "DVD": descfile.write(f"{each['name']}:\n") - descfile.write(f"[hide=mediainfo][{each['vob_mi']}[/hide] [hide=mediainfo][{each['ifo_mi']}[/hide]\n") + descfile.write( + f"[hide=mediainfo][{each['vob_mi']}[/hide] [hide=mediainfo][{each['ifo_mi']}[/hide]\n" + ) descfile.write("\n") else: - mi = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8').read() + mi = open( + 
f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", + "r", + encoding="utf-8", + ).read() descfile.write(f"[hide=mediainfo]{mi}[/hide]") descfile.write("\n") desc = base desc = bbcode.convert_code_to_quote(desc) desc = bbcode.convert_spoiler_to_hide(desc) desc = bbcode.convert_comparison_to_centered(desc, 1000) - desc = desc.replace('[img]', '[img]') + desc = desc.replace("[img]", "[img]") desc = re.sub(r"(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) descfile.write(desc) @@ -189,18 +233,18 @@ async def edit_desc(self, meta): images = await self.pterimg_upload(meta) if len(images) > 0: descfile.write("[center]") - for each in range(len(images[:int(meta['screens'])])): - web_url = images[each]['web_url'] - img_url = images[each]['img_url'] + for each in range(len(images[: int(meta["screens"])])): + web_url = images[each]["web_url"] + img_url = images[each]["img_url"] descfile.write(f"[url={web_url}][img]{img_url}[/img][/url]") descfile.write("[/center]") else: - images = meta['image_list'] + images = meta["image_list"] if len(images) > 0: descfile.write("[center]") - for each in range(len(images[:int(meta['screens'])])): - web_url = images[each]['web_url'] - img_url = images[each]['img_url'] + for each in range(len(images[: int(meta["screens"])])): + web_url = images[each]["web_url"] + img_url = images[each]["img_url"] descfile.write(f"[url={web_url}][img]{img_url}[/img][/url]") descfile.write("[/center]") @@ -216,27 +260,35 @@ async def get_auth_token(self, meta): with requests.Session() as session: loggedIn = False if os.path.exists(cookiefile): - with open(cookiefile, 'rb') as cf: + with open(cookiefile, "rb") as cf: session.cookies.update(pickle.load(cf)) r = session.get("https://s3.pterclub.com") loggedIn = await self.validate_login(r) else: - console.print("[yellow]Pterimg Cookies not found. Creating new session.") + console.print( + "[yellow]Pterimg Cookies not found. Creating new session." + ) if loggedIn is True: - auth_token = re.search(r'auth_token.*?\"(\w+)\"', r.text).groups()[0] + auth_token = re.search(r"auth_token.*?\"(\w+)\"", r.text).groups()[0] else: data = { - 'login-subject': self.username, - 'password': self.password, - 'keep-login': 1 + "login-subject": self.username, + "password": self.password, + "keep-login": 1, } r = session.get("https://s3.pterclub.com") - data['auth_token'] = re.search(r'auth_token.*?\"(\w+)\"', r.text).groups()[0] - loginresponse = session.post(url='https://s3.pterclub.com/login', data=data) + data["auth_token"] = re.search( + r"auth_token.*?\"(\w+)\"", r.text + ).groups()[0] + loginresponse = session.post( + url="https://s3.pterclub.com/login", data=data + ) if not loginresponse.ok: raise LoginException("Failed to login to Pterimg. 
") # noqa #F405 - auth_token = re.search(r'auth_token = *?\"(\w+)\"', loginresponse.text).groups()[0] - with open(cookiefile, 'wb') as cf: + auth_token = re.search( + r"auth_token = *?\"(\w+)\"", loginresponse.text + ).groups()[0] + with open(cookiefile, "wb") as cf: pickle.dump(session.cookies, cf) return auth_token @@ -249,72 +301,87 @@ async def validate_login(self, response): return loggedIn async def pterimg_upload(self, meta): - images = glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['filename']}-*.png") - url = 'https://s3.pterclub.com' + images = glob.glob( + f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['filename']}-*.png" + ) + url = "https://s3.pterclub.com" image_list = [] data = { - 'type': 'file', - 'action': 'upload', - 'nsfw': 0, - 'auth_token': await self.get_auth_token(meta) + "type": "file", + "action": "upload", + "nsfw": 0, + "auth_token": await self.get_auth_token(meta), } cookiefile = f"{meta['base_dir']}/data/cookies/Pterimg.pickle" with requests.Session() as session: if os.path.exists(cookiefile): - with open(cookiefile, 'rb') as cf: + with open(cookiefile, "rb") as cf: session.cookies.update(pickle.load(cf)) files = {} for i in range(len(images)): - files = {'source': open(images[i], 'rb')} - req = session.post(f'{url}/json', data=data, files=files) + files = {"source": open(images[i], "rb")} + req = session.post(f"{url}/json", data=data, files=files) try: res = req.json() except json.decoder.JSONDecodeError: res = {} if not req.ok: - if res['error']['message'] in ('重复上传', 'Duplicated upload'): + if res["error"]["message"] in ( + "重复上传", + "Duplicated upload", + ): continue - raise (f'HTTP {req.status_code}, reason: {res["error"]["message"]}') + raise ( + f'HTTP {req.status_code}, reason: {res["error"]["message"]}' + ) image_dict = {} - image_dict['web_url'] = res['image']['url'] - image_dict['img_url'] = res['image']['url'] + image_dict["web_url"] = res["image"]["url"] + image_dict["img_url"] = res["image"]["url"] image_list.append(image_dict) return image_list async def get_anon(self, anon): - if anon == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: - anon = 'no' + if ( + anon == 0 + and bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + is False + ): + anon = "no" else: - anon = 'yes' + anon = "yes" return anon async def edit_name(self, meta): - pter_name = meta['name'] + pter_name = meta["name"] - remove_list = ['Dubbed', 'Dual-Audio'] + remove_list = ["Dubbed", "Dual-Audio"] for each in remove_list: - pter_name = pter_name.replace(each, '') + pter_name = pter_name.replace(each, "") - pter_name = pter_name.replace(meta["aka"], '') - pter_name = pter_name.replace('PQ10', 'HDR') + pter_name = pter_name.replace(meta["aka"], "") + pter_name = pter_name.replace("PQ10", "HDR") - if meta['type'] == 'WEBDL' and meta.get('has_encode_settings', False) is True: - pter_name = pter_name.replace('H.264', 'x264') + if meta["type"] == "WEBDL" and meta.get("has_encode_settings", False) is True: + pter_name = pter_name.replace("H.264", "x264") return pter_name async def is_zhongzi(self, meta): - if meta.get('is_disc', '') != 'BDMV': - mi = meta['mediainfo'] - for track in mi['media']['track']: - if track['@type'] == "Text": - language = track.get('Language') + if meta.get("is_disc", "") != "BDMV": + mi = meta["mediainfo"] + for track in mi["media"]["track"]: + if track["@type"] == "Text": + language = track.get("Language") if language == "zh": - return 'yes' + return "yes" 
else: - for language in meta['bdinfo']['subtitles']: + for language in meta["bdinfo"]["subtitles"]: if language == "Chinese": - return 'yes' + return "yes" return None async def upload(self, meta, disctype): @@ -322,38 +389,56 @@ async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - desc_file = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt" + desc_file = ( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt" + ) if not os.path.exists(desc_file): await self.edit_desc(meta) pter_name = await self.edit_name(meta) - if meta['bdinfo'] is not None: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8') + if meta["bdinfo"] is not None: + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ) else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8') + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ) - pter_desc = open(desc_file, 'r').read() + pter_desc = open(desc_file, "r").read() torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - with open(torrent_path, 'rb') as torrentFile: - if len(meta['filelist']) == 1: - torrentFileName = unidecode(os.path.basename(meta['video']).replace(' ', '.')) + with open(torrent_path, "rb") as torrentFile: + if len(meta["filelist"]) == 1: + torrentFileName = unidecode( + os.path.basename(meta["video"]).replace(" ", ".") + ) else: - torrentFileName = unidecode(os.path.basename(meta['path']).replace(' ', '.')) + torrentFileName = unidecode( + os.path.basename(meta["path"]).replace(" ", ".") + ) files = { - 'file': (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent"), + "file": ( + f"{torrentFileName}.torrent", + torrentFile, + "application/x-bittorrent", + ), } # use chinese small_descr - if meta['ptgen']["trans_title"] != ['']: - small_descr = '' - for title_ in meta['ptgen']["trans_title"]: - small_descr += f'{title_} / ' - small_descr += "| 类别:" + meta['ptgen']["genre"][0] - small_descr = small_descr.replace('/ |', '|') + if meta["ptgen"]["trans_title"] != [""]: + small_descr = "" + for title_ in meta["ptgen"]["trans_title"]: + small_descr += f"{title_} / " + small_descr += "| 类别:" + meta["ptgen"]["genre"][0] + small_descr = small_descr.replace("/ |", "|") else: - small_descr = meta['title'] + small_descr = meta["title"] data = { "name": pter_name, "small_descr": small_descr, @@ -361,17 +446,17 @@ async def upload(self, meta, disctype): "type": await self.get_type_category_id(meta), "source_sel": await self.get_type_medium_id(meta), "team_sel": await self.get_area_id(meta), - "uplver": await self.get_anon(meta['anon']), - "zhongzi": await self.is_zhongzi(meta) + "uplver": await self.get_anon(meta["anon"]), + "zhongzi": await self.is_zhongzi(meta), } - if meta.get('personalrelease', False) is True: + if meta.get("personalrelease", False) is True: data["pr"] = "yes" url = "https://pterclub.com/takeupload.php" # Submit - if meta['debug']: + if meta["debug"]: console.print(url) console.print(data) else: @@ -384,21 +469,32 @@ async def upload(self, meta, disctype): mi_dump.close() if up.url.startswith("https://pterclub.com/details.php?id="): - console.print(f"[green]Uploaded to: [yellow]{up.url.replace('&uploaded=1', '')}[/yellow][/green]") - id = re.search(r"(id=)(\d+)",
urlparse(up.url).query).group(2) + console.print( + f"[green]Uploaded to: [yellow]{up.url.replace('&uploaded=1', '')}[/yellow][/green]" + ) + id = re.search(r"(id=)(\d+)", urlparse(up.url).query).group( + 2 + ) await self.download_new_torrent(id, torrent_path) else: console.print(data) console.print("\n\n") - raise UploadException(f"Upload to Pter Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') # noqa #F405 + raise UploadException( + f"Upload to Pter Failed: result URL {up.url} ({up.status_code}) was not expected", + "red", + ) # noqa #F405 return async def download_new_torrent(self, id, torrent_path): - download_url = f"https://pterclub.com/download.php?id={id}&passkey={self.passkey}" + download_url = ( + f"https://pterclub.com/download.php?id={id}&passkey={self.passkey}" + ) r = requests.get(url=download_url) if r.status_code == 200: with open(torrent_path, "wb") as tor: tor.write(r.content) else: - console.print("[red]There was an issue downloading the new .torrent from pter") + console.print( + "[red]There was an issue downloading the new .torrent from pter" + ) console.print(r.text) diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index b9e58760f..3331a79bd 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -20,35 +20,79 @@ from datetime import datetime -class PTP(): +class PTP: def __init__(self, config): self.config = config - self.tracker = 'PTP' - self.source_flag = 'PTP' - self.api_user = config['TRACKERS']['PTP'].get('ApiUser', '').strip() - self.api_key = config['TRACKERS']['PTP'].get('ApiKey', '').strip() - self.announce_url = config['TRACKERS']['PTP'].get('announce_url', '').strip() - self.username = config['TRACKERS']['PTP'].get('username', '').strip() - self.password = config['TRACKERS']['PTP'].get('password', '').strip() - self.web_source = str2bool(str(config['TRACKERS']['PTP'].get('add_web_source_to_desc', True))) - self.user_agent = f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' - self.banned_groups = ['aXXo', 'BMDru', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'd3g', 'DNL', 'FaNGDiNG0', 'HD2DVD', 'HDTime', 'ION10', 'iPlanet', - 'KiNGDOM', 'mHD', 'mSD', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'OFT', 'PRODJi', 'SANTi', 'SPiRiT', 'STUTTERSHIT', 'ViSION', 'VXT', - 'WAF', 'x0r', 'YIFY',] + self.tracker = "PTP" + self.source_flag = "PTP" + self.api_user = config["TRACKERS"]["PTP"].get("ApiUser", "").strip() + self.api_key = config["TRACKERS"]["PTP"].get("ApiKey", "").strip() + self.announce_url = config["TRACKERS"]["PTP"].get("announce_url", "").strip() + self.username = config["TRACKERS"]["PTP"].get("username", "").strip() + self.password = config["TRACKERS"]["PTP"].get("password", "").strip() + self.web_source = str2bool( + str(config["TRACKERS"]["PTP"].get("add_web_source_to_desc", True)) + ) + self.user_agent = ( + f"Upload Assistant/2.1 ({platform.system()} {platform.release()})" + ) + self.banned_groups = [ + "aXXo", + "BMDru", + "BRrip", + "CM8", + "CrEwSaDe", + "CTFOH", + "d3g", + "DNL", + "FaNGDiNG0", + "HD2DVD", + "HDTime", + "ION10", + "iPlanet", + "KiNGDOM", + "mHD", + "mSD", + "nHD", + "nikt0", + "nSD", + "NhaNc3", + "OFT", + "PRODJi", + "SANTi", + "SPiRiT", + "STUTTERSHIT", + "ViSION", + "VXT", + "WAF", + "x0r", + "YIFY", + ] self.sub_lang_map = { ("Arabic", "ara", "ar"): 22, - ("Brazilian Portuguese", "Brazilian", "Portuguese-BR", 'pt-br'): 49, + ("Brazilian Portuguese", "Brazilian", "Portuguese-BR", "pt-br"): 49, ("Bulgarian", "bul", "bg"): 29, - ("Chinese", "chi", "zh", "Chinese (Simplified)", "Chinese 
(Traditional)"): 14, + ( + "Chinese", + "chi", + "zh", + "Chinese (Simplified)", + "Chinese (Traditional)", + ): 14, ("Croatian", "hrv", "hr", "scr"): 23, ("Czech", "cze", "cz", "cs"): 30, ("Danish", "dan", "da"): 10, ("Dutch", "dut", "nl"): 9, ("English", "eng", "en", "English (CC)", "English - SDH"): 3, ("English - Forced", "English (Forced)", "en (Forced)"): 50, - ("English Intertitles", "English (Intertitles)", "English - Intertitles", "en (Intertitles)"): 51, + ( + "English Intertitles", + "English (Intertitles)", + "English - Intertitles", + "en (Intertitles)", + ): 51, ("Estonian", "est", "et"): 38, ("Finnish", "fin", "fi"): 15, ("French", "fre", "fr"): 5, @@ -84,48 +128,64 @@ def __init__(self, config): async def get_ptp_id_imdb(self, search_term, search_file_folder, meta): imdb_id = ptp_torrent_id = None filename = str(os.path.basename(search_term)) - params = { - 'filelist': filename - } + params = {"filelist": filename} headers = { - 'ApiUser': self.api_user, - 'ApiKey': self.api_key, - 'User-Agent': self.user_agent + "ApiUser": self.api_user, + "ApiKey": self.api_key, + "User-Agent": self.user_agent, } - url = 'https://passthepopcorn.me/torrents.php' + url = "https://passthepopcorn.me/torrents.php" response = requests.get(url, params=params, headers=headers) await asyncio.sleep(1) - console.print(f"[green]Searching PTP for: [bold yellow]{filename}[/bold yellow]") + console.print( + f"[green]Searching PTP for: [bold yellow]{filename}[/bold yellow]" + ) try: if response.status_code == 200: response = response.json() # console.print(f"[blue]Raw API Response: {response}[/blue]") - if int(response['TotalResults']) >= 1: - for movie in response['Movies']: - if len(movie['Torrents']) >= 1: - for torrent in movie['Torrents']: + if int(response["TotalResults"]) >= 1: + for movie in response["Movies"]: + if len(movie["Torrents"]) >= 1: + for torrent in movie["Torrents"]: # First, try matching in filelist > path - for file in torrent['FileList']: - if file.get('Path') == filename: - imdb_id = movie['ImdbId'] - ptp_torrent_id = torrent['Id'] - dummy, ptp_torrent_hash, *_ = await self.get_imdb_from_torrent_id(ptp_torrent_id) - console.print(f'[bold green]Matched release with PTP ID: [yellow]{ptp_torrent_id}[/yellow][/bold green]') + for file in torrent["FileList"]: + if file.get("Path") == filename: + imdb_id = movie["ImdbId"] + ptp_torrent_id = torrent["Id"] + dummy, ptp_torrent_hash, *_ = ( + await self.get_imdb_from_torrent_id( + ptp_torrent_id + ) + ) + console.print( + f"[bold green]Matched release with PTP ID: [yellow]{ptp_torrent_id}[/yellow][/bold green]" + ) # Call get_torrent_info and print the results - tinfo = await self.get_torrent_info(imdb_id, meta) - console.print(f"[cyan]Torrent Info: {tinfo}[/cyan]") + tinfo = await self.get_torrent_info( + imdb_id, meta + ) + console.print( + f"[cyan]Torrent Info: {tinfo}[/cyan]" + ) return imdb_id, ptp_torrent_id, ptp_torrent_hash # If no match in filelist > path, check directly in filepath - if torrent.get('FilePath') == filename: - imdb_id = movie['ImdbId'] - ptp_torrent_id = torrent['Id'] - dummy, ptp_torrent_hash, *_ = await self.get_imdb_from_torrent_id(ptp_torrent_id) - console.print(f'[bold green]Matched release with PTP ID: [yellow]{ptp_torrent_id}[/yellow][/bold green]') + if torrent.get("FilePath") == filename: + imdb_id = movie["ImdbId"] + ptp_torrent_id = torrent["Id"] + dummy, ptp_torrent_hash, *_ = ( + await self.get_imdb_from_torrent_id( + ptp_torrent_id + ) + ) + console.print( + f"[bold green]Matched release with PTP ID: 
[yellow]{ptp_torrent_id}[/yellow][/bold green]" + ) # Call get_torrent_info and print the results tinfo = await self.get_torrent_info(imdb_id, meta) @@ -133,7 +193,9 @@ async def get_ptp_id_imdb(self, search_term, search_file_folder, meta): return imdb_id, ptp_torrent_id, ptp_torrent_hash - console.print(f'[yellow]Could not find any release matching [bold yellow]{filename}[/bold yellow] on PTP') + console.print( + f"[yellow]Could not find any release matching [bold yellow]{filename}[/bold yellow] on PTP" + ) return None, None, None elif response.status_code in [400, 401, 403]: @@ -148,31 +210,31 @@ async def get_ptp_id_imdb(self, search_term, search_file_folder, meta): return None, None, None except Exception as e: - console.print(f'[red]An error occurred: {str(e)}[/red]') + console.print(f"[red]An error occurred: {str(e)}[/red]") - console.print(f'[yellow]Could not find any release matching [bold yellow]{filename}[/bold yellow] on PTP') + console.print( + f"[yellow]Could not find any release matching [bold yellow]{filename}[/bold yellow] on PTP" + ) return None, None, None async def get_imdb_from_torrent_id(self, ptp_torrent_id): - params = { - 'torrentid': ptp_torrent_id - } + params = {"torrentid": ptp_torrent_id} headers = { - 'ApiUser': self.api_user, - 'ApiKey': self.api_key, - 'User-Agent': self.user_agent + "ApiUser": self.api_user, + "ApiKey": self.api_key, + "User-Agent": self.user_agent, } - url = 'https://passthepopcorn.me/torrents.php' + url = "https://passthepopcorn.me/torrents.php" response = requests.get(url, params=params, headers=headers) await asyncio.sleep(1) try: if response.status_code == 200: response = response.json() - imdb_id = response['ImdbId'] + imdb_id = response["ImdbId"] ptp_infohash = None - for torrent in response['Torrents']: - if torrent.get('Id', 0) == str(ptp_torrent_id): - ptp_infohash = torrent.get('InfoHash', None) + for torrent in response["Torrents"]: + if torrent.get("Id", 0) == str(ptp_torrent_id): + ptp_infohash = torrent.get("InfoHash", None) return imdb_id, ptp_infohash, None elif int(response.status_code) in [400, 401, 403]: console.print(response.text) @@ -186,17 +248,16 @@ async def get_imdb_from_torrent_id(self, ptp_torrent_id): return None, None, None async def get_ptp_description(self, ptp_torrent_id, meta, is_disc): - params = { - 'id': ptp_torrent_id, - 'action': 'get_description' - } + params = {"id": ptp_torrent_id, "action": "get_description"} headers = { - 'ApiUser': self.api_user, - 'ApiKey': self.api_key, - 'User-Agent': self.user_agent + "ApiUser": self.api_user, + "ApiKey": self.api_key, + "User-Agent": self.user_agent, } - url = 'https://passthepopcorn.me/torrents.php' - console.print(f"[yellow]Requesting description from {url} with ID {ptp_torrent_id}") + url = "https://passthepopcorn.me/torrents.php" + console.print( + f"[yellow]Requesting description from {url} with ID {ptp_torrent_id}" + ) response = requests.get(url, params=params, headers=headers) await asyncio.sleep(1) @@ -207,19 +268,25 @@ async def get_ptp_description(self, ptp_torrent_id, meta, is_disc): desc, imagelist = bbcode.clean_ptp_description(ptp_desc, is_disc) console.print("[bold green]Successfully grabbed description from PTP") - console.print(f"[cyan]Description after cleaning:[yellow]\n{desc[:1000]}...") # Show first 1000 characters for brevity + console.print( + f"[cyan]Description after cleaning:[yellow]\n{desc[:1000]}..." 
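For context, the keep/edit/discard prompt in get_ptp_description above relies on click.edit, which opens the user's $EDITOR and returns None when nothing was saved. A minimal sketch of the same pattern; the helper name is illustrative and not part of this diff:

import click

def confirm_description(desc: str):
    """Return the description to keep, or None if it was discarded."""
    choice = input("Enter 'e' to edit, 'd' to discard, or press Enter to keep: ")
    choice = choice.strip().lower()
    if choice == "e":
        edited = click.edit(desc)  # opens $EDITOR; None means closed without saving
        return edited.strip() if edited else desc
    if choice == "d":
        return None  # caller treats None as "discard"
    return desc  # keep as-is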
+ ) # Show first 1000 characters for brevity - if not meta.get('ptp') or meta['unattended']: + if not meta.get("ptp") or meta["unattended"]: # Allow user to edit or discard the description - console.print("[cyan]Do you want to edit, discard or keep the description?[/cyan]") - edit_choice = input("Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: ") + console.print( + "[cyan]Do you want to edit, discard or keep the description?[/cyan]" + ) + edit_choice = input( + "Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: " + ) - if edit_choice.lower() == 'e': + if edit_choice.lower() == "e": edited_description = click.edit(desc) if edited_description: desc = edited_description.strip() console.print(f"[green]Final description after editing:[/green] {desc}") - elif edit_choice.lower() == 'd': + elif edit_choice.lower() == "d": desc = None console.print("[yellow]Description discarded.[/yellow]") else: @@ -229,40 +296,42 @@ async def get_ptp_description(self, ptp_torrent_id, meta, is_disc): async def get_group_by_imdb(self, imdb): params = { - 'imdb': imdb, + "imdb": imdb, } headers = { - 'ApiUser': self.api_user, - 'ApiKey': self.api_key, - 'User-Agent': self.user_agent + "ApiUser": self.api_user, + "ApiKey": self.api_key, + "User-Agent": self.user_agent, } - url = 'https://passthepopcorn.me/torrents.php' + url = "https://passthepopcorn.me/torrents.php" response = requests.get(url=url, headers=headers, params=params) await asyncio.sleep(1) try: response = response.json() if response.get("Page") == "Browse": # No Releases on Site with ID return None - elif response.get('Page') == "Details": # Group Found - groupID = response.get('GroupId') - console.print(f"[green]Matched IMDb: [yellow]tt{imdb}[/yellow] to Group ID: [yellow]{groupID}[/yellow][/green]") - console.print(f"[green]Title: [yellow]{response.get('Name')}[/yellow] ([yellow]{response.get('Year')}[/yellow])") + elif response.get("Page") == "Details": # Group Found + groupID = response.get("GroupId") + console.print( + f"[green]Matched IMDb: [yellow]tt{imdb}[/yellow] to Group ID: [yellow]{groupID}[/yellow][/green]" + ) + console.print( + f"[green]Title: [yellow]{response.get('Name')}[/yellow] ([yellow]{response.get('Year')}[/yellow])" + ) return groupID except Exception: console.print("[red]An error has occured trying to find a group ID") - console.print("[red]Please check that the site is online and your ApiUser/ApiKey values are correct") + console.print( + "[red]Please check that the site is online and your ApiUser/ApiKey values are correct" + ) return None async def get_torrent_info(self, imdb, meta): - params = { - 'imdb': imdb, - 'action': 'torrent_info', - 'fast': 1 - } + params = {"imdb": imdb, "action": "torrent_info", "fast": 1} headers = { - 'ApiUser': self.api_user, - 'ApiKey': self.api_key, - 'User-Agent': self.user_agent + "ApiUser": self.api_user, + "ApiKey": self.api_key, + "User-Agent": self.user_agent, } url = "https://passthepopcorn.me/ajax.php" response = requests.get(url=url, params=params, headers=headers) @@ -275,9 +344,15 @@ async def get_torrent_info(self, imdb, meta): for key, value in response[0].items(): if value not in (None, ""): tinfo[key] = value - if tinfo['tags'] == "": - tags = self.get_tags([meta.get("genres", ""), meta.get("keywords", ""), meta['imdb_info']['genres']]) - tinfo['tags'] = ", ".join(tags) + if tinfo["tags"] == "": + tags = self.get_tags( + [ + meta.get("genres", ""), + meta.get("keywords", ""), + meta["imdb_info"]["genres"], + ] + ) + tinfo["tags"] = ", 
".join(tags) except Exception: pass return tinfo @@ -289,71 +364,106 @@ async def get_torrent_info_tmdb(self, meta): "album_desc": meta.get("overview", ""), } tags = await self.get_tags([meta.get("genres", ""), meta.get("keywords", "")]) - tinfo['tags'] = ", ".join(tags) + tinfo["tags"] = ", ".join(tags) return tinfo async def get_tags(self, check_against): tags = [] ptp_tags = [ - "action", "adventure", "animation", "arthouse", "asian", "biography", "camp", "comedy", - "crime", "cult", "documentary", "drama", "experimental", "exploitation", "family", "fantasy", "film.noir", - "history", "horror", "martial.arts", "musical", "mystery", "performance", "philosophy", "politics", "romance", - "sci.fi", "short", "silent", "sport", "thriller", "video.art", "war", "western" + "action", + "adventure", + "animation", + "arthouse", + "asian", + "biography", + "camp", + "comedy", + "crime", + "cult", + "documentary", + "drama", + "experimental", + "exploitation", + "family", + "fantasy", + "film.noir", + "history", + "horror", + "martial.arts", + "musical", + "mystery", + "performance", + "philosophy", + "politics", + "romance", + "sci.fi", + "short", + "silent", + "sport", + "thriller", + "video.art", + "war", + "western", ] if not isinstance(check_against, list): check_against = [check_against] for each in ptp_tags: - if any(each.replace('.', '') in x for x in check_against.lower().replace(' ', '').replace('-', '')): + if any( + each.replace(".", "") in x + for x in check_against.lower().replace(" ", "").replace("-", "") + ): tags.append(each) return tags async def search_existing(self, groupID, meta, disctype): # Map resolutions to SD / HD / UHD quality = None - if meta.get('sd', 0) == 1: # 1 is SD + if meta.get("sd", 0) == 1: # 1 is SD quality = "Standard Definition" - elif meta['resolution'] in ["1440p", "1080p", "1080i", "720p"]: + elif meta["resolution"] in ["1440p", "1080p", "1080i", "720p"]: quality = "High Definition" - elif meta['resolution'] in ["2160p", "4320p", "8640p"]: + elif meta["resolution"] in ["2160p", "4320p", "8640p"]: quality = "Ultra High Definition" params = { - 'id': groupID, + "id": groupID, } headers = { - 'ApiUser': self.api_user, - 'ApiKey': self.api_key, - 'User-Agent': self.user_agent + "ApiUser": self.api_user, + "ApiKey": self.api_key, + "User-Agent": self.user_agent, } - url = 'https://passthepopcorn.me/torrents.php' + url = "https://passthepopcorn.me/torrents.php" response = requests.get(url=url, headers=headers, params=params) await asyncio.sleep(1) existing = [] try: response = response.json() - torrents = response.get('Torrents', []) + torrents = response.get("Torrents", []) if len(torrents) != 0: for torrent in torrents: - if torrent.get('Quality') == quality and quality is not None: - existing.append(f"[{torrent.get('Resolution')}] {torrent.get('ReleaseName', 'RELEASE NAME NOT FOUND')}") + if torrent.get("Quality") == quality and quality is not None: + existing.append( + f"[{torrent.get('Resolution')}] {torrent.get('ReleaseName', 'RELEASE NAME NOT FOUND')}" + ) except Exception: console.print("[red]An error has occured trying to find existing releases") return existing async def ptpimg_url_rehost(self, image_url): payload = { - 'format': 'json', - 'api_key': self.config["DEFAULT"]["ptpimg_api"], - 'link-upload': image_url + "format": "json", + "api_key": self.config["DEFAULT"]["ptpimg_api"], + "link-upload": image_url, } - headers = {'referer': 'https://ptpimg.me/index.php'} + headers = {"referer": "https://ptpimg.me/index.php"} url = 
"https://ptpimg.me/upload.php" response = requests.post(url, headers=headers, data=payload) try: response = response.json() - ptpimg_code = response[0]['code'] - ptpimg_ext = response[0]['ext'] + ptpimg_code = response[0]["code"] + ptpimg_ext = response[0]["ext"] img_url = f"https://ptpimg.me/{ptpimg_code}.{ptpimg_ext}" except Exception: console.print("[red]PTPIMG image rehost failed") @@ -363,10 +473,13 @@ async def ptpimg_url_rehost(self, image_url): def get_type(self, imdb_info, meta): ptpType = None - if imdb_info['type'] is not None: - imdbType = imdb_info.get('type', 'movie').lower() + if imdb_info["type"] is not None: + imdbType = imdb_info.get("type", "movie").lower() if imdbType in ("movie", "tv movie"): - if int(imdb_info.get('runtime', '60')) >= 45 or int(imdb_info.get('runtime', '60')) == 0: + if ( + int(imdb_info.get("runtime", "60")) >= 45 + or int(imdb_info.get("runtime", "60")) == 0 + ): ptpType = "Feature Film" else: ptpType = "Short Film" @@ -382,7 +495,10 @@ def get_type(self, imdb_info, meta): keywords = meta.get("keywords", "").lower() tmdb_type = meta.get("tmdb_type", "movie").lower() if tmdb_type == "movie": - if int(meta.get('runtime', 60)) >= 45 or int(meta.get('runtime', 60)) == 0: + if ( + int(meta.get("runtime", 60)) >= 45 + or int(meta.get("runtime", 60)) == 0 + ): ptpType = "Feature Film" else: ptpType = "Short Film" @@ -395,23 +511,32 @@ def get_type(self, imdb_info, meta): elif "concert" in keywords: ptpType = "Concert" if ptpType is None: - if meta.get('mode', 'discord') == 'cli': - ptpTypeList = ["Feature Film", "Short Film", "Miniseries", "Stand-up Comedy", "Concert", "Movie Collection"] - ptpType = cli_ui.ask_choice("Select the proper type", choices=ptpTypeList) + if meta.get("mode", "discord") == "cli": + ptpTypeList = [ + "Feature Film", + "Short Film", + "Miniseries", + "Stand-up Comedy", + "Concert", + "Movie Collection", + ] + ptpType = cli_ui.ask_choice( + "Select the proper type", choices=ptpTypeList + ) return ptpType def get_codec(self, meta): - if meta['is_disc'] == "BDMV": - bdinfo = meta['bdinfo'] + if meta["is_disc"] == "BDMV": + bdinfo = meta["bdinfo"] bd_sizes = [25, 50, 66, 100] for each in bd_sizes: - if bdinfo['size'] < each: + if bdinfo["size"] < each: codec = f"BD{each}" break - elif meta['is_disc'] == "DVD": - if "DVD5" in meta['dvd_size']: + elif meta["is_disc"] == "DVD": + if "DVD5" in meta["dvd_size"]: codec = "DVD5" - elif "DVD9" in meta['dvd_size']: + elif "DVD9" in meta["dvd_size"]: codec = "DVD9" else: codecmap = { @@ -420,17 +545,19 @@ def get_codec(self, meta): "HEVC": "H.265", "H.265": "H.265", } - searchcodec = meta.get('video_codec', meta.get('video_encode')) + searchcodec = meta.get("video_codec", meta.get("video_encode")) codec = codecmap.get(searchcodec, searchcodec) - if meta.get('has_encode_settings') is True: + if meta.get("has_encode_settings") is True: codec = codec.replace("H.", "x") return codec def get_resolution(self, meta): other_res = None - res = meta.get('resolution', "OTHER") - if (res == "OTHER" and meta['is_disc'] != "BDMV") or (meta['sd'] == 1 and meta['type'] == "WEBDL"): - video_mi = meta['mediainfo']['media']['track'][1] + res = meta.get("resolution", "OTHER") + if (res == "OTHER" and meta["is_disc"] != "BDMV") or ( + meta["sd"] == 1 and meta["type"] == "WEBDL" + ): + video_mi = meta["mediainfo"]["media"]["track"][1] other_res = f"{video_mi['Width']}x{video_mi['Height']}" res = "Other" if meta["is_disc"] == "DVD": @@ -441,15 +568,12 @@ def get_container(self, meta): container = None if 
meta["is_disc"] == "BDMV": container = "m2ts" - elif meta['is_disc'] == "DVD": + elif meta["is_disc"] == "DVD": container = "VOB IFO" else: - ext = os.path.splitext(meta['filelist'][0])[1] - containermap = { - '.mkv': "MKV", - '.mp4': 'MP4' - } - container = containermap.get(ext, 'Other') + ext = os.path.splitext(meta["filelist"][0])[1] + containermap = {".mkv": "MKV", ".mp4": "MP4"} + container = containermap.get(ext, "Other") return container def get_source(self, source): @@ -460,9 +584,9 @@ def get_source(self, source): "HDDVD": "HD-DVD", "Web": "WEB", "HDTV": "HDTV", - 'UHDTV': 'HDTV', + "UHDTV": "HDTV", "NTSC": "DVD", - "PAL": "DVD" + "PAL": "DVD", } source_id = sources.get(source, "OtherR") return source_id @@ -471,24 +595,24 @@ def get_subtitles(self, meta): sub_lang_map = self.sub_lang_map sub_langs = [] - if meta.get('is_disc', '') != 'BDMV': - mi = meta['mediainfo'] - if meta.get('is_disc', '') == "DVD": - mi = json.loads(MediaInfo.parse(meta['discs'][0]['ifo'], output='JSON')) - for track in mi['media']['track']: - if track['@type'] == "Text": - language = track.get('Language_String2', track.get('Language')) + if meta.get("is_disc", "") != "BDMV": + mi = meta["mediainfo"] + if meta.get("is_disc", "") == "DVD": + mi = json.loads(MediaInfo.parse(meta["discs"][0]["ifo"], output="JSON")) + for track in mi["media"]["track"]: + if track["@type"] == "Text": + language = track.get("Language_String2", track.get("Language")) if language == "en": - if track.get('Forced', "") == "Yes": + if track.get("Forced", "") == "Yes": language = "en (Forced)" - title = track.get('Title', "") + title = track.get("Title", "") if isinstance(title, str) and "intertitles" in title.lower(): language = "en (Intertitles)" for lang, subID in sub_lang_map.items(): if language in lang and subID not in sub_langs: sub_langs.append(subID) else: - for language in meta['bdinfo']['subtitles']: + for language in meta["bdinfo"]["subtitles"]: for lang, subID in sub_lang_map.items(): if language in lang and subID not in sub_langs: sub_langs.append(subID) @@ -503,20 +627,25 @@ def get_trumpable(self, sub_langs): "English Hardcoded Subs (Forced)": 50, "No English Subs": 14, "English Softsubs Exist (Mislabeled)": None, - "Hardcoded Subs (Non-English)": "OTHER" + "Hardcoded Subs (Non-English)": "OTHER", } - opts = cli_ui.select_choices("English subtitles not found. Please select any/all applicable options:", choices=list(trumpable_values.keys())) + opts = cli_ui.select_choices( + "English subtitles not found. 
Please select any/all applicable options:", + choices=list(trumpable_values.keys()), + ) trumpable = [] if opts: for t, v in trumpable_values.items(): - if t in ''.join(opts): + if t in "".join(opts): if v is None: break elif v != 50: # Hardcoded, Forced trumpable.append(v) elif v == "OTHER": # Hardcoded, Non-English trumpable.append(14) - hc_sub_langs = cli_ui.ask_string("Enter language code for HC Subtitle languages") + hc_sub_langs = cli_ui.ask_string( + "Enter language code for HC Subtitle languages" + ) for lang, subID in self.sub_lang_map.items(): if any(hc_sub_langs.strip() == x for x in list(lang)): sub_langs.append(subID) @@ -534,64 +663,68 @@ def get_remaster_title(self, meta): remaster_title = [] # Collections # Masters of Cinema, The Criterion Collection, Warner Archive Collection - if meta.get('distributor') in ('WARNER ARCHIVE', 'WARNER ARCHIVE COLLECTION', 'WAC'): - remaster_title.append('Warner Archive Collection') - elif meta.get('distributor') in ('CRITERION', 'CRITERION COLLECTION', 'CC'): - remaster_title.append('The Criterion Collection') - elif meta.get('distributor') in ('MASTERS OF CINEMA', 'MOC'): - remaster_title.append('Masters of Cinema') + if meta.get("distributor") in ( + "WARNER ARCHIVE", + "WARNER ARCHIVE COLLECTION", + "WAC", + ): + remaster_title.append("Warner Archive Collection") + elif meta.get("distributor") in ("CRITERION", "CRITERION COLLECTION", "CC"): + remaster_title.append("The Criterion Collection") + elif meta.get("distributor") in ("MASTERS OF CINEMA", "MOC"): + remaster_title.append("Masters of Cinema") # Editions # Director's Cut, Extended Edition, Rifftrax, Theatrical Cut, Uncut, Unrated - if "director's cut" in meta.get('edition', '').lower(): + if "director's cut" in meta.get("edition", "").lower(): remaster_title.append("Director's Cut") - elif "extended" in meta.get('edition', '').lower(): + elif "extended" in meta.get("edition", "").lower(): remaster_title.append("Extended Edition") - elif "theatrical" in meta.get('edition', '').lower(): + elif "theatrical" in meta.get("edition", "").lower(): remaster_title.append("Theatrical Cut") - elif "rifftrax" in meta.get('edition', '').lower(): + elif "rifftrax" in meta.get("edition", "").lower(): remaster_title.append("Theatrical Cut") - elif "uncut" in meta.get('edition', '').lower(): + elif "uncut" in meta.get("edition", "").lower(): remaster_title.append("Uncut") - elif "unrated" in meta.get('edition', '').lower(): + elif "unrated" in meta.get("edition", "").lower(): remaster_title.append("Unrated") else: - if meta.get('edition') not in ('', None): - remaster_title.append(meta['edition']) + if meta.get("edition") not in ("", None): + remaster_title.append(meta["edition"]) # Features # 2-Disc Set, 2in1, 2D/3D Edition, 3D Anaglyph, 3D Full SBS, 3D Half OU, 3D Half SBS, # 4K Restoration, 4K Remaster, # Extras, Remux, - if meta.get('type') == "REMUX": + if meta.get("type") == "REMUX": remaster_title.append("Remux") # DTS:X, Dolby Atmos, Dual Audio, English Dub, With Commentary, - if "DTS:X" in meta['audio']: - remaster_title.append('DTS:X') - if "Atmos" in meta['audio']: - remaster_title.append('Dolby Atmos') - if "Dual" in meta['audio']: - remaster_title.append('Dual Audio') - if "Dubbed" in meta['audio']: - remaster_title.append('English Dub') - if meta.get('has_commentary', False) is True: - remaster_title.append('With Commentary') + if "DTS:X" in meta["audio"]: + remaster_title.append("DTS:X") + if "Atmos" in meta["audio"]: + remaster_title.append("Dolby Atmos") + if "Dual" in meta["audio"]: 
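One behavior worth flagging while this area is being touched: a few hunks up, get_tags first coerces check_against to a list and then calls check_against.lower() inside the any(...) generator. On a list that would raise AttributeError, and even on a string it would iterate characters rather than candidate strings; the reformat appears to preserve that pre-existing bug verbatim. A corrected sketch, assuming the intent is to normalize each candidate string (matches_tag is a hypothetical helper):

def matches_tag(tag: str, candidates) -> bool:
    """True if the PTP tag (dots stripped) occurs in any normalized candidate."""
    if not isinstance(candidates, list):
        candidates = [candidates]
    needle = tag.replace(".", "")
    return any(
        needle in str(c).lower().replace(" ", "").replace("-", "")
        for c in candidates
    )

# e.g. tags = [t for t in ptp_tags if matches_tag(t, check_against)]

This keeps "film.noir" matching "Film-Noir", "sci.fi" matching "Sci-Fi", and so on.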
+ remaster_title.append("Dual Audio") + if "Dubbed" in meta["audio"]: + remaster_title.append("English Dub") + if meta.get("has_commentary", False) is True: + remaster_title.append("With Commentary") # HDR10, HDR10+, Dolby Vision, 10-bit, # if "Hi10P" in meta.get('video_encode', ''): # remaster_title.append('10-bit') - if meta.get('hdr', '').strip() == '' and meta.get('bit_depth') == '10': - remaster_title.append('10-bit') - if "HDR" in meta.get('hdr', ''): - if "HDR10+" in meta['hdr']: - remaster_title.append('HDR10+') + if meta.get("hdr", "").strip() == "" and meta.get("bit_depth") == "10": + remaster_title.append("10-bit") + if "HDR" in meta.get("hdr", ""): + if "HDR10+" in meta["hdr"]: + remaster_title.append("HDR10+") else: - remaster_title.append('HDR10') - if "DV" in meta.get('hdr', ''): - remaster_title.append('Dolby Vision') - if "HLG" in meta.get('hdr', ''): - remaster_title.append('HLG') + remaster_title.append("HDR10") + if "DV" in meta.get("hdr", ""): + remaster_title.append("Dolby Vision") + if "HLG" in meta.get("hdr", ""): + remaster_title.append("HLG") if remaster_title != []: output = " / ".join(remaster_title) @@ -601,7 +734,9 @@ def get_remaster_title(self, meta): def convert_bbcode(self, desc): desc = desc.replace("[spoiler", "[hide").replace("[/spoiler]", "[/hide]") - desc = desc.replace("[center]", "[align=center]").replace("[/center]", "[/align]") + desc = desc.replace("[center]", "[align=center]").replace( + "[/center]", "[/align]" + ) desc = desc.replace("[left]", "[align=left]").replace("[/left]", "[/align]") desc = desc.replace("[right]", "[align=right]").replace("[/right]", "[/align]") desc = desc.replace("[code]", "[quote]").replace("[/code]", "[/quote]") @@ -609,37 +744,65 @@ def convert_bbcode(self, desc): async def edit_desc(self, meta): from src.prep import Prep - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding="utf-8").read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding="utf-8") as desc: - images = meta['image_list'] - discs = meta.get('discs', []) + + prep = Prep( + screens=meta["screens"], img_host=meta["imghost"], config=self.config + ) + base = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + with open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "w", + encoding="utf-8", + ) as desc: + images = meta["image_list"] + discs = meta.get("discs", []) # For Discs if len(discs) >= 1: for i in range(len(discs)): each = discs[i] - if each['type'] == "BDMV": + if each["type"] == "BDMV": desc.write(f"[mediainfo]{each['summary']}[/mediainfo]\n\n") if i == 0: base2ptp = self.convert_bbcode(base) if base2ptp.strip() != "": desc.write(base2ptp) desc.write("\n\n") - mi_dump = each['summary'] + mi_dump = each["summary"] else: - mi_dump = each['summary'] - if meta.get('vapoursynth', False) is True: + mi_dump = each["summary"] + if meta.get("vapoursynth", False) is True: use_vs = True else: use_vs = False - ds = multiprocessing.Process(target=prep.disc_screenshots, args=(f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), 2)) + ds = multiprocessing.Process( + target=prep.disc_screenshots, + args=( + f"FILE_{i}", + each["bdinfo"], + meta["uuid"], + meta["base_dir"], + use_vs, + [], + meta.get("ffdebug", False), + 2, + ), + ) ds.start() while ds.is_alive() is 
True: await asyncio.sleep(1) - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") - images, dummy = prep.upload_screens(meta, 2, 1, 0, 2, new_screens, {}) - - if each['type'] == "DVD": + new_screens = glob.glob1( + f"{meta['base_dir']}/tmp/{meta['uuid']}", + f"FILE_{i}-*.png", + ) + images, dummy = prep.upload_screens( + meta, 2, 1, 0, 2, new_screens, {} + ) + + if each["type"] == "DVD": desc.write(f"[b][size=3]{each['name']}:[/size][/b]\n") desc.write(f"[mediainfo]{each['ifo_mi_full']}[/mediainfo]\n") desc.write(f"[mediainfo]{each['vob_mi_full']}[/mediainfo]\n") @@ -650,45 +813,92 @@ async def edit_desc(self, meta): desc.write(base2ptp) desc.write("\n\n") else: - ds = multiprocessing.Process(target=prep.dvd_screenshots, args=(meta, i, 2)) + ds = multiprocessing.Process( + target=prep.dvd_screenshots, args=(meta, i, 2) + ) ds.start() while ds.is_alive() is True: await asyncio.sleep(1) - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['discs'][i]['name']}-*.png") - images, dummy = prep.upload_screens(meta, 2, 1, 0, 2, new_screens, {}) + new_screens = glob.glob1( + f"{meta['base_dir']}/tmp/{meta['uuid']}", + f"{meta['discs'][i]['name']}-*.png", + ) + images, dummy = prep.upload_screens( + meta, 2, 1, 0, 2, new_screens, {} + ) if len(images) > 0: - for each in range(len(images[:int(meta['screens'])])): - raw_url = images[each]['raw_url'] + for each in range(len(images[: int(meta["screens"])])): + raw_url = images[each]["raw_url"] desc.write(f"[img]{raw_url}[/img]\n") desc.write("\n") # For non-discs - elif len(meta.get('filelist', [])) >= 1: - for i in range(len(meta['filelist'])): - file = meta['filelist'][i] + elif len(meta.get("filelist", [])) >= 1: + for i in range(len(meta["filelist"])): + file = meta["filelist"][i] if i == 0: # Add This line for all web-dls - if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '' and meta.get('description', None) is None and self.web_source is True: - desc.write(f"[quote][align=center]This release is sourced from {meta['service_longname']}[/align][/quote]") - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + if ( + meta["type"] == "WEBDL" + and meta.get("service_longname", "") != "" + and meta.get("description", None) is None + and self.web_source is True + ): + desc.write( + f"[quote][align=center]This release is sourced from {meta['service_longname']}[/align][/quote]" + ) + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ).read() else: # Export Mediainfo - mi_dump = MediaInfo.parse(file, output="STRING", full=False, mediainfo_options={'inform_version': '1'}) - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/TEMP_PTP_MEDIAINFO.txt", "w", newline="", encoding="utf-8") as f: + mi_dump = MediaInfo.parse( + file, + output="STRING", + full=False, + mediainfo_options={"inform_version": "1"}, + ) + with open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/TEMP_PTP_MEDIAINFO.txt", + "w", + newline="", + encoding="utf-8", + ) as f: f.write(mi_dump) - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/TEMP_PTP_MEDIAINFO.txt", "r", encoding="utf-8").read() + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/TEMP_PTP_MEDIAINFO.txt", + "r", + encoding="utf-8", + ).read() # Generate and upload screens for other files # Add force_screenshots=True to ensure screenshots are taken even if images exist - s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", 
meta['uuid'], meta['base_dir'], meta, 2, True)) + s = multiprocessing.Process( + target=prep.screenshots, + args=( + file, + f"FILE_{i}", + meta["uuid"], + meta["base_dir"], + meta, + 2, + True, + ), + ) s.start() while s.is_alive() is True: await asyncio.sleep(3) # Upload new screenshots - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") - images, dummy = prep.upload_screens(meta, 2, 1, 0, 2, new_screens, {}) + new_screens = glob.glob1( + f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png" + ) + images, dummy = prep.upload_screens( + meta, 2, 1, 0, 2, new_screens, {} + ) # Write MediaInfo and screenshots to the description desc.write(f"[mediainfo]{mi_dump}[/mediainfo]\n") @@ -700,8 +910,8 @@ async def edit_desc(self, meta): desc.write("\n\n") if len(images) > 0: - for each in range(len(images[:int(meta['screens'])])): - raw_url = images[each]['raw_url'] + for each in range(len(images[: int(meta["screens"])])): + raw_url = images[each]["raw_url"] desc.write(f"[img]{raw_url}[/img]\n") desc.write("\n") @@ -712,16 +922,21 @@ async def get_AntiCsrfToken(self, meta): with requests.Session() as session: loggedIn = False if os.path.exists(cookiefile): - with open(cookiefile, 'rb') as cf: + with open(cookiefile, "rb") as cf: session.cookies.update(pickle.load(cf)) uploadresponse = session.get("https://passthepopcorn.me/upload.php") loggedIn = await self.validate_login(uploadresponse) else: console.print("[yellow]PTP Cookies not found. Creating new session.") if loggedIn is True: - AntiCsrfToken = re.search(r'data-AntiCsrfToken="(.*)"', uploadresponse.text).group(1) + AntiCsrfToken = re.search( + r'data-AntiCsrfToken="(.*)"', uploadresponse.text + ).group(1) else: - passKey = re.match(r"https?://please\.passthepopcorn\.me:?\d*/(.+)/announce", self.announce_url).group(1) + passKey = re.match( + r"https?://please\.passthepopcorn\.me:?\d*/(.+)/announce", + self.announce_url, + ).group(1) data = { "username": self.username, "password": self.password, @@ -729,34 +944,54 @@ async def get_AntiCsrfToken(self, meta): "keeplogged": "1", } headers = {"User-Agent": self.user_agent} - loginresponse = session.post("https://passthepopcorn.me/ajax.php?action=login", data=data, headers=headers) + loginresponse = session.post( + "https://passthepopcorn.me/ajax.php?action=login", + data=data, + headers=headers, + ) await asyncio.sleep(2) try: resp = loginresponse.json() - if resp['Result'] == "TfaRequired": - data['TfaType'] = "normal" - data['TfaCode'] = cli_ui.ask_string("2FA Required: Please enter 2FA code") - loginresponse = session.post("https://passthepopcorn.me/ajax.php?action=login", data=data, headers=headers) + if resp["Result"] == "TfaRequired": + data["TfaType"] = "normal" + data["TfaCode"] = cli_ui.ask_string( + "2FA Required: Please enter 2FA code" + ) + loginresponse = session.post( + "https://passthepopcorn.me/ajax.php?action=login", + data=data, + headers=headers, + ) await asyncio.sleep(2) resp = loginresponse.json() try: if resp["Result"] != "Ok": - raise LoginException("Failed to login to PTP. Probably due to the bad user name, password, announce url, or 2FA code.") # noqa F405 + raise LoginException( + "Failed to login to PTP. Probably due to the bad user name, password, announce url, or 2FA code." 
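For context on the login path above: get_AntiCsrfToken round-trips the requests session cookies through pickle so later runs can skip the password/2FA exchange entirely. A condensed sketch of that persistence pattern; the helper names and path handling are illustrative:

import os
import pickle
import requests

def load_session(cookiefile: str) -> requests.Session:
    """Restore a session from a pickled cookie jar, if one exists."""
    session = requests.Session()
    if os.path.exists(cookiefile):
        with open(cookiefile, "rb") as cf:
            session.cookies.update(pickle.load(cf))
    return session

def save_session(session: requests.Session, cookiefile: str) -> None:
    """Persist the cookie jar after a successful login."""
    with open(cookiefile, "wb") as cf:
        pickle.dump(session.cookies, cf)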
+ ) # noqa F405 AntiCsrfToken = resp["AntiCsrfToken"] - with open(cookiefile, 'wb') as cf: + with open(cookiefile, "wb") as cf: pickle.dump(session.cookies, cf) except Exception: - raise LoginException(f"Got exception while loading JSON login response from PTP. Response: {loginresponse.text}") # noqa F405 + raise LoginException( + f"Got exception while loading JSON login response from PTP. Response: {loginresponse.text}" + ) # noqa F405 except Exception: - raise LoginException(f"Got exception while loading JSON login response from PTP. Response: {loginresponse.text}") # noqa F405 + raise LoginException( + f"Got exception while loading JSON login response from PTP. Response: {loginresponse.text}" + ) # noqa F405 return AntiCsrfToken async def validate_login(self, response): loggedIn = False if response.text.find("""""") != -1: - console.print("Looks like you are not logged in to PTP. Probably due to the bad user name, password, or expired session.") + console.print( + "Looks like you are not logged in to PTP. Probably due to the bad user name, password, or expired session." + ) elif "Your popcorn quota has been reached, come back later!" in response.text: - raise LoginException("Your PTP request/popcorn quota has been reached, try again later") # noqa F405 + raise LoginException( + "Your PTP request/popcorn quota has been reached, try again later" + ) # noqa F405 else: loggedIn = True return loggedIn @@ -766,38 +1001,42 @@ async def fill_upload_form(self, groupID, meta): await common.edit_torrent(meta, self.tracker, self.source_flag) resolution, other_resolution = self.get_resolution(meta) await self.edit_desc(meta) - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", "r", encoding='utf-8').read() + desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() ptp_subtitles = self.get_subtitles(meta) ptp_trumpable = None - if not any(x in [3, 50] for x in ptp_subtitles) or meta['hardcoded-subs']: + if not any(x in [3, 50] for x in ptp_subtitles) or meta["hardcoded-subs"]: ptp_trumpable, ptp_subtitles = self.get_trumpable(ptp_subtitles) data = { "submit": "true", "remaster_year": "", "remaster_title": self.get_remaster_title(meta), # Eg.: Hardcoded English - "type": self.get_type(meta['imdb_info'], meta), + "type": self.get_type(meta["imdb_info"], meta), "codec": "Other", # Sending the codec as custom. "other_codec": self.get_codec(meta), "container": "Other", "other_container": self.get_container(meta), "resolution": resolution, "source": "Other", # Sending the source as custom. 
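The subtitle gate a few lines up — `if not any(x in [3, 50] for x in ptp_subtitles) or meta["hardcoded-subs"]:` — fires when neither English (ID 3) nor English Forced (ID 50) from sub_lang_map was detected; note that an empty ptp_subtitles list also passes, which is what routes releases with no matched text tracks into the trumpable prompt. The same intent stated as a small predicate, with illustrative names:

ENGLISH_SUB_IDS = {3, 50}  # sub_lang_map: English, English - Forced

def needs_trumpable_prompt(sub_ids, hardcoded: bool) -> bool:
    # An empty sub_ids list also counts as "no English subs found".
    return bool(hardcoded) or not ENGLISH_SUB_IDS.intersection(sub_ids)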
- "other_source": self.get_source(meta['source']), + "other_source": self.get_source(meta["source"]), "release_desc": desc, "nfo_text": "", "subtitles[]": ptp_subtitles, "trumpable[]": ptp_trumpable, - "AntiCsrfToken": await self.get_AntiCsrfToken(meta) + "AntiCsrfToken": await self.get_AntiCsrfToken(meta), } if data["remaster_year"] != "" or data["remaster_title"] != "": data["remaster"] = "on" if resolution == "Other": data["other_resolution"] = other_resolution - if meta.get('personalrelease', False) is True: + if meta.get("personalrelease", False) is True: data["internalrip"] = "on" # IF SPECIAL (idk how to check for this automatically) - # data["special"] = "on" - if int(str(meta.get("imdb_id", "0")).replace('tt', '')) == 0: + # data["special"] = "on" + if int(str(meta.get("imdb_id", "0")).replace("tt", "")) == 0: data["imdb"] = "0" else: data["imdb"] = meta["imdb_id"] @@ -810,31 +1049,40 @@ async def fill_upload_form(self, groupID, meta): tinfo = await self.get_torrent_info(meta.get("imdb_id", "0"), meta) cover = meta["imdb_info"].get("cover") if cover is None: - cover = meta.get('poster') + cover = meta.get("poster") if cover is not None and "ptpimg" not in cover: cover = await self.ptpimg_url_rehost(cover) while cover is None: - cover = cli_ui.ask_string("No Poster was found. Please input a link to a poster: \n", default="") - if "ptpimg" not in str(cover) and str(cover).endswith(('.jpg', '.png')): + cover = cli_ui.ask_string( + "No Poster was found. Please input a link to a poster: \n", + default="", + ) + if "ptpimg" not in str(cover) and str(cover).endswith((".jpg", ".png")): cover = await self.ptpimg_url_rehost(cover) new_data = { - "title": tinfo.get("title", meta["imdb_info"].get("title", meta["title"])), + "title": tinfo.get( + "title", meta["imdb_info"].get("title", meta["title"]) + ), "year": tinfo.get("year", meta["imdb_info"].get("year", meta["year"])), "image": cover, "tags": tinfo.get("tags", ""), "album_desc": tinfo.get("plot", meta.get("overview", "")), "trailer": meta.get("youtube", ""), } - if new_data['year'] in ['', '0', 0, None] and meta.get('manual_year') not in [0, '', None]: - new_data['year'] = meta['manual_year'] + if new_data["year"] in ["", "0", 0, None] and meta.get( + "manual_year" + ) not in [0, "", None]: + new_data["year"] = meta["manual_year"] while new_data["tags"] == "": - if meta.get('mode', 'discord') == 'cli': - console.print('[yellow]Unable to match any tags') + if meta.get("mode", "discord") == "cli": + console.print("[yellow]Unable to match any tags") console.print("Valid tags can be found on the PTP upload form") - new_data["tags"] = console.input("Please enter at least one tag. Comma seperated (action, animation, short):") + new_data["tags"] = console.input( + "Please enter at least one tag. Comma seperated (action, animation, short):" + ) data.update(new_data) if meta["imdb_info"].get("directors", None) is not None: - data["artist[]"] = tuple(meta['imdb_info'].get('directors')) + data["artist[]"] = tuple(meta["imdb_info"].get("directors")) data["importance[]"] = "1" else: # Upload on existing group url = f"https://passthepopcorn.me/upload.php?groupid={groupID}" @@ -849,13 +1097,18 @@ async def upload(self, meta, url, data, disctype): # Check if the piece size exceeds 16 MiB and regenerate the torrent if needed if torrent.piece_size > 16777216: # 16 MiB in bytes - console.print("[red]Piece size is OVER 16M and does not work on PTP. Generating a new .torrent") + console.print( + "[red]Piece size is OVER 16M and does not work on PTP. 
Generating a new .torrent" + ) # Import Prep and regenerate the torrent with 16 MiB piece size limit from src.prep import Prep - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) - if meta['is_disc']: + prep = Prep( + screens=meta["screens"], img_host=meta["imghost"], config=self.config + ) + + if meta["is_disc"]: include = [] exclude = [] else: @@ -864,9 +1117,12 @@ async def upload(self, meta, url, data, disctype): # Create a new torrent with piece size explicitly set to 8 MiB from src.prep import Prep - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) + + prep = Prep( + screens=meta["screens"], img_host=meta["imghost"], config=self.config + ) new_torrent = prep.CustomTorrent( - path=Path(meta['path']), + path=Path(meta["path"]), trackers=[self.announce_url], source="L4G", private=True, @@ -874,12 +1130,14 @@ async def upload(self, meta, url, data, disctype): include_globs=include, # Ensure this is always a list creation_date=datetime.now(), comment="Created by L4G's Upload Assistant", - created_by="L4G's Upload Assistant" + created_by="L4G's Upload Assistant", ) # Explicitly set the piece size and update metainfo new_torrent.piece_size = 16777216 # 16 MiB in bytes - new_torrent.metainfo['info']['piece length'] = 16777216 # Ensure 'piece length' is set + new_torrent.metainfo["info"][ + "piece length" + ] = 16777216 # Ensure 'piece length' is set # Validate and write the new torrent new_torrent.validate_piece_size() @@ -887,39 +1145,55 @@ async def upload(self, meta, url, data, disctype): new_torrent.write(torrent_path, overwrite=True) # Proceed with the upload process - with open(torrent_path, 'rb') as torrentFile: + with open(torrent_path, "rb") as torrentFile: files = { - "file_input": ("placeholder.torrent", torrentFile, "application/x-bittorent") + "file_input": ( + "placeholder.torrent", + torrentFile, + "application/x-bittorent", + ) } headers = { # 'ApiUser' : self.api_user, # 'ApiKey' : self.api_key, "User-Agent": self.user_agent } - if meta['debug']: + if meta["debug"]: console.log(url) console.log(data) else: with requests.Session() as session: cookiefile = f"{meta['base_dir']}/data/cookies/PTP.pickle" - with open(cookiefile, 'rb') as cf: + with open(cookiefile, "rb") as cf: session.cookies.update(pickle.load(cf)) - response = session.post(url=url, data=data, headers=headers, files=files) + response = session.post( + url=url, data=data, headers=headers, files=files + ) console.print(f"[cyan]{response.url}") responsetext = response.text # If the response contains our announce URL, then we are on the upload page and the upload wasn't successful. if responsetext.find(self.announce_url) != -1: # Get the error message. errorMessage = "" - match = re.search(r"""
= 0.05: dupes.append(result) except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + console.print( + "[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect" + ) await asyncio.sleep(5) return dupes diff --git a/src/trackers/RF.py b/src/trackers/RF.py index c90f8b4db..09d834351 100644 --- a/src/trackers/RF.py +++ b/src/trackers/RF.py @@ -9,7 +9,7 @@ from src.console import console -class RF(): +class RF: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -20,10 +20,10 @@ class RF(): def __init__(self, config): self.config = config - self.tracker = 'RF' - self.source_flag = 'ReelFliX' - self.upload_url = 'https://reelflix.xyz/api/torrents/upload' - self.search_url = 'https://reelflix.xyz/api/torrents/filter' + self.tracker = "RF" + self.source_flag = "ReelFliX" + self.upload_url = "https://reelflix.xyz/api/torrents/upload" + self.search_url = "https://reelflix.xyz/api/torrents/filter" self.forum_link = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = [""] pass @@ -32,68 +32,98 @@ async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, self.tracker, self.forum_link) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) + region_id = await common.unit3d_region_ids(meta.get("region")) + distributor_id = await common.unit3d_distributor_ids(meta.get("distributor")) + cat_id = await self.get_cat_id(meta["category"]) + type_id = await self.get_type_id(meta["type"]) + resolution_id = await self.get_res_id(meta["resolution"]) stt_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + if ( + meta["anon"] == 0 + and bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + is False + ): anon = 0 else: anon = 1 - if meta['bdinfo'] is not None: + if meta["bdinfo"] is not None: mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + bd_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ).read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ).read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') - files = {'torrent': open_torrent} + desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + open_torrent = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", + "rb", + ) + files = {"torrent": open_torrent} data = { - 'name': stt_name, - 'description': desc, - 'mediainfo': 
mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb_id'].replace('tt', ''), - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, + "name": stt_name, + "description": desc, + "mediainfo": mi_dump, + "bdinfo": bd_dump, + "category_id": cat_id, + "type_id": type_id, + "resolution_id": resolution_id, + "tmdb": meta["tmdb"], + "imdb": meta["imdb_id"].replace("tt", ""), + "tvdb": meta["tvdb_id"], + "mal": meta["mal_id"], + "igdb": 0, + "anonymous": anon, + "stream": meta["stream"], + "sd": meta["sd"], + "keywords": meta["keywords"], + "personal_release": int(meta.get("personalrelease", False)), + "internal": 0, + "featured": 0, + "free": 0, + "doubleup": 0, + "sticky": 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 + if self.config["TRACKERS"][self.tracker].get("internal", False) is True: + if meta["tag"] != "" and ( + meta["tag"][1:] + in self.config["TRACKERS"][self.tracker].get("internal_groups", []) + ): + data["internal"] = 1 if region_id != 0: - data['region_id'] = region_id + data["region_id"] = region_id if distributor_id != 0: - data['distributor_id'] = distributor_id + data["distributor_id"] = distributor_id headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + "User-Agent": f"Upload Assistant/2.1 ({platform.system()} {platform.release()})" } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } - if meta.get('category') == "TV": - console.print('[bold red]This site only ALLOWS Movies.') - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + params = {"api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip()} + if meta.get("category") == "TV": + console.print("[bold red]This site only ALLOWS Movies.") + if meta["debug"] is False: + response = requests.post( + url=self.upload_url, + files=files, + data=data, + headers=headers, + params=params, + ) try: console.print(response.json()) except Exception: @@ -105,69 +135,73 @@ async def upload(self, meta, disctype): open_torrent.close() async def edit_name(self, meta): - stt_name = meta['name'] + stt_name = meta["name"] return stt_name async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - }.get(category_name, '0') + "MOVIE": "1", + }.get(category_name, "0") return category_id async def get_type_id(self, type): type_id = { - 'DISC': '43', - 'REMUX': '40', - 'WEBDL': '42', - 'WEBRIP': '45', + "DISC": "43", + "REMUX": "40", + "WEBDL": "42", + "WEBRIP": "45", # 'FANRES': '6', - 'ENCODE': '41', - 'HDTV': '35', - }.get(type, '0') + "ENCODE": "41", + "HDTV": "35", + }.get(type, "0") return type_id async def get_res_id(self, resolution): resolution_id = { # '8640p':'10', - '4320p': '1', - '2160p': '2', + "4320p": "1", + "2160p": "2", # '1440p' : '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') + "1080p": "3", + "1080i": "4", + "720p": "5", + 
"576p": "6", + "576i": "7", + "480p": "8", + "480i": "9", + }.get(resolution, "10") return resolution_id async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" + "api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip(), + "tmdbId": meta["tmdb"], + "categories[]": await self.get_cat_id(meta["category"]), + "types[]": await self.get_type_id(meta["type"]), + "resolutions[]": await self.get_res_id(meta["resolution"]), + "name": "", } - if meta['category'] == 'TV': - console.print('[bold red]Unable to search site for TV as this site only ALLOWS Movies') + if meta["category"] == "TV": + console.print( + "[bold red]Unable to search site for TV as this site only ALLOWS Movies" + ) # params['name'] = f"{meta.get('season', '')}{meta.get('episode', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + meta['edition'] + if meta.get("edition", "") != "": + params["name"] = params["name"] + meta["edition"] try: response = requests.get(url=self.search_url, params=params) response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] + for each in response["data"]: + result = [each][0]["attributes"]["name"] # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + console.print( + "[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect" + ) await asyncio.sleep(5) return dupes diff --git a/src/trackers/RTF.py b/src/trackers/RTF.py index b5ddf485f..a36ecea35 100644 --- a/src/trackers/RTF.py +++ b/src/trackers/RTF.py @@ -11,7 +11,7 @@ from src.console import console -class RTF(): +class RTF: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -19,13 +19,16 @@ class RTF(): Set type/category IDs Upload """ + def __init__(self, config): self.config = config - self.tracker = 'RTF' - self.source_flag = 'sunshine' - self.upload_url = 'https://retroflix.club/api/upload' - self.search_url = 'https://retroflix.club/api/torrent' - self.forum_link = 'https://retroflix.club/forums.php?action=viewtopic&topicid=3619' + self.tracker = "RTF" + self.source_flag = "sunshine" + self.upload_url = "https://retroflix.club/api/upload" + self.search_url = "https://retroflix.club/api/torrent" + self.forum_link = ( + "https://retroflix.club/forums.php?action=viewtopic&topicid=3619" + ) self.banned_groups = [] pass @@ -33,58 +36,89 @@ async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, self.tracker, self.forum_link) - if meta['bdinfo'] is not None: + if meta["bdinfo"] is not None: mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + bd_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ).read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ).read() bd_dump = None screenshots = [] - for image in meta['image_list']: - if image['raw_url'] is not None: - screenshots.append(image['raw_url']) + for image in meta["image_list"]: + if image["raw_url"] is not None: + screenshots.append(image["raw_url"]) json_data = { - 'name': meta['name'], + "name": meta["name"], # description does not work for some reason # 'description' : meta['overview'] + "\n\n" + desc + "\n\n" + "Uploaded by L4G Upload Assistant", - 'description': "this is a description", + "description": "this is a description", # editing mediainfo so that instead of 1 080p its 1,080p as site mediainfo parser wont work other wise. 
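The comment directly above ("1 080p" to "1,080p") refers to the re.sub on the next hunk line, which rejoins digit groups that MediaInfo prints with spaces so RetroFlix's parser accepts them. A quick self-contained illustration:

import re

sample = "Width : 1 920 pixels\nHeight : 1 080 pixels"
print(re.sub(r"(\d+)\s+(\d+)", r"\1,\2", sample))
# Width : 1,920 pixels
# Height : 1,080 pixels

Note the pattern rewrites any space-separated digit pair, not just resolutions, so it is deliberately blunt.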
- 'mediaInfo': re.sub(r"(\d+)\s+(\d+)", r"\1,\2", mi_dump) if bd_dump is None else f"{bd_dump}", + "mediaInfo": ( + re.sub(r"(\d+)\s+(\d+)", r"\1,\2", mi_dump) + if bd_dump is None + else f"{bd_dump}" + ), "nfo": "", - "url": "https://www.imdb.com/title/" + (meta['imdb_id'] if str(meta['imdb_id']).startswith("tt") else "tt" + meta['imdb_id']) + "/", + "url": "https://www.imdb.com/title/" + + ( + meta["imdb_id"] + if str(meta["imdb_id"]).startswith("tt") + else "tt" + meta["imdb_id"] + ) + + "/", # auto pulled from IMDB "descr": "This is short description", "poster": meta["poster"] if meta["poster"] is not None else "", - "type": "401" if meta['category'] == 'MOVIE'else "402", + "type": "401" if meta["category"] == "MOVIE" else "402", "screenshots": screenshots, - 'isAnonymous': self.config['TRACKERS'][self.tracker]["anon"], + "isAnonymous": self.config["TRACKERS"][self.tracker]["anon"], } - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') as binary_file: + with open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", + "rb", + ) as binary_file: binary_file_data = binary_file.read() base64_encoded_data = base64.b64encode(binary_file_data) - base64_message = base64_encoded_data.decode('utf-8') - json_data['file'] = base64_message + base64_message = base64_encoded_data.decode("utf-8") + json_data["file"] = base64_message headers = { - 'accept': 'application/json', - 'Content-Type': 'application/json', - 'Authorization': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + "accept": "application/json", + "Content-Type": "application/json", + "Authorization": self.config["TRACKERS"][self.tracker]["api_key"].strip(), } - if datetime.date.today().year - meta['year'] <= 9: - console.print("[red]ERROR: Not uploading!\nMust be older than 10 Years as per rules") + if datetime.date.today().year - meta["year"] <= 9: + console.print( + "[red]ERROR: Not uploading!\nMust be older than 10 Years as per rules" + ) return - if meta['debug'] is False: - response = requests.post(url=self.upload_url, json=json_data, headers=headers) + if meta["debug"] is False: + response = requests.post( + url=self.upload_url, json=json_data, headers=headers + ) try: console.print(response.json()) - t_id = response.json()['torrent']['id'] - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://retroflix.club/browse/t/" + str(t_id)) + t_id = response.json()["torrent"]["id"] + await common.add_tracker_torrent( + meta, + self.tracker, + self.source_flag, + self.config["TRACKERS"][self.tracker].get("announce_url"), + "https://retroflix.club/browse/t/" + str(t_id), + ) except Exception: console.print("It may have uploaded, go check") @@ -97,27 +131,33 @@ async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") headers = { - 'accept': 'application/json', - 'Authorization': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + "accept": "application/json", + "Authorization": self.config["TRACKERS"][self.tracker]["api_key"].strip(), } - params = { - 'includingDead': '1' - } + params = {"includingDead": "1"} - if meta['imdb_id'] != "0": - params['imdbId'] = meta['imdb_id'] if str(meta['imdb_id']).startswith("tt") else "tt" + meta['imdb_id'] + if meta["imdb_id"] != "0": + params["imdbId"] = ( + meta["imdb_id"] + if str(meta["imdb_id"]).startswith("tt") + else "tt" + meta["imdb_id"] 
+ ) else: - params['search'] = meta['title'].replace(':', '').replace("'", '').replace(",", '') + params["search"] = ( + meta["title"].replace(":", "").replace("'", "").replace(",", "") + ) try: response = requests.get(url=self.search_url, params=params, headers=headers) response = response.json() for each in response: - result = [each][0]['name'] + result = [each][0]["name"] dupes.append(result) except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + console.print( + "[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect" + ) await asyncio.sleep(5) return dupes @@ -125,67 +165,77 @@ async def search_existing(self, meta, disctype): # Tests if stored API key is valid. Site API key expires every week so a new one has to be generated. async def api_test(self, meta): headers = { - 'accept': 'application/json', - 'Authorization': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + "accept": "application/json", + "Authorization": self.config["TRACKERS"][self.tracker]["api_key"].strip(), } - response = requests.get('https://retroflix.club/api/test', headers=headers) + response = requests.get("https://retroflix.club/api/test", headers=headers) if response.status_code != 200: - console.print('[bold red]Your API key is incorrect SO generating a new one') + console.print("[bold red]Your API key is incorrect SO generating a new one") await self.generate_new_api(meta) else: return async def generate_new_api(self, meta): headers = { - 'accept': 'application/json', + "accept": "application/json", } json_data = { - 'username': self.config['TRACKERS'][self.tracker]['username'], - 'password': self.config['TRACKERS'][self.tracker]['password'], + "username": self.config["TRACKERS"][self.tracker]["username"], + "password": self.config["TRACKERS"][self.tracker]["password"], } - base_dir = meta.get('base_dir', '.') + base_dir = meta.get("base_dir", ".") config_path = f"{base_dir}/data/config.py" try: async with httpx.AsyncClient() as client: - response = await client.post('https://retroflix.club/api/login', headers=headers, json=json_data) + response = await client.post( + "https://retroflix.club/api/login", headers=headers, json=json_data + ) if response.status_code == 201: token = response.json().get("token") if token: - console.print('[bold green]Saving and using New API key generated for this upload') - console.print(f'[bold yellow]{token}') + console.print( + "[bold green]Saving and using New API key generated for this upload" + ) + console.print(f"[bold yellow]{token}") # Update the in-memory config dictionary - self.config['TRACKERS'][self.tracker]['api_key'] = token + self.config["TRACKERS"][self.tracker]["api_key"] = token # Now we update the config file on disk using utf-8 encoding - with open(config_path, 'r', encoding='utf-8') as file: + with open(config_path, "r", encoding="utf-8") as file: config_data = file.read() # Find the RTF tracker and replace the api_key value new_config_data = re.sub( r'("RTF":\s*{[^}]*"api_key":\s*\')[^\']*(\'[^\}]*})', # Match the api_key content only between single quotes - rf'\1{token}\2', # Replace only the content inside the quotes without adding extra backslashes - config_data + rf"\1{token}\2", # Replace only the content inside the quotes without adding extra backslashes + config_data, ) # Write the updated config back to the file - with open(config_path, 'w', encoding='utf-8') as file: + with 
open(config_path, "w", encoding="utf-8") as file: file.write(new_config_data) - console.print(f'[bold green]API Key successfully saved to {config_path}') + console.print( + f"[bold green]API Key successfully saved to {config_path}" + ) else: - console.print('[bold red]API response does not contain a token.') + console.print("[bold red]API response does not contain a token.") else: - console.print(f'[bold red]Error getting new API key: {response.status_code}, please check username and password in the config.') + console.print( + f"[bold red]Error getting new API key: {response.status_code}, please check username and password in the config." + ) except httpx.RequestError as e: - console.print(f'[bold red]An error occurred while requesting the API: {str(e)}') + console.print( + f"[bold red]An error occurred while requesting the API: {str(e)}" + ) except Exception as e: - console.print(f'[bold red]An unexpected error occurred: {str(e)}') + console.print(f"[bold red]An unexpected error occurred: {str(e)}") diff --git a/src/trackers/SHRI.py b/src/trackers/SHRI.py index 6862b431f..9484d0a6b 100644 --- a/src/trackers/SHRI.py +++ b/src/trackers/SHRI.py @@ -9,7 +9,7 @@ from src.console import console -class SHRI(): +class SHRI: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -20,116 +20,146 @@ class SHRI(): def __init__(self, config): self.config = config - self.tracker = 'SHRI' - self.source_flag = 'Shareisland' - self.upload_url = 'https://shareisland.org/api/torrents/upload' - self.search_url = 'https://shareisland.org/api/torrents/filter' + self.tracker = "SHRI" + self.source_flag = "Shareisland" + self.upload_url = "https://shareisland.org/api/torrents/upload" + self.search_url = "https://shareisland.org/api/torrents/filter" self.signature = "\n[center][url=https://shareisland.org]Created by SHRI Upload Assistant[/url][/center]" self.banned_groups = [""] pass async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') + "MOVIE": "1", + "TV": "2", + }.get(category_name, "0") return category_id async def get_type_id(self, type): type_id = { - 'DISC': '26', - 'REMUX': '7', - 'WEBDL': '27', - 'WEBRIP': '27', - 'HDTV': '6', - 'ENCODE': '15' - }.get(type, '0') + "DISC": "26", + "REMUX": "7", + "WEBDL": "27", + "WEBRIP": "27", + "HDTV": "6", + "ENCODE": "15", + }.get(type, "0") return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') + "8640p": "10", + "4320p": "1", + "2160p": "2", + "1440p": "3", + "1080p": "3", + "1080i": "4", + "720p": "5", + "576p": "6", + "576i": "7", + "480p": "8", + "480i": "9", + }.get(resolution, "10") return resolution_id async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) + cat_id = await self.get_cat_id(meta["category"]) + type_id = await self.get_type_id(meta["type"]) + resolution_id = await self.get_res_id(meta["resolution"]) await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if 
meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + region_id = await common.unit3d_region_ids(meta.get("region")) + distributor_id = await common.unit3d_distributor_ids(meta.get("distributor")) + if ( + meta["anon"] == 0 + and bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + is False + ): anon = 0 else: anon = 1 - if meta['bdinfo'] is not None: + if meta["bdinfo"] is not None: mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + bd_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ).read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ).read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') - files = {'torrent': open_torrent} + desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + open_torrent = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", + "rb", + ) + files = {"torrent": open_torrent} data = { - 'name': meta['name'], - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb_id'].replace('tt', ''), - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, + "name": meta["name"], + "description": desc, + "mediainfo": mi_dump, + "bdinfo": bd_dump, + "category_id": cat_id, + "type_id": type_id, + "resolution_id": resolution_id, + "tmdb": meta["tmdb"], + "imdb": meta["imdb_id"].replace("tt", ""), + "tvdb": meta["tvdb_id"], + "mal": meta["mal_id"], + "igdb": 0, + "anonymous": anon, + "stream": meta["stream"], + "sd": meta["sd"], + "keywords": meta["keywords"], + "personal_release": int(meta.get("personalrelease", False)), + "internal": 0, + "featured": 0, + "free": 0, + "doubleup": 0, + "sticky": 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 + if self.config["TRACKERS"][self.tracker].get("internal", False) is True: + if meta["tag"] != "" and ( + meta["tag"][1:] + in self.config["TRACKERS"][self.tracker].get("internal_groups", []) + ): + data["internal"] = 1 if region_id != 0: - data['region_id'] = region_id + data["region_id"] = region_id if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') + data["distributor_id"] = distributor_id + if meta.get("category") == "TV": + data["season_number"] = meta.get("season_int", "0") + data["episode_number"] = meta.get("episode_int", 
"0") headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + "User-Agent": f"Upload Assistant/2.1 ({platform.system()} {platform.release()})" } + params = {"api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip()} - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + if meta["debug"] is False: + response = requests.post( + url=self.upload_url, + files=files, + data=data, + headers=headers, + params=params, + ) try: console.print(response.json()) except Exception: @@ -144,25 +174,27 @@ async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" + "api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip(), + "tmdbId": meta["tmdb"], + "categories[]": await self.get_cat_id(meta["category"]), + "types[]": await self.get_type_id(meta["type"]), + "resolutions[]": await self.get_res_id(meta["resolution"]), + "name": "", } - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" + if meta.get("edition", "") != "": + params["name"] = params["name"] + f" {meta['edition']}" try: response = requests.get(url=self.search_url, params=params) response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] + for each in response["data"]: + result = [each][0]["attributes"]["name"] # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + console.print( + "[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect" + ) await asyncio.sleep(5) return dupes diff --git a/src/trackers/SN.py b/src/trackers/SN.py index 199ff68e0..efaad1632 100644 --- a/src/trackers/SN.py +++ b/src/trackers/SN.py @@ -6,7 +6,7 @@ from src.console import console -class SN(): +class SN: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -14,24 +14,27 @@ class SN(): Set type/category IDs Upload """ + def __init__(self, config): self.config = config - self.tracker = 'SN' - self.source_flag = 'Swarmazon' - self.upload_url = 'https://swarmazon.club/api/upload.php' - self.forum_link = 'https://swarmazon.club/php/forum.php?forum_page=2-swarmazon-rules' - self.search_url = 'https://swarmazon.club/api/search.php' + self.tracker = "SN" + self.source_flag = "Swarmazon" + self.upload_url = "https://swarmazon.club/api/upload.php" + self.forum_link = ( + "https://swarmazon.club/php/forum.php?forum_page=2-swarmazon-rules" + ) + self.search_url = "https://swarmazon.club/api/search.php" self.banned_groups = [""] pass async def get_type_id(self, type): type_id = { - 'BluRay': '3', - 'Web': '1', + "BluRay": "3", + "Web": "1", # boxset is 4 # 'NA': '4', - 'DVD': '2' - }.get(type, '0') + "DVD": "2", + }.get(type, "0") return type_id async def upload(self, meta, disctype): @@ -42,34 +45,47 @@ async def upload(self, meta, disctype): cat_id = "" sub_cat_id = "" # cat_id = await self.get_cat_id(meta) - if meta['category'] == 'MOVIE': + if meta["category"] == "MOVIE": cat_id = 1 # sub cat is source so using source to get - sub_cat_id = await self.get_type_id(meta['source']) - elif meta['category'] == 'TV': + sub_cat_id = await self.get_type_id(meta["source"]) + elif meta["category"] == "TV": cat_id = 2 - if meta['tv_pack']: + if meta["tv_pack"]: sub_cat_id = 6 else: sub_cat_id = 5 # todo need to do a check for docs and add as subcat - if meta['bdinfo'] is not None: + if meta["bdinfo"] is not None: mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + bd_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ).read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ).read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') as f: + desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + + with open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", + "rb", + ) as f: tfile = f.read() f.close() # uploading torrent file. 
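# A quick aside before the form fields below: the trackers in this diff attach
# the .torrent in one of two ways. UNIT3D-style APIs (this SN handler, SHRI,
# STC, STT, TDC) send it as a multipart file field, while RTF above embeds the
# raw bytes base64-encoded inside a JSON body. A minimal sketch of both styles;
# `upload_url` and `api_key` are hypothetical placeholders, not values from
# this codebase:
import base64

import requests

upload_url = "https://example.invalid/api/upload"  # hypothetical endpoint
api_key = "changeme"  # hypothetical key

with open("example.torrent", "rb") as f:
    raw = f.read()

# Multipart form upload (UNIT3D-style endpoints).
requests.post(
    upload_url,
    params={"api_token": api_key},
    files={"torrent": ("example.torrent", raw)},
)

# JSON body with the torrent base64-encoded (RTF-style endpoint).
requests.post(
    upload_url,
    headers={"Authorization": api_key},
    json={"file": base64.b64encode(raw).decode("utf-8")},
)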
- files = { - 'torrent': (f"{meta['name']}.torrent", tfile) - } + files = {"torrent": (f"{meta['name']}.torrent", tfile)} # adding bd_dump to description if it exists and adding empty string to mediainfo if bd_dump: @@ -77,21 +93,22 @@ async def upload(self, meta, disctype): mi_dump = "" data = { - 'api_key': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': meta['name'], - 'category_id': cat_id, - 'type_id': sub_cat_id, - 'media_ref': f"tt{meta['imdb_id']}", - 'description': desc, - 'media_info': mi_dump - + "api_key": self.config["TRACKERS"][self.tracker]["api_key"].strip(), + "name": meta["name"], + "category_id": cat_id, + "type_id": sub_cat_id, + "media_ref": f"tt{meta['imdb_id']}", + "description": desc, + "media_info": mi_dump, } - if meta['debug'] is False: - response = requests.request("POST", url=self.upload_url, data=data, files=files) + if meta["debug"] is False: + response = requests.request( + "POST", url=self.upload_url, data=data, files=files + ) try: - if response.json().get('success'): + if response.json().get("success"): console.print(response.json()) else: console.print("[red]Did not upload successfully") @@ -106,18 +123,28 @@ async def upload(self, meta, disctype): console.print(data) async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as desc: + base = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + with open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "w", + encoding="utf-8", + ) as desc: desc.write(base) - images = meta['image_list'] + images = meta["image_list"] if len(images) > 0: desc.write("[center]") for each in range(len(images)): - web_url = images[each]['web_url'] - img_url = images[each]['img_url'] + web_url = images[each]["web_url"] + img_url = images[each]["img_url"] desc.write(f"[url={web_url}][img=720]{img_url}[/img][/url]") desc.write("[/center]") - desc.write(f"\n[center][url={self.forum_link}]Simplicity, Socializing and Sharing![/url][/center]") + desc.write( + f"\n[center][url={self.forum_link}]Simplicity, Socializing and Sharing![/url][/center]" + ) desc.close() return @@ -125,33 +152,42 @@ async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") - params = { - 'api_key': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } + params = {"api_key": self.config["TRACKERS"][self.tracker]["api_key"].strip()} # using title if IMDB id does not exist to search - if meta['imdb_id'] == 0: - if meta['category'] == 'TV': - params['filter'] = meta['title'] + f"{meta.get('season', '')}{meta.get('episode', '')}" + " " + meta['resolution'] + if meta["imdb_id"] == 0: + if meta["category"] == "TV": + params["filter"] = ( + meta["title"] + + f"{meta.get('season', '')}{meta.get('episode', '')}" + + " " + + meta["resolution"] + ) else: - params['filter'] = meta['title'] + params["filter"] = meta["title"] else: # using IMDB_id to search if it exists.
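# Condensed form of the branch that continues below: prefer the IMDb id as
# `media_ref` when one exists, otherwise fall back to a title filter; TV
# searches add season/episode, movie searches add resolution. Sketch only;
# `build_sn_search_params` is a hypothetical helper, not part of this diff:
def build_sn_search_params(meta, api_key):
    params = {"api_key": api_key}
    se = f"{meta.get('season', '')}{meta.get('episode', '')}"
    if meta["imdb_id"] == 0:
        # No IMDb id: search on the title (plus episode/resolution for TV).
        if meta["category"] == "TV":
            params["filter"] = f"{meta['title']}{se} {meta['resolution']}"
        else:
            params["filter"] = meta["title"]
    else:
        # IMDb id available: pin the search to it via media_ref.
        params["media_ref"] = f"tt{meta['imdb_id']}"
        if meta["category"] == "TV":
            params["filter"] = f"{se} {meta['resolution']}"
        else:
            params["filter"] = meta["resolution"]
    return params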
- if meta['category'] == 'TV': - params['media_ref'] = f"tt{meta['imdb_id']}" - params['filter'] = f"{meta.get('season', '')}{meta.get('episode', '')}" + " " + meta['resolution'] + if meta["category"] == "TV": + params["media_ref"] = f"tt{meta['imdb_id']}" + params["filter"] = ( + f"{meta.get('season', '')}{meta.get('episode', '')}" + + " " + + meta["resolution"] + ) else: - params['media_ref'] = f"tt{meta['imdb_id']}" - params['filter'] = meta['resolution'] + params["media_ref"] = f"tt{meta['imdb_id']}" + params["filter"] = meta["resolution"] try: response = requests.get(url=self.search_url, params=params) response = response.json() - for i in response['data']: - result = i['name'] + for i in response["data"]: + result = i["name"] dupes.append(result) except Exception: - console.print('[red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + console.print( + "[red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect" + ) await asyncio.sleep(5) return dupes diff --git a/src/trackers/STC.py b/src/trackers/STC.py index fb17b2c0a..ad8513be3 100644 --- a/src/trackers/STC.py +++ b/src/trackers/STC.py @@ -8,7 +8,7 @@ from src.console import console -class STC(): +class STC: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -16,13 +16,14 @@ class STC(): Set type/category IDs Upload """ + def __init__(self, config): self.config = config - self.tracker = 'STC' - self.source_flag = 'STC' - self.upload_url = 'https://skipthecommericals.xyz/api/torrents/upload' - self.search_url = 'https://skipthecommericals.xyz/api/torrents/filter' - self.signature = '\n[center][url=https://skipthecommericals.xyz/pages/1]Please Seed[/url][/center]' + self.tracker = "STC" + self.source_flag = "STC" + self.upload_url = "https://skipthecommericals.xyz/api/torrents/upload" + self.search_url = "https://skipthecommericals.xyz/api/torrents/filter" + self.signature = "\n[center][url=https://skipthecommericals.xyz/pages/1]Please Seed[/url][/center]" self.banned_groups = [""] pass @@ -30,64 +31,99 @@ async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, self.tracker, self.signature) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('sd', 0), meta.get('category', "")) - resolution_id = await self.get_res_id(meta['resolution']) + cat_id = await self.get_cat_id(meta["category"]) + type_id = await self.get_type_id( + meta["type"], + meta.get("tv_pack", 0), + meta.get("sd", 0), + meta.get("category", ""), + ) + resolution_id = await self.get_res_id(meta["resolution"]) stc_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + if ( + meta["anon"] == 0 + and bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + is False + ): anon = 0 else: anon = 1 - if meta['bdinfo'] is not None: + if meta["bdinfo"] is not None: mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + bd_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ).read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + mi_dump = open( + 
f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ).read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') - files = {'torrent': open_torrent} + desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + open_torrent = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", + "rb", + ) + files = {"torrent": open_torrent} data = { - 'name': stc_name, - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb_id'].replace('tt', ''), - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, + "name": stc_name, + "description": desc, + "mediainfo": mi_dump, + "bdinfo": bd_dump, + "category_id": cat_id, + "type_id": type_id, + "resolution_id": resolution_id, + "tmdb": meta["tmdb"], + "imdb": meta["imdb_id"].replace("tt", ""), + "tvdb": meta["tvdb_id"], + "mal": meta["mal_id"], + "igdb": 0, + "anonymous": anon, + "stream": meta["stream"], + "sd": meta["sd"], + "keywords": meta["keywords"], + "personal_release": int(meta.get("personalrelease", False)), + "internal": 0, + "featured": 0, + "free": 0, + "doubleup": 0, + "sticky": 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 + if self.config["TRACKERS"][self.tracker].get("internal", False) is True: + if meta["tag"] != "" and ( + meta["tag"][1:] + in self.config["TRACKERS"][self.tracker].get("internal_groups", []) + ): + data["internal"] = 1 - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') + if meta.get("category") == "TV": + data["season_number"] = meta.get("season_int", "0") + data["episode_number"] = meta.get("episode_int", "0") headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + "User-Agent": f"Upload Assistant/2.1 ({platform.system()} {platform.release()})" } + params = {"api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip()} - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + if meta["debug"] is False: + response = requests.post( + url=self.upload_url, + files=files, + data=data, + headers=headers, + params=params, + ) try: console.print(response.json()) @@ -101,84 +137,91 @@ async def upload(self, meta, disctype): open_torrent.close() async def edit_name(self, meta): - stc_name = meta.get('name') + stc_name = meta.get("name") return stc_name async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') + "MOVIE": "1", + "TV": "2", + }.get(category_name, "0") return 
category_id async def get_type_id(self, type, tv_pack, sd, category): type_id = { - 'DISC': '1', - 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - 'ENCODE': '3' - }.get(type, '0') + "DISC": "1", + "REMUX": "2", + "WEBDL": "4", + "WEBRIP": "5", + "HDTV": "6", + "ENCODE": "3", + }.get(type, "0") if tv_pack == 1: if sd == 1: # Season SD - type_id = '14' + type_id = "14" if type == "ENCODE": - type_id = '18' + type_id = "18" if sd == 0: # Season HD - type_id = '13' + type_id = "13" if type == "ENCODE": - type_id = '18' + type_id = "18" if type == "DISC" and category == "TV": if sd == 1: # SD-RETAIL - type_id = '17' + type_id = "17" if sd == 0: # HD-RETAIL - type_id = '18' + type_id = "18" return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') + "8640p": "10", + "4320p": "1", + "2160p": "2", + "1440p": "3", + "1080p": "3", + "1080i": "4", + "720p": "5", + "576p": "6", + "576i": "7", + "480p": "8", + "480i": "9", + }.get(resolution, "10") return resolution_id async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('sd', 0), meta.get('category', "")), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" + "api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip(), + "tmdbId": meta["tmdb"], + "categories[]": await self.get_cat_id(meta["category"]), + "types[]": await self.get_type_id( + meta["type"], + meta.get("tv_pack", 0), + meta.get("sd", 0), + meta.get("category", ""), + ), + "resolutions[]": await self.get_res_id(meta["resolution"]), + "name": "", } - if meta['category'] == 'TV': - params['name'] = f"{meta.get('season', '')}{meta.get('episode', '')}" - if meta.get('edition', "") != "": - params['name'] + meta['edition'] + if meta["category"] == "TV": + params["name"] = f"{meta.get('season', '')}{meta.get('episode', '')}" + if meta.get("edition", "") != "": + params["name"] = params["name"] + meta["edition"] try: response = requests.get(url=self.search_url, params=params) response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] + for each in response["data"]: + result = [each][0]["attributes"]["name"] dupes.append(result) except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + console.print( + "[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect" + ) await asyncio.sleep(5) return dupes diff --git a/src/trackers/STT.py b/src/trackers/STT.py index 2f8ee800d..9d96a1abd 100644 --- a/src/trackers/STT.py +++ b/src/trackers/STT.py @@ -9,7 +9,7 @@ from src.console import console -class STT(): +class STT: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -17,13 +17,14 @@ class STT(): Set type/category IDs Upload """ + def __init__(self, config): self.config = config - self.tracker = 'STT' - self.source_flag = 'STT' - self.search_url = 'https://skipthetrailers.xyz/api/torrents/filter' - self.upload_url = 'https://skipthetrailers.xyz/api/torrents/upload' - self.signature = '\n[center][url=https://skipthetrailers.xyz/pages/1]Please Seed[/url][/center]' + self.tracker = "STT" + self.source_flag = "STT" + self.search_url = "https://skipthetrailers.xyz/api/torrents/filter" + self.upload_url = "https://skipthetrailers.xyz/api/torrents/upload" + self.signature = "\n[center][url=https://skipthetrailers.xyz/pages/1]Please Seed[/url][/center]" self.banned_groups = [""] pass @@ -31,62 +32,92 @@ async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, self.tracker, self.signature) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) + cat_id = await self.get_cat_id(meta["category"]) + type_id = await self.get_type_id(meta["type"]) + resolution_id = await self.get_res_id(meta["resolution"]) stt_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + if ( + meta["anon"] == 0 + and bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + is False + ): anon = 0 else: anon = 1 - if meta['bdinfo'] is not None: + if meta["bdinfo"] is not None: mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + bd_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ).read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ).read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') - files = {'torrent': open_torrent} + desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + open_torrent = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", + "rb", + ) + files = {"torrent": open_torrent} data = { - 'name': stt_name, - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb_id'].replace('tt', ''), - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 
'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, + "name": stt_name, + "description": desc, + "mediainfo": mi_dump, + "bdinfo": bd_dump, + "category_id": cat_id, + "type_id": type_id, + "resolution_id": resolution_id, + "tmdb": meta["tmdb"], + "imdb": meta["imdb_id"].replace("tt", ""), + "tvdb": meta["tvdb_id"], + "mal": meta["mal_id"], + "igdb": 0, + "anonymous": anon, + "stream": meta["stream"], + "sd": meta["sd"], + "keywords": meta["keywords"], + "personal_release": int(meta.get("personalrelease", False)), + "internal": 0, + "featured": 0, + "free": 0, + "doubleup": 0, + "sticky": 0, } headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + "User-Agent": f"Upload Assistant/2.1 ({platform.system()} {platform.release()})" } + params = {"api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip()} # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 + if self.config["TRACKERS"][self.tracker].get("internal", False) is True: + if meta["tag"] != "" and ( + meta["tag"][1:] + in self.config["TRACKERS"][self.tracker].get("internal_groups", []) + ): + data["internal"] = 1 - if meta.get('category') == "TV": - console.print('[bold red]This site only ALLOWS Movies.') - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + if meta.get("category") == "TV": + console.print("[bold red]This site only ALLOWS Movies.") + if meta["debug"] is False: + response = requests.post( + url=self.upload_url, + files=files, + data=data, + headers=headers, + params=params, + ) try: console.print(response.json()) except Exception: @@ -98,68 +129,72 @@ async def upload(self, meta, disctype): open_torrent.close() async def edit_name(self, meta): - stt_name = meta['name'] + stt_name = meta["name"] return stt_name async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - }.get(category_name, '0') + "MOVIE": "1", + }.get(category_name, "0") return category_id async def get_type_id(self, type): type_id = { - 'DISC': '1', - 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', - 'FANRES': '6', - 'ENCODE': '3' - }.get(type, '0') + "DISC": "1", + "REMUX": "2", + "WEBDL": "4", + "WEBRIP": "5", + "FANRES": "6", + "ENCODE": "3", + }.get(type, "0") return type_id async def get_res_id(self, resolution): resolution_id = { # '8640p':'10', - '4320p': '1', - '2160p': '2', + "4320p": "1", + "2160p": "2", # '1440p' : '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '11') + "1080p": "3", + "1080i": "4", + "720p": "5", + "576p": "6", + "576i": "7", + "480p": "8", + "480i": "9", + }.get(resolution, "11") return resolution_id async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" + "api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip(), + "tmdbId": meta["tmdb"], + 
"categories[]": await self.get_cat_id(meta["category"]), + "types[]": await self.get_type_id(meta["type"]), + "resolutions[]": await self.get_res_id(meta["resolution"]), + "name": "", } - if meta['category'] == 'TV': - console.print('[bold red]Unable to search site for TV as this site only ALLOWS Movies.') + if meta["category"] == "TV": + console.print( + "[bold red]Unable to search site for TV as this site only ALLOWS Movies." + ) # params['name'] = f"{meta.get('season', '')}{meta.get('episode', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + meta['edition'] + if meta.get("edition", "") != "": + params["name"] = params["name"] + meta["edition"] try: response = requests.get(url=self.search_url, params=params) response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] + for each in response["data"]: + result = [each][0]["attributes"]["name"] # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + console.print( + "[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect" + ) await asyncio.sleep(5) return dupes diff --git a/src/trackers/TDC.py b/src/trackers/TDC.py index b2dd45c8e..f3bf9920f 100644 --- a/src/trackers/TDC.py +++ b/src/trackers/TDC.py @@ -8,7 +8,7 @@ from src.console import console -class TDC(): +class TDC: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -19,116 +19,146 @@ class TDC(): def __init__(self, config): self.config = config - self.tracker = 'TDC' - self.source_flag = 'TDC' - self.upload_url = 'https://thedarkcommunity.cc/api/torrents/upload' - self.search_url = 'https://thedarkcommunity.cc/api/torrents/filter' + self.tracker = "TDC" + self.source_flag = "TDC" + self.upload_url = "https://thedarkcommunity.cc/api/torrents/upload" + self.search_url = "https://thedarkcommunity.cc/api/torrents/filter" self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = [""] pass async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') + "MOVIE": "1", + "TV": "2", + }.get(category_name, "0") return category_id async def get_type_id(self, type): type_id = { - 'DISC': '1', - 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - 'ENCODE': '3' - }.get(type, '0') + "DISC": "1", + "REMUX": "2", + "WEBDL": "4", + "WEBRIP": "5", + "HDTV": "6", + "ENCODE": "3", + }.get(type, "0") return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') + "8640p": "10", + "4320p": "1", + "2160p": "2", + "1440p": "3", + "1080p": "3", + "1080i": "4", + "720p": "5", + "576p": "6", + "576i": "7", + "480p": "8", + "480i": "9", + }.get(resolution, "10") return resolution_id async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) + cat_id = await 
self.get_cat_id(meta["category"]) + type_id = await self.get_type_id(meta["type"]) + resolution_id = await self.get_res_id(meta["resolution"]) await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + region_id = await common.unit3d_region_ids(meta.get("region")) + distributor_id = await common.unit3d_distributor_ids(meta.get("distributor")) + if ( + meta["anon"] == 0 + and bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + is False + ): anon = 0 else: anon = 1 - if meta['bdinfo'] is not None: + if meta["bdinfo"] is not None: mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + bd_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ).read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ).read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') - files = {'torrent': open_torrent} + desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + open_torrent = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", + "rb", + ) + files = {"torrent": open_torrent} data = { - 'name': meta['name'], - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb_id'].replace('tt', ''), - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, + "name": meta["name"], + "description": desc, + "mediainfo": mi_dump, + "bdinfo": bd_dump, + "category_id": cat_id, + "type_id": type_id, + "resolution_id": resolution_id, + "tmdb": meta["tmdb"], + "imdb": meta["imdb_id"].replace("tt", ""), + "tvdb": meta["tvdb_id"], + "mal": meta["mal_id"], + "igdb": 0, + "anonymous": anon, + "stream": meta["stream"], + "sd": meta["sd"], + "keywords": meta["keywords"], + "personal_release": int(meta.get("personalrelease", False)), + "internal": 0, + "featured": 0, + "free": 0, + "doubleup": 0, + "sticky": 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 + if self.config["TRACKERS"][self.tracker].get("internal", False) is True: + if meta["tag"] != "" and ( + meta["tag"][1:] + in self.config["TRACKERS"][self.tracker].get("internal_groups", []) + ): + data["internal"] = 1 if region_id != 0: - data['region_id'] = region_id + data["region_id"] = region_id if distributor_id != 0: - 
data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') + data["distributor_id"] = distributor_id + if meta.get("category") == "TV": + data["season_number"] = meta.get("season_int", "0") + data["episode_number"] = meta.get("episode_int", "0") headers = { - 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:53.0) Gecko/20100101 Firefox/53.0' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:53.0) Gecko/20100101 Firefox/53.0" } + params = {"api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip()} - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + if meta["debug"] is False: + response = requests.post( + url=self.upload_url, + files=files, + data=data, + headers=headers, + params=params, + ) try: console.print(response.json()) except Exception: @@ -143,28 +173,32 @@ async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" + "api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip(), + "tmdbId": meta["tmdb"], + "categories[]": await self.get_cat_id(meta["category"]), + "types[]": await self.get_type_id(meta["type"]), + "resolutions[]": await self.get_res_id(meta["resolution"]), + "name": "", } - if meta['category'] == 'TV': - params['name'] = params['name'] + f"{meta.get('season', '')}{meta.get('episode', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + meta['edition'] + if meta["category"] == "TV": + params["name"] = ( + params["name"] + f"{meta.get('season', '')}{meta.get('episode', '')}" + ) + if meta.get("edition", "") != "": + params["name"] = params["name"] + meta["edition"] try: response = requests.get(url=self.search_url, params=params) response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] + for each in response["data"]: + result = [each][0]["attributes"]["name"] # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + console.print( + "[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect" + ) await asyncio.sleep(5) return dupes diff --git a/src/trackers/THR.py b/src/trackers/THR.py index 4a91b66e7..531a0d2f9 100644 --- a/src/trackers/THR.py +++ b/src/trackers/THR.py @@ -13,7 +13,7 @@ from src.console import console -class THR(): +class THR: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -21,10 +21,11 @@ class THR(): Set type/category IDs Upload """ + def __init__(self, config): self.config = config - self.username = config['TRACKERS']['THR'].get('username') - self.password = config['TRACKERS']['THR'].get('password') + self.username = config["TRACKERS"]["THR"].get("username") + self.password = config["TRACKERS"]["THR"].get("password") self.banned_groups = [""] pass @@ -33,77 +34,87 @@ async def upload(self, session, meta, disctype): cat_id = await self.get_cat_id(meta) subs = self.get_subtitles(meta) pronfo = await self.edit_desc(meta) # noqa #F841 - thr_name = unidecode(meta['name'].replace('DD+', 'DDP')) + thr_name = unidecode(meta["name"].replace("DD+", "DDP")) # Confirm the correct naming order for FL cli_ui.info(f"THR name: {thr_name}") - if meta.get('unattended', False) is False: + if meta.get("unattended", False) is False: thr_confirm = cli_ui.ask_yes_no("Correct?", default=False) if thr_confirm is not True: - thr_name_manually = cli_ui.ask_string("Please enter a proper name", default="") + thr_name_manually = cli_ui.ask_string( + "Please enter a proper name", default="" + ) if thr_name_manually == "": - console.print('No proper name given') + console.print("No proper name given") console.print("Aborting...") return else: thr_name = thr_name_manually torrent_name = re.sub(r"[^0-9a-zA-Z. '\-\[\]]+", " ", thr_name) - if meta.get('is_disc', '') == 'BDMV': + if meta.get("is_disc", "") == "BDMV": mi_file = None # bd_file = f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8' else: - mi_file = os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt") - with open(mi_file, 'r') as f: + mi_file = os.path.abspath( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt" + ) + with open(mi_file, "r") as f: mi_file = f.read() f.close() # bd_file = None - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[THR]DESCRIPTION.txt", 'r', encoding='utf-8') as f: + with open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[THR]DESCRIPTION.txt", + "r", + encoding="utf-8", + ) as f: desc = f.read() f.close() - torrent_path = os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/[THR]{meta['clean_name']}.torrent") - with open(torrent_path, 'rb') as f: + torrent_path = os.path.abspath( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[THR]{meta['clean_name']}.torrent" + ) + with open(torrent_path, "rb") as f: tfile = f.read() f.close() # Upload Form - url = 'https://www.torrenthr.org/takeupload.php' - files = { - 'tfile': (f'{torrent_name}.torrent', tfile) - } + url = "https://www.torrenthr.org/takeupload.php" + files = {"tfile": (f"{torrent_name}.torrent", tfile)} payload = { - 'name': thr_name, - 'descr': desc, - 'type': cat_id, - 'url': f"https://www.imdb.com/title/tt{meta.get('imdb_id').replace('tt', '')}/", - 'tube': meta.get('youtube', '') + "name": thr_name, + "descr": desc, + "type": cat_id, + "url": f"https://www.imdb.com/title/tt{meta.get('imdb_id').replace('tt', '')}/", + "tube": meta.get("youtube", ""), } headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + "User-Agent": f"Upload Assistant/2.1 
({platform.system()} {platform.release()})" } # If pronfo fails, put mediainfo into THR parser - if meta.get('is_disc', '') != 'BDMV': - files['nfo'] = ("MEDIAINFO.txt", mi_file) + if meta.get("is_disc", "") != "BDMV": + files["nfo"] = ("MEDIAINFO.txt", mi_file) if subs != []: - payload['subs[]'] = tuple(subs) + payload["subs[]"] = tuple(subs) - if meta['debug'] is False: + if meta["debug"] is False: thr_upload_prompt = True else: - thr_upload_prompt = cli_ui.ask_yes_no("send to takeupload.php?", default=False) + thr_upload_prompt = cli_ui.ask_yes_no( + "send to takeupload.php?", default=False + ) if thr_upload_prompt is True: await asyncio.sleep(0.5) response = session.post(url=url, files=files, data=payload, headers=headers) try: - if meta['debug']: + if meta["debug"]: console.print(response.text) - if response.url.endswith('uploaded=1'): - console.print(f'[green]Successfully Uploaded at: {response.url}') + if response.url.endswith("uploaded=1"): + console.print(f"[green]Successfully Uploaded at: {response.url}") # Check if actually uploaded except Exception: - if meta['debug']: + if meta["debug"]: console.print(response.text) console.print("It may have uploaded, go check") return @@ -112,46 +123,58 @@ async def upload(self, session, meta, disctype): console.print(payload) async def get_cat_id(self, meta): - if meta['category'] == "MOVIE": - if meta.get('is_disc') == "BMDV": - cat = '40' - elif meta.get('is_disc') == "DVD" or meta.get('is_disc') == "HDDVD": - cat = '14' + if meta["category"] == "MOVIE": + if meta.get("is_disc") == "BDMV": + cat = "40" + elif meta.get("is_disc") == "DVD" or meta.get("is_disc") == "HDDVD": + cat = "14" else: - if meta.get('sd') == 1: - cat = '4' + if meta.get("sd") == 1: + cat = "4" else: - cat = '17' - elif meta['category'] == "TV": - if meta.get('sd') == 1: - cat = '7' + cat = "17" + elif meta["category"] == "TV": + if meta.get("sd") == 1: + cat = "7" else: - cat = '34' - elif meta.get('anime') is not False: - cat = '31' + cat = "34" + elif meta.get("anime") is not False: + cat = "31" return cat def get_subtitles(self, meta): subs = [] sub_langs = [] - if meta.get('is_disc', '') != 'BDMV': - with open(f"{meta.get('base_dir')}/tmp/{meta.get('uuid')}/MediaInfo.json", 'r', encoding='utf-8') as f: + if meta.get("is_disc", "") != "BDMV": + with open( + f"{meta.get('base_dir')}/tmp/{meta.get('uuid')}/MediaInfo.json", + "r", + encoding="utf-8", + ) as f: mi = json.load(f) - for track in mi['media']['track']: - if track['@type'] == "Text": - language = track.get('Language') - if language in ['hr', 'en', 'bs', 'sr', 'sl']: + for track in mi["media"]["track"]: + if track["@type"] == "Text": + language = track.get("Language") + if language in ["hr", "en", "bs", "sr", "sl"]: if language not in sub_langs: sub_langs.append(language) else: - for sub in meta['bdinfo']['subtitles']: + for sub in meta["bdinfo"]["subtitles"]: if sub not in sub_langs: sub_langs.append(sub) if sub_langs != []: subs = [] sub_lang_map = { - 'hr': 1, 'en': 2, 'bs': 3, 'sr': 4, 'sl': 5, - 'Croatian': 1, 'English': 2, 'Bosnian': 3, 'Serbian': 4, 'Slovenian': 5 + "hr": 1, + "en": 2, + "bs": 3, + "sr": 4, + "sl": 5, + "Croatian": 1, + "English": 2, + "Bosnian": 3, + "Serbian": 4, + "Slovenian": 5, } for sub in sub_langs: language = sub_lang_map.get(sub) @@ -161,24 +184,41 @@ def get_subtitles(self, meta): async def edit_torrent(self, meta): if os.path.exists(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent"): - THR_torrent = Torrent.read(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") - 
THR_torrent.metainfo['announce'] = self.config['TRACKERS']['THR']['announce_url'] - THR_torrent.metainfo['info']['source'] = "[https://www.torrenthr.org] TorrentHR.org" - Torrent.copy(THR_torrent).write(f"{meta['base_dir']}/tmp/{meta['uuid']}/[THR]{meta['clean_name']}.torrent", overwrite=True) + THR_torrent = Torrent.read( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent" + ) + THR_torrent.metainfo["announce"] = self.config["TRACKERS"]["THR"][ + "announce_url" + ] + THR_torrent.metainfo["info"][ + "source" + ] = "[https://www.torrenthr.org] TorrentHR.org" + Torrent.copy(THR_torrent).write( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[THR]{meta['clean_name']}.torrent", + overwrite=True, + ) return async def edit_desc(self, meta): pronfo = False - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[THR]DESCRIPTION.txt", 'w', encoding='utf-8') as desc: - if meta['tag'] == "": + base = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + with open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[THR]DESCRIPTION.txt", + "w", + encoding="utf-8", + ) as desc: + if meta["tag"] == "": tag = "" else: tag = f" / {meta['tag'][1:]}" - if meta['is_disc'] == "DVD": - res = meta['source'] + if meta["is_disc"] == "DVD": + res = meta["source"] else: - res = meta['resolution'] + res = meta["resolution"] desc.write("[quote=Info]") name_aka = f"{meta['title']} {meta['aka']} {meta['year']}" name_aka = unidecode(name_aka) @@ -187,31 +227,35 @@ async def edit_desc(self, meta): desc.write(f"Overview: {meta['overview']}\n\n") desc.write(f"{res} / {meta['type']}{tag}\n\n") desc.write(f"Category: {meta['category']}\n") - desc.write(f"TMDB: https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}\n") - if meta['imdb_id'] != "0": + desc.write( + f"TMDB: https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}\n" + ) + if meta["imdb_id"] != "0": desc.write(f"IMDb: https://www.imdb.com/title/tt{meta['imdb_id']}\n") - if meta['tvdb_id'] != "0": - desc.write(f"TVDB: https://www.thetvdb.com/?id={meta['tvdb_id']}&tab=series\n") + if meta["tvdb_id"] != "0": + desc.write( + f"TVDB: https://www.thetvdb.com/?id={meta['tvdb_id']}&tab=series\n" + ) desc.write("[/quote]") desc.write(base) # REHOST IMAGES os.chdir(f"{meta['base_dir']}/tmp/{meta['uuid']}") image_glob = glob.glob("*.png") - if 'POSTER.png' in image_glob: - image_glob.remove('POSTER.png') + if "POSTER.png" in image_glob: + image_glob.remove("POSTER.png") image_list = [] for image in image_glob: url = "https://img2.torrenthr.org/api/1/upload" data = { - 'key': self.config['TRACKERS']['THR'].get('img_api'), + "key": self.config["TRACKERS"]["THR"].get("img_api"), # 'source' : base64.b64encode(open(image, "rb").read()).decode('utf8') } - files = {'source': open(image, 'rb')} + files = {"source": open(image, "rb")} response = requests.post(url, data=data, files=files) try: response = response.json() # med_url = response['image']['medium']['url'] - img_url = response['image']['url'] + img_url = response["image"]["url"] image_list.append(img_url) except json.decoder.JSONDecodeError: console.print("[yellow]Failed to upload image") @@ -221,68 +265,75 @@ async def edit_desc(self, meta): console.print(response) await asyncio.sleep(1) desc.write("[align=center]") - if meta.get('is_disc', '') == 'BDMV': - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt") as bd_file: + if 
meta.get("is_disc", "") == "BDMV": + with open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt" + ) as bd_file: desc.write(f"[nfo]{bd_file.read()}[/nfo]") bd_file.close() else: # ProNFO pronfo_url = f"https://www.pronfo.com/api/v1/access/upload/{self.config['TRACKERS']['THR'].get('pronfo_api_key', '')}" data = { - 'content': open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r').read(), - 'theme': self.config['TRACKERS']['THR'].get('pronfo_theme', 'gray'), - 'rapi': self.config['TRACKERS']['THR'].get('pronfo_rapi_id') + "content": open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", "r" + ).read(), + "theme": self.config["TRACKERS"]["THR"].get("pronfo_theme", "gray"), + "rapi": self.config["TRACKERS"]["THR"].get("pronfo_rapi_id"), } response = requests.post(pronfo_url, data=data) try: response = response.json() - if response.get('error', True) is False: - mi_img = response.get('url') + if response.get("error", True) is False: + mi_img = response.get("url") desc.write(f"\n[img]{mi_img}[/img]\n") pronfo = True except Exception: - console.print('[bold red]Error parsing pronfo response, using THR parser instead') - if meta['debug']: + console.print( + "[bold red]Error parsing pronfo response, using THR parser instead" + ) + if meta["debug"]: console.print(f"[red]{response}") console.print(response.text) - for each in image_list[:int(meta['screens'])]: + for each in image_list[: int(meta["screens"])]: desc.write(f"\n[img]{each}[/img]\n") # if pronfo: # with open(os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt"), 'r') as mi_file: # full_mi = mi_file.read() # desc.write(f"[/align]\n[hide=FULL MEDIAINFO]{full_mi}[/hide][align=center]") # mi_file.close() - desc.write("\n\n[size=2][url=https://www.torrenthr.org/forums.php?action=viewtopic&topicid=8977]Created by L4G's Upload Assistant[/url][/size][/align]") + desc.write( + "\n\n[size=2][url=https://www.torrenthr.org/forums.php?action=viewtopic&topicid=8977]Created by L4G's Upload Assistant[/url][/size][/align]" + ) desc.close() return pronfo def search_existing(self, session, imdb_id, disctype): from bs4 import BeautifulSoup - imdb_id = imdb_id.replace('tt', '') - search_url = f"https://www.torrenthr.org/browse.php?search={imdb_id}&blah=2&incldead=1" + + imdb_id = imdb_id.replace("tt", "") + search_url = ( + f"https://www.torrenthr.org/browse.php?search={imdb_id}&blah=2&incldead=1" + ) search = session.get(search_url) - soup = BeautifulSoup(search.text, 'html.parser') + soup = BeautifulSoup(search.text, "html.parser") dupes = [] - for link in soup.find_all('a', href=True): - if link['href'].startswith('details.php'): - if link.get('onmousemove', False): - dupe = link['onmousemove'].split("','/images") + for link in soup.find_all("a", href=True): + if link["href"].startswith("details.php"): + if link.get("onmousemove", False): + dupe = link["onmousemove"].split("','/images") dupe = dupe[0].replace("return overlibImage('", "") dupes.append(dupe) return dupes def login(self, session): - url = 'https://www.torrenthr.org/takelogin.php' - payload = { - 'username': self.username, - 'password': self.password, - 'ssl': 'yes' - } + url = "https://www.torrenthr.org/takelogin.php" + payload = {"username": self.username, "password": self.password, "ssl": "yes"} headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + "User-Agent": f"Upload Assistant/2.1 ({platform.system()} {platform.release()})" } resp = session.post(url, headers=headers, data=payload) if resp.url 
== "https://www.torrenthr.org/index.php": - console.print('[green]Successfully logged in') + console.print("[green]Successfully logged in") return session diff --git a/src/trackers/TIK.py b/src/trackers/TIK.py index 6a8b9d982..d7fdb7739 100644 --- a/src/trackers/TIK.py +++ b/src/trackers/TIK.py @@ -15,7 +15,7 @@ from src.console import console -class TIK(): +class TIK: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -26,11 +26,11 @@ class TIK(): def __init__(self, config): self.config = config - self.tracker = 'TIK' - self.source_flag = 'TIK' - self.search_url = 'https://cinematik.net/api/torrents/filter' - self.upload_url = 'https://cinematik.net/api/torrents/upload' - self.torrent_url = 'https://cinematik.net/api/torrents/' + self.tracker = "TIK" + self.source_flag = "TIK" + self.search_url = "https://cinematik.net/api/torrents/filter" + self.upload_url = "https://cinematik.net/api/torrents/upload" + self.torrent_url = "https://cinematik.net/api/torrents/" self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by testing 123, Audionuts Upload Assistant[/url][/center]" self.banned_groups = [""] pass @@ -38,95 +38,140 @@ def __init__(self, config): async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True) - cat_id = await self.get_cat_id(meta['category'], meta.get('foreign'), meta.get('opera'), meta.get('asian')) + await common.unit3d_edit_desc( + meta, self.tracker, self.signature, comparison=True + ) + cat_id = await self.get_cat_id( + meta["category"], meta.get("foreign"), meta.get("opera"), meta.get("asian") + ) type_id = await self.get_type_id(disctype) - resolution_id = await self.get_res_id(meta['resolution']) - modq = await self.get_flag(meta, 'modq') - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + resolution_id = await self.get_res_id(meta["resolution"]) + modq = await self.get_flag(meta, "modq") + region_id = await common.unit3d_region_ids(meta.get("region")) + distributor_id = await common.unit3d_distributor_ids(meta.get("distributor")) + if ( + meta["anon"] == 0 + and bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + is False + ): anon = 0 else: anon = 1 - if not meta['is_disc']: + if not meta["is_disc"]: console.print("[red]Only disc-based content allowed at TIK") return - elif meta['bdinfo'] is not None: + elif meta["bdinfo"] is not None: mi_dump = None - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8') as bd_file: + with open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ) as bd_file: bd_dump = bd_file.read() else: - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8') as mi_file: + with open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ) as mi_file: mi_dump = mi_file.read() bd_dump = None - if meta.get('desclink'): - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", "r", encoding='utf-8').read() + if meta.get("desclink"): + desc = open( + 
f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() print(f"Custom Description Link: {desc}") - elif meta.get('descfile'): - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", "r", encoding='utf-8').read() + elif meta.get("descfile"): + desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() print(f"Custom Description File Path: {desc}") else: await self.edit_desc(meta) - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", "r", encoding='utf-8').read() - - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') - files = {'torrent': open_torrent} + desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + + open_torrent = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", + "rb", + ) + files = {"torrent": open_torrent} data = { - 'name': await self.get_name(meta, disctype), - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'region_id': region_id, - 'distributor_id': distributor_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb_id'].replace('tt', ''), - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': 0, - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - 'mod_queue_opt_in': modq, + "name": await self.get_name(meta, disctype), + "description": desc, + "mediainfo": mi_dump, + "bdinfo": bd_dump, + "category_id": cat_id, + "type_id": type_id, + "resolution_id": resolution_id, + "region_id": region_id, + "distributor_id": distributor_id, + "tmdb": meta["tmdb"], + "imdb": meta["imdb_id"].replace("tt", ""), + "tvdb": meta["tvdb_id"], + "mal": meta["mal_id"], + "igdb": 0, + "anonymous": anon, + "stream": meta["stream"], + "sd": meta["sd"], + "keywords": meta["keywords"], + "personal_release": 0, + "internal": 0, + "featured": 0, + "free": 0, + "doubleup": 0, + "sticky": 0, + "mod_queue_opt_in": modq, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - if self.config['TRACKERS'][self.tracker].get('personal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('personal_group', [])): - data['personal_release'] = 1 + if self.config["TRACKERS"][self.tracker].get("internal", False) is True: + if meta["tag"] != "" and ( + meta["tag"][1:] + in self.config["TRACKERS"][self.tracker].get("internal_groups", []) + ): + data["internal"] = 1 + if self.config["TRACKERS"][self.tracker].get("personal", False) is True: + if meta["tag"] != "" and ( + meta["tag"][1:] + in self.config["TRACKERS"][self.tracker].get("personal_group", []) + ): + data["personal_release"] = 1 if region_id != 0: - data['region_id'] = region_id + data["region_id"] = region_id if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') + 
data["distributor_id"] = distributor_id + if meta.get("category") == "TV": + data["season_number"] = meta.get("season_int", "0") + data["episode_number"] = meta.get("episode_int", "0") headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + "User-Agent": f"Upload Assistant/2.1 ({platform.system()} {platform.release()})" } - - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + params = {"api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip()} + + if meta["debug"] is False: + response = requests.post( + url=self.upload_url, + files=files, + data=data, + headers=headers, + params=params, + ) console.print(data) console.print(f"TIK response: {response}") try: @@ -140,112 +185,118 @@ async def upload(self, meta, disctype): open_torrent.close() def get_basename(self, meta): - path = next(iter(meta['filelist']), meta['path']) + path = next(iter(meta["filelist"]), meta["path"]) return os.path.basename(path) async def get_name(self, meta, disctype): - disctype = meta.get('disctype', None) + disctype = meta.get("disctype", None) basename = self.get_basename(meta) - type = meta.get('type', "") - title = meta.get('title', "").replace('AKA', '/').strip() - alt_title = meta.get('aka', "").replace('AKA', '/').strip() - year = meta.get('year', "") - resolution = meta.get('resolution', "") - season = meta.get('season', "") - repack = meta.get('repack', "") + type = meta.get("type", "") + title = meta.get("title", "").replace("AKA", "/").strip() + alt_title = meta.get("aka", "").replace("AKA", "/").strip() + year = meta.get("year", "") + resolution = meta.get("resolution", "") + season = meta.get("season", "") + repack = meta.get("repack", "") if repack.strip(): repack = f"[{repack}]" - three_d = meta.get('3D', "") + three_d = meta.get("3D", "") three_d_tag = f"[{three_d}]" if three_d else "" - tag = meta.get('tag', "").replace("-", "- ") + tag = meta.get("tag", "").replace("-", "- ") if tag == "": tag = "- NOGRP" - source = meta.get('source', "") - uhd = meta.get('uhd', "") # noqa #841 - hdr = meta.get('hdr', "") + source = meta.get("source", "") + uhd = meta.get("uhd", "") # noqa #841 + hdr = meta.get("hdr", "") if not hdr.strip(): hdr = "SDR" - distributor = meta.get('distributor', "") # noqa F841 - video_codec = meta.get('video_codec', "") - video_encode = meta.get('video_encode', "").replace(".", "") - if 'x265' in basename: - video_encode = video_encode.replace('H', 'x') - dvd_size = meta.get('dvd_size', "") - search_year = meta.get('search_year', "") + distributor = meta.get("distributor", "") # noqa F841 + video_codec = meta.get("video_codec", "") + video_encode = meta.get("video_encode", "").replace(".", "") + if "x265" in basename: + video_encode = video_encode.replace("H", "x") + dvd_size = meta.get("dvd_size", "") + search_year = meta.get("search_year", "") if not str(search_year).strip(): search_year = year - category_name = meta.get('category', "") - foreign = meta.get('foreign') - opera = meta.get('opera') - asian = meta.get('asian') - meta['category_id'] = await self.get_cat_id(category_name, foreign, opera, asian) + category_name = meta.get("category", "") + foreign = meta.get("foreign") + opera = meta.get("opera") + asian = meta.get("asian") + meta["category_id"] = await self.get_cat_id( + category_name, foreign, opera, asian + ) name = "" alt_title_part = f" / 
{alt_title}" if alt_title else "" - if meta['category_id'] in ("1", "3", "5", "6"): - if meta['is_disc'] == 'BDMV': + if meta["category_id"] in ("1", "3", "5", "6"): + if meta["is_disc"] == "BDMV": name = f"{title}{alt_title_part} ({year}) {disctype} {resolution} {video_codec} {three_d_tag}" - elif meta['is_disc'] == 'DVD': + elif meta["is_disc"] == "DVD": name = f"{title}{alt_title_part} ({year}) {source} {dvd_size}" - elif meta['category'] == "TV": # TV SPECIFIC + elif meta["category"] == "TV": # TV SPECIFIC if type == "DISC": # Disk - if meta['is_disc'] == 'BDMV': + if meta["is_disc"] == "BDMV": name = f"{title}{alt_title_part} ({search_year}) {season} {disctype} {resolution} {video_codec}" - if meta['is_disc'] == 'DVD': + if meta["is_disc"] == "DVD": name = f"{title}{alt_title_part} ({search_year}) {season} {source} {dvd_size}" # User confirmation console.print(f"[yellow]Final generated name: [greee]{name}") - confirmation = cli_ui.ask_yes_no("Do you want to use this name?", default=False) # Default is 'No' + confirmation = cli_ui.ask_yes_no( + "Do you want to use this name?", default=False + ) # Default is 'No' if confirmation: return name else: - console.print("[red]Sorry, this seems to be an edge case, please report at (insert_link)") + console.print( + "[red]Sorry, this seems to be an edge case, please report at (insert_link)" + ) sys.exit(1) async def get_cat_id(self, category_name, foreign, opera, asian): category_id = { - 'FILM': '1', - 'TV': '2', - 'Foreign Film': '3', - 'Foreign TV': '4', - 'Opera & Musical': '5', - 'Asian Film': '6', - }.get(category_name, '0') - - if category_name == 'MOVIE': + "FILM": "1", + "TV": "2", + "Foreign Film": "3", + "Foreign TV": "4", + "Opera & Musical": "5", + "Asian Film": "6", + }.get(category_name, "0") + + if category_name == "MOVIE": if foreign: - category_id = '3' + category_id = "3" elif opera: - category_id = '5' + category_id = "5" elif asian: - category_id = '6' + category_id = "6" else: - category_id = '1' - elif category_name == 'TV': + category_id = "1" + elif category_name == "TV": if foreign: - category_id = '4' + category_id = "4" elif opera: - category_id = '5' + category_id = "5" else: - category_id = '2' + category_id = "2" return category_id async def get_type_id(self, disctype): type_id_map = { - 'Custom': '1', - 'BD100': '3', - 'BD66': '4', - 'BD50': '5', - 'BD25': '6', - 'NTSC DVD9': '7', - 'NTSC DVD5': '8', - 'PAL DVD9': '9', - 'PAL DVD5': '10', - '3D': '11' + "Custom": "1", + "BD100": "3", + "BD66": "4", + "BD50": "5", + "BD25": "6", + "NTSC DVD9": "7", + "NTSC DVD5": "8", + "PAL DVD9": "9", + "PAL DVD5": "10", + "3D": "11", } if not disctype: @@ -253,28 +304,28 @@ async def get_type_id(self, disctype): return None disctype_value = disctype[0] if isinstance(disctype, list) else disctype - type_id = type_id_map.get(disctype_value, '1') # '1' is the default fallback + type_id = type_id_map.get(disctype_value, "1") # '1' is the default fallback return type_id async def get_res_id(self, resolution): resolution_id = { - 'Other': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') + "Other": "10", + "4320p": "1", + "2160p": "2", + "1440p": "3", + "1080p": "3", + "1080i": "4", + "720p": "5", + "576p": "6", + "576i": "7", + "480p": "8", + "480i": "9", + }.get(resolution, "10") return resolution_id async def get_flag(self, meta, flag_name): - config_flag = 
self.config['TRACKERS'][self.tracker].get(flag_name) + config_flag = self.config["TRACKERS"][self.tracker].get(flag_name) if config_flag is not None: return 1 if config_flag else 0 @@ -282,13 +333,16 @@ async def get_flag(self, meta, flag_name): async def edit_desc(self, meta): from src.prep import Prep - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) + + prep = Prep( + screens=meta["screens"], img_host=meta["imghost"], config=self.config + ) # Fetch additional IMDb metadata meta_imdb = await prep.imdb_other_meta(meta) # noqa #F841 - if len(meta.get('discs', [])) > 0: - summary = meta['discs'][0].get('summary', '') + if len(meta.get("discs", [])) > 0: + summary = meta["discs"][0].get("summary", "") else: summary = None @@ -302,7 +356,7 @@ async def edit_desc(self, meta): else: total_bitrate = "Unknown" - country_name = self.country_code_to_name(meta.get('region')) + country_name = self.country_code_to_name(meta.get("region")) # Rehost poster if tmdb_poster is available poster_url = f"https://image.tmdb.org/t/p/original{meta.get('tmdb_poster', '')}" @@ -314,10 +368,14 @@ async def edit_desc(self, meta): # Check if either poster.jpg or poster.png already exists if os.path.exists(poster_jpg_path): poster_path = poster_jpg_path - console.print("[green]Poster already exists as poster.jpg, skipping download.[/green]") + console.print( + "[green]Poster already exists as poster.jpg, skipping download.[/green]" + ) elif os.path.exists(poster_png_path): poster_path = poster_png_path - console.print("[green]Poster already exists as poster.png, skipping download.[/green]") + console.print( + "[green]Poster already exists as poster.png, skipping download.[/green]" + ) else: # No poster file exists, download the poster image poster_path = poster_jpg_path # Default to saving as poster.jpg @@ -331,11 +389,13 @@ async def edit_desc(self, meta): if os.path.exists(poster_path): try: console.print("Uploading standard poster to image host....") - new_poster_url, _ = prep.upload_screens(meta, 1, 1, 0, 1, [poster_path], {}) + new_poster_url, _ = prep.upload_screens( + meta, 1, 1, 0, 1, [poster_path], {} + ) # Ensure that the new poster URL is assigned only once if len(new_poster_url) > 0: - poster_url = new_poster_url[0]['raw_url'] + poster_url = new_poster_url[0]["raw_url"] except Exception as e: console.print(f"[red]Error uploading poster: {e}[/red]") else: @@ -344,21 +404,25 @@ async def edit_desc(self, meta): # Generate the description text desc_text = [] - images = meta['image_list'] - discs = meta.get('discs', []) # noqa #F841 + images = meta["image_list"] + discs = meta.get("discs", []) # noqa #F841 if len(images) >= 4: - image_link_1 = images[0]['raw_url'] - image_link_2 = images[1]['raw_url'] - image_link_3 = images[2]['raw_url'] - image_link_4 = images[3]['raw_url'] - image_link_5 = images[4]['raw_url'] - image_link_6 = images[5]['raw_url'] + image_link_1 = images[0]["raw_url"] + image_link_2 = images[1]["raw_url"] + image_link_3 = images[2]["raw_url"] + image_link_4 = images[3]["raw_url"] + image_link_5 = images[4]["raw_url"] + image_link_6 = images[5]["raw_url"] else: - image_link_1 = image_link_2 = image_link_3 = image_link_4 = image_link_5 = image_link_6 = "" + image_link_1 = image_link_2 = image_link_3 = image_link_4 = image_link_5 = ( + image_link_6 + ) = "" # Write the cover section with rehosted poster URL - desc_text.append("[h3]Cover[/h3] [color=red]A stock poster has been automatically added, but you'll get more love if you include a proper cover, see rule 
6.6[/color]\n") + desc_text.append( + "[h3]Cover[/h3] [color=red]A stock poster has been automatically added, but you'll get more love if you include a proper cover, see rule 6.6[/color]\n" + ) desc_text.append("[center]\n") desc_text.append(f"[IMG=500]{poster_url}[/IMG]\n") desc_text.append("[/center]\n\n") @@ -375,8 +439,12 @@ async def edit_desc(self, meta): desc_text.append("[/center]\n\n") # Write synopsis section with the custom title - desc_text.append("[h3]Synopsis/Review/Personal Thoughts (edit as needed)[/h3]\n") - desc_text.append("[color=red]Default TMDB sypnosis added, more love if you use a sypnosis from credible film institutions such as the BFI or directly quoting well-known film critics, see rule 6.3[/color]\n") + desc_text.append( + "[h3]Synopsis/Review/Personal Thoughts (edit as needed)[/h3]\n" + ) + desc_text.append( + "[color=red]Default TMDB sypnosis added, more love if you use a sypnosis from credible film institutions such as the BFI or directly quoting well-known film critics, see rule 6.3[/color]\n" + ) desc_text.append("[quote]\n") desc_text.append(f"{meta.get('overview', 'No synopsis available.')}\n") desc_text.append("[/quote]\n\n") @@ -384,29 +452,48 @@ async def edit_desc(self, meta): # Write technical info section desc_text.append("[h3]Technical Info[/h3]\n") desc_text.append("[code]\n") - if meta['is_disc'] == 'BDMV': - desc_text.append(f" Disc Label.........:{meta.get('bdinfo', {}).get('label', '')}\n") - desc_text.append(f" IMDb...............: [url=https://www.imdb.com/title/tt{meta.get('imdb_id')}]{meta.get('imdb_rating', '')}[/url]\n") + if meta["is_disc"] == "BDMV": + desc_text.append( + f" Disc Label.........:{meta.get('bdinfo', {}).get('label', '')}\n" + ) + desc_text.append( + f" IMDb...............: [url=https://www.imdb.com/title/tt{meta.get('imdb_id')}]{meta.get('imdb_rating', '')}[/url]\n" + ) desc_text.append(f" Year...............: {meta.get('year', '')}\n") desc_text.append(f" Country............: {country_name}\n") - if meta['is_disc'] == 'BDMV': - desc_text.append(f" Runtime............: {meta.get('bdinfo', {}).get('length', '')} hrs [color=red](double check this is actual runtime)[/color]\n") + if meta["is_disc"] == "BDMV": + desc_text.append( + f" Runtime............: {meta.get('bdinfo', {}).get('length', '')} hrs [color=red](double check this is actual runtime)[/color]\n" + ) else: - desc_text.append(" Runtime............: [color=red]Insert the actual runtime[/color]\n") - - if meta['is_disc'] == 'BDMV': - audio_languages = ', '.join([f"{track.get('language', 'Unknown')} {track.get('codec', 'Unknown')} {track.get('channels', 'Unknown')}" for track in meta.get('bdinfo', {}).get('audio', [])]) + desc_text.append( + " Runtime............: [color=red]Insert the actual runtime[/color]\n" + ) + + if meta["is_disc"] == "BDMV": + audio_languages = ", ".join( + [ + f"{track.get('language', 'Unknown')} {track.get('codec', 'Unknown')} {track.get('channels', 'Unknown')}" + for track in meta.get("bdinfo", {}).get("audio", []) + ] + ) desc_text.append(f" Audio..............: {audio_languages}\n") - desc_text.append(f" Subtitles..........: {', '.join(meta.get('bdinfo', {}).get('subtitles', []))}\n") + desc_text.append( + f" Subtitles..........: {', '.join(meta.get('bdinfo', {}).get('subtitles', []))}\n" + ) else: # Process each disc's `vob_mi` or `ifo_mi` to extract audio and subtitles separately - for disc in meta.get('discs', []): - vob_mi = disc.get('vob_mi', '') - ifo_mi = disc.get('ifo_mi', '') + for disc in meta.get("discs", []): + vob_mi = 
disc.get("vob_mi", "") + ifo_mi = disc.get("ifo_mi", "") unique_audio = set() # Store unique audio strings - audio_section = vob_mi.split('\n\nAudio\n')[1].split('\n\n')[0] if 'Audio\n' in vob_mi else None + audio_section = ( + vob_mi.split("\n\nAudio\n")[1].split("\n\n")[0] + if "Audio\n" in vob_mi + else None + ) if audio_section: if "AC-3" in audio_section: codec = "AC-3" @@ -421,41 +508,74 @@ async def edit_desc(self, meta): else: codec = "Unknown" - channels = audio_section.split("Channel(s)")[1].split(":")[1].strip().split(" ")[0] if "Channel(s)" in audio_section else "Unknown" + channels = ( + audio_section.split("Channel(s)")[1] + .split(":")[1] + .strip() + .split(" ")[0] + if "Channel(s)" in audio_section + else "Unknown" + ) # Convert 6 channels to 5.1, otherwise leave as is channels = "5.1" if channels == "6" else channels - language = disc.get('ifo_mi_full', '').split('Language')[1].split(":")[1].strip().split('\n')[0] if "Language" in disc.get('ifo_mi_full', '') else "Unknown" + language = ( + disc.get("ifo_mi_full", "") + .split("Language")[1] + .split(":")[1] + .strip() + .split("\n")[0] + if "Language" in disc.get("ifo_mi_full", "") + else "Unknown" + ) audio_info = f"{language} {codec} {channels}" unique_audio.add(audio_info) # Append audio information to the description if unique_audio: - desc_text.append(f" Audio..............: {', '.join(sorted(unique_audio))}\n") + desc_text.append( + f" Audio..............: {', '.join(sorted(unique_audio))}\n" + ) # Subtitle extraction using the helper function unique_subtitles = self.parse_subtitles(ifo_mi) # Append subtitle information to the description if unique_subtitles: - desc_text.append(f" Subtitles..........: {', '.join(sorted(unique_subtitles))}\n") - - if meta['is_disc'] == 'BDMV': - video_info = meta.get('bdinfo', {}).get('video', []) - video_codec = video_info[0].get('codec', 'Unknown') - video_bitrate = video_info[0].get('bitrate', 'Unknown') - desc_text.append(f" Video Format.......: {video_codec} / {video_bitrate}\n") + desc_text.append( + f" Subtitles..........: {', '.join(sorted(unique_subtitles))}\n" + ) + + if meta["is_disc"] == "BDMV": + video_info = meta.get("bdinfo", {}).get("video", []) + video_codec = video_info[0].get("codec", "Unknown") + video_bitrate = video_info[0].get("bitrate", "Unknown") + desc_text.append( + f" Video Format.......: {video_codec} / {video_bitrate}\n" + ) else: - desc_text.append(f" DVD Format.........: {meta.get('source', 'Unknown')}\n") - desc_text.append(" Film Aspect Ratio..: [color=red]The actual aspect ratio of the content, not including the black bars[/color]\n") - if meta['is_disc'] == 'BDMV': - desc_text.append(f" Source.............: {meta.get('disctype', 'Unknown')}\n") + desc_text.append( + f" DVD Format.........: {meta.get('source', 'Unknown')}\n" + ) + desc_text.append( + " Film Aspect Ratio..: [color=red]The actual aspect ratio of the content, not including the black bars[/color]\n" + ) + if meta["is_disc"] == "BDMV": + desc_text.append( + f" Source.............: {meta.get('disctype', 'Unknown')}\n" + ) else: - desc_text.append(f" Source.............: {meta.get('dvd_size', 'Unknown')}\n") - desc_text.append(f" Film Distributor...: [url={meta.get('distributor_link', '')}]{meta.get('distributor', 'Unknown')}[url] [color=red]Don't forget the actual distributor link\n") + desc_text.append( + f" Source.............: {meta.get('dvd_size', 'Unknown')}\n" + ) + desc_text.append( + f" Film Distributor...: [url={meta.get('distributor_link', '')}]{meta.get('distributor', 
'Unknown')}[/url] [color=red]Don't forget the actual distributor link\n" + ) desc_text.append(f" Average Bitrate....: {total_bitrate}\n") - desc_text.append(" Ripping Program....: [color=red]Specify - if it's your rip or custom version, otherwise 'Not my rip'[/color]\n") + desc_text.append( + " Ripping Program....: [color=red]Specify - if it's your rip or custom version, otherwise 'Not my rip'[/color]\n" + ) desc_text.append("\n") - if meta.get('untouched') is True: + if meta.get("untouched") is True: desc_text.append(" Menus......: [X] Untouched\n") desc_text.append(" Video......: [X] Untouched\n") desc_text.append(" Extras.....: [X] Untouched\n") @@ -485,23 +605,29 @@ async def edit_desc(self, meta): desc_text.append(f" - {meta.get('uploader_comments', 'No comments.')}\n") # Convert the list to a single string for the description - description = ''.join(desc_text) + description = "".join(desc_text) # Ask user if they want to edit or keep the description console.print(f"Current description: {description}", markup=False) console.print("[cyan]Do you want to edit or keep the description?[/cyan]") edit_choice = input("Enter 'e' to edit, or press Enter to keep it as is: ") - if edit_choice.lower() == 'e': + if edit_choice.lower() == "e": edited_description = click.edit(description) if edited_description: description = edited_description.strip() - console.print(f"Final description after editing: {description}", markup=False) + console.print( + f"Final description after editing: {description}", markup=False + ) else: console.print("[green]Keeping the original description.[/green]") # Write the final description to the file - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding="utf-8") as desc_file: + with open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "w", + encoding="utf-8", + ) as desc_file: desc_file.write(description) def parse_subtitles(self, disc_mi): @@ -524,72 +650,230 @@ def parse_subtitles(self, disc_mi): def country_code_to_name(self, code): country_mapping = { - 'AFG': 'Afghanistan', 'ALB': 'Albania', 'DZA': 'Algeria', 'AND': 'Andorra', 'AGO': 'Angola', - 'ARG': 'Argentina', 'ARM': 'Armenia', 'AUS': 'Australia', 'AUT': 'Austria', 'AZE': 'Azerbaijan', - 'BHS': 'Bahamas', 'BHR': 'Bahrain', 'BGD': 'Bangladesh', 'BRB': 'Barbados', 'BLR': 'Belarus', - 'BEL': 'Belgium', 'BLZ': 'Belize', 'BEN': 'Benin', 'BTN': 'Bhutan', 'BOL': 'Bolivia', - 'BIH': 'Bosnia and Herzegovina', 'BWA': 'Botswana', 'BRA': 'Brazil', 'BRN': 'Brunei', - 'BGR': 'Bulgaria', 'BFA': 'Burkina Faso', 'BDI': 'Burundi', 'CPV': 'Cabo Verde', 'KHM': 'Cambodia', - 'CMR': 'Cameroon', 'CAN': 'Canada', 'CAF': 'Central African Republic', 'TCD': 'Chad', 'CHL': 'Chile', - 'CHN': 'China', 'COL': 'Colombia', 'COM': 'Comoros', 'COG': 'Congo', 'CRI': 'Costa Rica', - 'HRV': 'Croatia', 'CUB': 'Cuba', 'CYP': 'Cyprus', 'CZE': 'Czech Republic', 'DNK': 'Denmark', - 'DJI': 'Djibouti', 'DMA': 'Dominica', 'DOM': 'Dominican Republic', 'ECU': 'Ecuador', 'EGY': 'Egypt', - 'SLV': 'El Salvador', 'GNQ': 'Equatorial Guinea', 'ERI': 'Eritrea', 'EST': 'Estonia', - 'SWZ': 'Eswatini', 'ETH': 'Ethiopia', 'FJI': 'Fiji', 'FIN': 'Finland', 'FRA': 'France', - 'GAB': 'Gabon', 'GMB': 'Gambia', 'GEO': 'Georgia', 'DEU': 'Germany', 'GHA': 'Ghana', - 'GRC': 'Greece', 'GRD': 'Grenada', 'GTM': 'Guatemala', 'GIN': 'Guinea', 'GNB': 'Guinea-Bissau', - 'GUY': 'Guyana', 'HTI': 'Haiti', 'HND': 'Honduras', 'HUN': 'Hungary', 'ISL': 'Iceland', 'IND': 'India', - 'IDN': 'Indonesia', 'IRN': 'Iran', 
'IRQ': 'Iraq', 'IRL': 'Ireland', 'ISR': 'Israel', 'ITA': 'Italy', - 'JAM': 'Jamaica', 'JPN': 'Japan', 'JOR': 'Jordan', 'KAZ': 'Kazakhstan', 'KEN': 'Kenya', - 'KIR': 'Kiribati', 'KOR': 'Korea', 'KWT': 'Kuwait', 'KGZ': 'Kyrgyzstan', 'LAO': 'Laos', 'LVA': 'Latvia', - 'LBN': 'Lebanon', 'LSO': 'Lesotho', 'LBR': 'Liberia', 'LBY': 'Libya', 'LIE': 'Liechtenstein', - 'LTU': 'Lithuania', 'LUX': 'Luxembourg', 'MDG': 'Madagascar', 'MWI': 'Malawi', 'MYS': 'Malaysia', - 'MDV': 'Maldives', 'MLI': 'Mali', 'MLT': 'Malta', 'MHL': 'Marshall Islands', 'MRT': 'Mauritania', - 'MUS': 'Mauritius', 'MEX': 'Mexico', 'FSM': 'Micronesia', 'MDA': 'Moldova', 'MCO': 'Monaco', - 'MNG': 'Mongolia', 'MNE': 'Montenegro', 'MAR': 'Morocco', 'MOZ': 'Mozambique', 'MMR': 'Myanmar', - 'NAM': 'Namibia', 'NRU': 'Nauru', 'NPL': 'Nepal', 'NLD': 'Netherlands', 'NZL': 'New Zealand', - 'NIC': 'Nicaragua', 'NER': 'Niger', 'NGA': 'Nigeria', 'MKD': 'North Macedonia', 'NOR': 'Norway', - 'OMN': 'Oman', 'PAK': 'Pakistan', 'PLW': 'Palau', 'PAN': 'Panama', 'PNG': 'Papua New Guinea', - 'PRY': 'Paraguay', 'PER': 'Peru', 'PHL': 'Philippines', 'POL': 'Poland', 'PRT': 'Portugal', - 'QAT': 'Qatar', 'ROU': 'Romania', 'RUS': 'Russia', 'RWA': 'Rwanda', 'KNA': 'Saint Kitts and Nevis', - 'LCA': 'Saint Lucia', 'VCT': 'Saint Vincent and the Grenadines', 'WSM': 'Samoa', 'SMR': 'San Marino', - 'STP': 'Sao Tome and Principe', 'SAU': 'Saudi Arabia', 'SEN': 'Senegal', 'SRB': 'Serbia', - 'SYC': 'Seychelles', 'SLE': 'Sierra Leone', 'SGP': 'Singapore', 'SVK': 'Slovakia', 'SVN': 'Slovenia', - 'SLB': 'Solomon Islands', 'SOM': 'Somalia', 'ZAF': 'South Africa', 'SSD': 'South Sudan', - 'ESP': 'Spain', 'LKA': 'Sri Lanka', 'SDN': 'Sudan', 'SUR': 'Suriname', 'SWE': 'Sweden', - 'CHE': 'Switzerland', 'SYR': 'Syria', 'TWN': 'Taiwan', 'TJK': 'Tajikistan', 'TZA': 'Tanzania', - 'THA': 'Thailand', 'TLS': 'Timor-Leste', 'TGO': 'Togo', 'TON': 'Tonga', 'TTO': 'Trinidad and Tobago', - 'TUN': 'Tunisia', 'TUR': 'Turkey', 'TKM': 'Turkmenistan', 'TUV': 'Tuvalu', 'UGA': 'Uganda', - 'UKR': 'Ukraine', 'ARE': 'United Arab Emirates', 'GBR': 'United Kingdom', 'USA': 'United States', - 'URY': 'Uruguay', 'UZB': 'Uzbekistan', 'VUT': 'Vanuatu', 'VEN': 'Venezuela', 'VNM': 'Vietnam', - 'YEM': 'Yemen', 'ZMB': 'Zambia', 'ZWE': 'Zimbabwe' + "AFG": "Afghanistan", + "ALB": "Albania", + "DZA": "Algeria", + "AND": "Andorra", + "AGO": "Angola", + "ARG": "Argentina", + "ARM": "Armenia", + "AUS": "Australia", + "AUT": "Austria", + "AZE": "Azerbaijan", + "BHS": "Bahamas", + "BHR": "Bahrain", + "BGD": "Bangladesh", + "BRB": "Barbados", + "BLR": "Belarus", + "BEL": "Belgium", + "BLZ": "Belize", + "BEN": "Benin", + "BTN": "Bhutan", + "BOL": "Bolivia", + "BIH": "Bosnia and Herzegovina", + "BWA": "Botswana", + "BRA": "Brazil", + "BRN": "Brunei", + "BGR": "Bulgaria", + "BFA": "Burkina Faso", + "BDI": "Burundi", + "CPV": "Cabo Verde", + "KHM": "Cambodia", + "CMR": "Cameroon", + "CAN": "Canada", + "CAF": "Central African Republic", + "TCD": "Chad", + "CHL": "Chile", + "CHN": "China", + "COL": "Colombia", + "COM": "Comoros", + "COG": "Congo", + "CRI": "Costa Rica", + "HRV": "Croatia", + "CUB": "Cuba", + "CYP": "Cyprus", + "CZE": "Czech Republic", + "DNK": "Denmark", + "DJI": "Djibouti", + "DMA": "Dominica", + "DOM": "Dominican Republic", + "ECU": "Ecuador", + "EGY": "Egypt", + "SLV": "El Salvador", + "GNQ": "Equatorial Guinea", + "ERI": "Eritrea", + "EST": "Estonia", + "SWZ": "Eswatini", + "ETH": "Ethiopia", + "FJI": "Fiji", + "FIN": "Finland", + "FRA": "France", + "GAB": "Gabon", + "GMB": "Gambia", + "GEO": "Georgia", 
+ "DEU": "Germany", + "GHA": "Ghana", + "GRC": "Greece", + "GRD": "Grenada", + "GTM": "Guatemala", + "GIN": "Guinea", + "GNB": "Guinea-Bissau", + "GUY": "Guyana", + "HTI": "Haiti", + "HND": "Honduras", + "HUN": "Hungary", + "ISL": "Iceland", + "IND": "India", + "IDN": "Indonesia", + "IRN": "Iran", + "IRQ": "Iraq", + "IRL": "Ireland", + "ISR": "Israel", + "ITA": "Italy", + "JAM": "Jamaica", + "JPN": "Japan", + "JOR": "Jordan", + "KAZ": "Kazakhstan", + "KEN": "Kenya", + "KIR": "Kiribati", + "KOR": "Korea", + "KWT": "Kuwait", + "KGZ": "Kyrgyzstan", + "LAO": "Laos", + "LVA": "Latvia", + "LBN": "Lebanon", + "LSO": "Lesotho", + "LBR": "Liberia", + "LBY": "Libya", + "LIE": "Liechtenstein", + "LTU": "Lithuania", + "LUX": "Luxembourg", + "MDG": "Madagascar", + "MWI": "Malawi", + "MYS": "Malaysia", + "MDV": "Maldives", + "MLI": "Mali", + "MLT": "Malta", + "MHL": "Marshall Islands", + "MRT": "Mauritania", + "MUS": "Mauritius", + "MEX": "Mexico", + "FSM": "Micronesia", + "MDA": "Moldova", + "MCO": "Monaco", + "MNG": "Mongolia", + "MNE": "Montenegro", + "MAR": "Morocco", + "MOZ": "Mozambique", + "MMR": "Myanmar", + "NAM": "Namibia", + "NRU": "Nauru", + "NPL": "Nepal", + "NLD": "Netherlands", + "NZL": "New Zealand", + "NIC": "Nicaragua", + "NER": "Niger", + "NGA": "Nigeria", + "MKD": "North Macedonia", + "NOR": "Norway", + "OMN": "Oman", + "PAK": "Pakistan", + "PLW": "Palau", + "PAN": "Panama", + "PNG": "Papua New Guinea", + "PRY": "Paraguay", + "PER": "Peru", + "PHL": "Philippines", + "POL": "Poland", + "PRT": "Portugal", + "QAT": "Qatar", + "ROU": "Romania", + "RUS": "Russia", + "RWA": "Rwanda", + "KNA": "Saint Kitts and Nevis", + "LCA": "Saint Lucia", + "VCT": "Saint Vincent and the Grenadines", + "WSM": "Samoa", + "SMR": "San Marino", + "STP": "Sao Tome and Principe", + "SAU": "Saudi Arabia", + "SEN": "Senegal", + "SRB": "Serbia", + "SYC": "Seychelles", + "SLE": "Sierra Leone", + "SGP": "Singapore", + "SVK": "Slovakia", + "SVN": "Slovenia", + "SLB": "Solomon Islands", + "SOM": "Somalia", + "ZAF": "South Africa", + "SSD": "South Sudan", + "ESP": "Spain", + "LKA": "Sri Lanka", + "SDN": "Sudan", + "SUR": "Suriname", + "SWE": "Sweden", + "CHE": "Switzerland", + "SYR": "Syria", + "TWN": "Taiwan", + "TJK": "Tajikistan", + "TZA": "Tanzania", + "THA": "Thailand", + "TLS": "Timor-Leste", + "TGO": "Togo", + "TON": "Tonga", + "TTO": "Trinidad and Tobago", + "TUN": "Tunisia", + "TUR": "Turkey", + "TKM": "Turkmenistan", + "TUV": "Tuvalu", + "UGA": "Uganda", + "UKR": "Ukraine", + "ARE": "United Arab Emirates", + "GBR": "United Kingdom", + "USA": "United States", + "URY": "Uruguay", + "UZB": "Uzbekistan", + "VUT": "Vanuatu", + "VEN": "Venezuela", + "VNM": "Vietnam", + "YEM": "Yemen", + "ZMB": "Zambia", + "ZWE": "Zimbabwe", } - return country_mapping.get(code.upper(), 'Unknown Country') + return country_mapping.get(code.upper(), "Unknown Country") async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") - disctype = meta.get('disctype', None) + disctype = meta.get("disctype", None) params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category'], meta.get('foreign'), meta.get('opera'), meta.get('asian')), - 'types[]': await self.get_type_id(disctype), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" + "api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip(), + "tmdbId": meta["tmdb"], + "categories[]": await 
self.get_cat_id( + meta["category"], + meta.get("foreign"), + meta.get("opera"), + meta.get("asian"), + ), + "types[]": await self.get_type_id(disctype), + "resolutions[]": await self.get_res_id(meta["resolution"]), + "name": "", } - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" + if meta.get("edition", "") != "": + params["name"] = params["name"] + f" {meta['edition']}" try: response = requests.get(url=self.search_url, params=params) response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] + for each in response["data"]: + result = [each][0]["attributes"]["name"] # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + console.print( + "[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect" + ) await asyncio.sleep(5) return dupes diff --git a/src/trackers/TL.py b/src/trackers/TL.py index f563a6839..9903afb87 100644 --- a/src/trackers/TL.py +++ b/src/trackers/TL.py @@ -8,72 +8,78 @@ from pathlib import Path -class TL(): +class TL: CATEGORIES = { - 'Anime': 34, - 'Movie4K': 47, - 'MovieBluray': 13, - 'MovieBlurayRip': 14, - 'MovieCam': 8, - 'MovieTS': 9, - 'MovieDocumentary': 29, - 'MovieDvd': 12, - 'MovieDvdRip': 11, - 'MovieForeign': 36, - 'MovieHdRip': 43, - 'MovieWebrip': 37, - 'TvBoxsets': 27, - 'TvEpisodes': 26, - 'TvEpisodesHd': 32, - 'TvForeign': 44 + "Anime": 34, + "Movie4K": 47, + "MovieBluray": 13, + "MovieBlurayRip": 14, + "MovieCam": 8, + "MovieTS": 9, + "MovieDocumentary": 29, + "MovieDvd": 12, + "MovieDvdRip": 11, + "MovieForeign": 36, + "MovieHdRip": 43, + "MovieWebrip": 37, + "TvBoxsets": 27, + "TvEpisodes": 26, + "TvEpisodesHd": 32, + "TvForeign": 44, } def __init__(self, config): self.config = config - self.tracker = 'TL' - self.source_flag = 'TorrentLeech.org' - self.upload_url = 'https://www.torrentleech.org/torrents/upload/apiupload' + self.tracker = "TL" + self.source_flag = "TorrentLeech.org" + self.upload_url = "https://www.torrentleech.org/torrents/upload/apiupload" self.signature = None self.banned_groups = [""] - self.announce_key = self.config['TRACKERS'][self.tracker]['announce_key'] - self.config['TRACKERS'][self.tracker]['announce_url'] = f"https://tracker.torrentleech.org/a/{self.announce_key}/announce" + self.announce_key = self.config["TRACKERS"][self.tracker]["announce_key"] + self.config["TRACKERS"][self.tracker][ + "announce_url" + ] = f"https://tracker.torrentleech.org/a/{self.announce_key}/announce" pass async def get_cat_id(self, common, meta): - if meta.get('anime', 0): - return self.CATEGORIES['Anime'] + if meta.get("anime", 0): + return self.CATEGORIES["Anime"] - if meta['category'] == 'MOVIE': - if meta['original_language'] != 'en': - return self.CATEGORIES['MovieForeign'] - elif 'Documentary' in meta['genres']: - return self.CATEGORIES['MovieDocumentary'] - elif meta['uhd']: - return self.CATEGORIES['Movie4K'] - elif meta['is_disc'] in ('BDMV', 'HDDVD') or (meta['type'] == 'REMUX' and meta['source'] in ('BluRay', 'HDDVD')): - return self.CATEGORIES['MovieBluray'] - elif meta['type'] == 'ENCODE' and meta['source'] in ('BluRay', 'HDDVD'): - return self.CATEGORIES['MovieBlurayRip'] - elif meta['is_disc'] == 'DVD' or (meta['type'] == 'REMUX' and 'DVD' in meta['source']): - return 
self.CATEGORIES['MovieDvd'] - elif meta['type'] == 'ENCODE' and 'DVD' in meta['source']: - return self.CATEGORIES['MovieDvdRip'] - elif 'WEB' in meta['type']: - return self.CATEGORIES['MovieWebrip'] - elif meta['type'] == 'HDTV': - return self.CATEGORIES['MovieHdRip'] - elif meta['category'] == 'TV': - if meta['original_language'] != 'en': - return self.CATEGORIES['TvForeign'] - elif meta.get('tv_pack', 0): - return self.CATEGORIES['TvBoxsets'] - elif meta['sd']: - return self.CATEGORIES['TvEpisodes'] + if meta["category"] == "MOVIE": + if meta["original_language"] != "en": + return self.CATEGORIES["MovieForeign"] + elif "Documentary" in meta["genres"]: + return self.CATEGORIES["MovieDocumentary"] + elif meta["uhd"]: + return self.CATEGORIES["Movie4K"] + elif meta["is_disc"] in ("BDMV", "HDDVD") or ( + meta["type"] == "REMUX" and meta["source"] in ("BluRay", "HDDVD") + ): + return self.CATEGORIES["MovieBluray"] + elif meta["type"] == "ENCODE" and meta["source"] in ("BluRay", "HDDVD"): + return self.CATEGORIES["MovieBlurayRip"] + elif meta["is_disc"] == "DVD" or ( + meta["type"] == "REMUX" and "DVD" in meta["source"] + ): + return self.CATEGORIES["MovieDvd"] + elif meta["type"] == "ENCODE" and "DVD" in meta["source"]: + return self.CATEGORIES["MovieDvdRip"] + elif "WEB" in meta["type"]: + return self.CATEGORIES["MovieWebrip"] + elif meta["type"] == "HDTV": + return self.CATEGORIES["MovieHdRip"] + elif meta["category"] == "TV": + if meta["original_language"] != "en": + return self.CATEGORIES["TvForeign"] + elif meta.get("tv_pack", 0): + return self.CATEGORIES["TvBoxsets"] + elif meta["sd"]: + return self.CATEGORIES["TvEpisodes"] else: - return self.CATEGORIES['TvEpisodesHd'] + return self.CATEGORIES["TvEpisodesHd"] - raise NotImplementedError('Failed to determine TL category!') + raise NotImplementedError("Failed to determine TL category!") async def upload(self, meta, disctype): common = COMMON(config=self.config) @@ -81,32 +87,44 @@ async def upload(self, meta, disctype): cat_id = await self.get_cat_id(common, meta) await common.unit3d_edit_desc(meta, self.tracker, self.signature) - open_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'a+', encoding='utf-8') + open_desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "a+", + encoding="utf-8", + ) - info_filename = 'BD_SUMMARY_00' if meta['bdinfo'] is not None else 'MEDIAINFO_CLEANPATH' - open_info = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/{info_filename}.txt", 'r', encoding='utf-8') - open_desc.write('\n\n') + info_filename = ( + "BD_SUMMARY_00" if meta["bdinfo"] is not None else "MEDIAINFO_CLEANPATH" + ) + open_info = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/{info_filename}.txt", + "r", + encoding="utf-8", + ) + open_desc.write("\n\n") open_desc.write(open_info.read()) open_info.close() open_desc.seek(0) - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') + open_torrent = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", + "rb", + ) files = { - 'nfo': open_desc, - 'torrent': (self.get_name(meta) + '.torrent', open_torrent) - } - data = { - 'announcekey': self.announce_key, - 'category': cat_id + "nfo": open_desc, + "torrent": (self.get_name(meta) + ".torrent", open_torrent), } + data = {"announcekey": self.announce_key, "category": cat_id} headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 
"User-Agent": f"Upload Assistant/2.1 ({platform.system()} {platform.release()})" } - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers) + if meta["debug"] is False: + response = requests.post( + url=self.upload_url, files=files, data=data, headers=headers + ) if not response.text.isnumeric(): - console.print(f'[red]{response.text}') + console.print(f"[red]{response.text}") else: console.print("[cyan]Request Data:") console.print(data) @@ -114,5 +132,5 @@ async def upload(self, meta, disctype): open_desc.close() def get_name(self, meta): - path = Path(meta['path']) + path = Path(meta["path"]) return path.stem if path.is_file() else path.name diff --git a/src/trackers/TTG.py b/src/trackers/TTG.py index 9337e8a83..908ca0d97 100644 --- a/src/trackers/TTG.py +++ b/src/trackers/TTG.py @@ -13,96 +13,116 @@ from src.console import console -class TTG(): +class TTG: def __init__(self, config): self.config = config - self.tracker = 'TTG' - self.source_flag = 'TTG' - self.username = str(config['TRACKERS']['TTG'].get('username', '')).strip() - self.password = str(config['TRACKERS']['TTG'].get('password', '')).strip() - self.passid = str(config['TRACKERS']['TTG'].get('login_question', '0')).strip() - self.passan = str(config['TRACKERS']['TTG'].get('login_answer', '')).strip() - self.uid = str(config['TRACKERS']['TTG'].get('user_id', '')).strip() - self.passkey = str(config['TRACKERS']['TTG'].get('announce_url', '')).strip().split('/')[-1] + self.tracker = "TTG" + self.source_flag = "TTG" + self.username = str(config["TRACKERS"]["TTG"].get("username", "")).strip() + self.password = str(config["TRACKERS"]["TTG"].get("password", "")).strip() + self.passid = str(config["TRACKERS"]["TTG"].get("login_question", "0")).strip() + self.passan = str(config["TRACKERS"]["TTG"].get("login_answer", "")).strip() + self.uid = str(config["TRACKERS"]["TTG"].get("user_id", "")).strip() + self.passkey = ( + str(config["TRACKERS"]["TTG"].get("announce_url", "")) + .strip() + .split("/")[-1] + ) self.signature = None self.banned_groups = [""] async def edit_name(self, meta): - ttg_name = meta['name'] + ttg_name = meta["name"] - remove_list = ['Dubbed', 'Dual-Audio'] + remove_list = ["Dubbed", "Dual-Audio"] for each in remove_list: - ttg_name = ttg_name.replace(each, '') - ttg_name = ttg_name.replace('PQ10', 'HDR') - ttg_name = ttg_name.replace('.', '{@}') + ttg_name = ttg_name.replace(each, "") + ttg_name = ttg_name.replace("PQ10", "HDR") + ttg_name = ttg_name.replace(".", "{@}") return ttg_name async def get_type_id(self, meta): - lang = meta.get('original_language', 'UNKNOWN').upper() - if meta['category'] == "MOVIE": + lang = meta.get("original_language", "UNKNOWN").upper() + if meta["category"] == "MOVIE": # 51 = DVDRip - if meta['resolution'].startswith("720"): + if meta["resolution"].startswith("720"): type_id = 52 # 720p - if meta['resolution'].startswith("1080"): + if meta["resolution"].startswith("1080"): type_id = 53 # 1080p/i - if meta['is_disc'] == "BDMV": + if meta["is_disc"] == "BDMV": type_id = 54 # Blu-ray disc - elif meta['category'] == "TV": - if meta.get('tv_pack', 0) != 1: + elif meta["category"] == "TV": + if meta.get("tv_pack", 0) != 1: # TV Singles - if meta['resolution'].startswith("720"): + if meta["resolution"].startswith("720"): type_id = 69 # 720p TV EU/US - if lang in ('ZH', 'CN', 'CMN'): + if lang in ("ZH", "CN", "CMN"): type_id = 76 # Chinese - if meta['resolution'].startswith("1080"): + if meta["resolution"].startswith("1080"): 
type_id = 70 # 1080 TV EU/US - if lang in ('ZH', 'CN', 'CMN'): + if lang in ("ZH", "CN", "CMN"): type_id = 75 # Chinese - if lang in ('KR', 'KO'): + if lang in ("KR", "KO"): type_id = 75 # Korean - if lang in ('JA', 'JP'): + if lang in ("JA", "JP"): type_id = 73 # Japanese else: # TV Packs type_id = 87 # EN/US - if lang in ('KR', 'KO'): + if lang in ("KR", "KO"): type_id = 99 # Korean - if lang in ('JA', 'JP'): + if lang in ("JA", "JP"): type_id = 88 # Japanese - if lang in ('ZH', 'CN', 'CMN'): + if lang in ("ZH", "CN", "CMN"): type_id = 90 # Chinese - if "documentary" in meta.get("genres", "").lower().replace(' ', '').replace('-', '') or 'documentary' in meta.get("keywords", "").lower().replace(' ', '').replace('-', ''): - if meta['resolution'].startswith("720"): + if "documentary" in meta.get("genres", "").lower().replace(" ", "").replace( + "-", "" + ) or "documentary" in meta.get("keywords", "").lower().replace(" ", "").replace( + "-", "" + ): + if meta["resolution"].startswith("720"): type_id = 62 # 720p - if meta['resolution'].startswith("1080"): + if meta["resolution"].startswith("1080"): type_id = 63 # 1080 - if meta.get('is_disc', '') == 'BDMV': + if meta.get("is_disc", "") == "BDMV": type_id = 64 # BDMV - if "animation" in meta.get("genres", "").lower().replace(' ', '').replace('-', '') or 'animation' in meta.get("keywords", "").lower().replace(' ', '').replace('-', ''): - if meta.get('sd', 1) == 0: + if "animation" in meta.get("genres", "").lower().replace(" ", "").replace( + "-", "" + ) or "animation" in meta.get("keywords", "").lower().replace(" ", "").replace( + "-", "" + ): + if meta.get("sd", 1) == 0: type_id = 58 - if meta['resolution'] in ("2160p"): + if meta["resolution"] in ("2160p"): type_id = 108 - if meta.get('is_disc', '') == 'BDMV': + if meta.get("is_disc", "") == "BDMV": type_id = 109 # I guess complete packs?: - # 103 = TV Shows KR - # 101 = TV Shows JP - # 60 = TV Shows + # 103 = TV Shows KR + # 101 = TV Shows JP + # 60 = TV Shows return type_id async def get_anon(self, anon): - if anon == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: - anon = 'no' + if ( + anon == 0 + and bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + is False + ): + anon = "no" else: - anon = 'yes' + anon = "yes" return anon async def upload(self, meta, disctype): @@ -121,86 +141,113 @@ async def upload(self, meta, disctype): # # POST > upload/upload - if meta['bdinfo'] is not None: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8') + if meta["bdinfo"] is not None: + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ) else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8') + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ) - ttg_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() + ttg_desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - with open(torrent_path, 'rb') as torrentFile: - if len(meta['filelist']) == 1: - torrentFileName = unidecode(os.path.basename(meta['video']).replace(' ', '.')) + with open(torrent_path, "rb") as torrentFile: + if 
len(meta["filelist"]) == 1: + torrentFileName = unidecode( + os.path.basename(meta["video"]).replace(" ", ".") + ) else: - torrentFileName = unidecode(os.path.basename(meta['path']).replace(' ', '.')) + torrentFileName = unidecode( + os.path.basename(meta["path"]).replace(" ", ".") + ) files = { - 'file': (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent"), - 'nfo': ("torrent.nfo", mi_dump) + "file": ( + f"{torrentFileName}.torrent", + torrentFile, + "application/x-bittorent", + ), + "nfo": ("torrent.nfo", mi_dump), } data = { - 'MAX_FILE_SIZE': '4000000', - 'team': '', - 'hr': 'no', - 'name': ttg_name, - 'type': await self.get_type_id(meta), - 'descr': ttg_desc.rstrip(), - - 'anonymity': await self.get_anon(meta['anon']), - 'nodistr': 'no', - + "MAX_FILE_SIZE": "4000000", + "team": "", + "hr": "no", + "name": ttg_name, + "type": await self.get_type_id(meta), + "descr": ttg_desc.rstrip(), + "anonymity": await self.get_anon(meta["anon"]), + "nodistr": "no", } url = "https://totheglory.im/takeupload.php" - if int(meta['imdb_id'].replace('tt', '')) != 0: - data['imdb_c'] = f"tt{meta.get('imdb_id', '').replace('tt', '')}" + if int(meta["imdb_id"].replace("tt", "")) != 0: + data["imdb_c"] = f"tt{meta.get('imdb_id', '').replace('tt', '')}" # Submit - if meta['debug']: + if meta["debug"]: console.print(url) console.print(data) else: with requests.Session() as session: - cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/TTG.pkl") - with open(cookiefile, 'rb') as cf: + cookiefile = os.path.abspath( + f"{meta['base_dir']}/data/cookies/TTG.pkl" + ) + with open(cookiefile, "rb") as cf: session.cookies.update(pickle.load(cf)) up = session.post(url=url, data=data, files=files) torrentFile.close() mi_dump.close() if up.url.startswith("https://totheglory.im/details.php?id="): - console.print(f"[green]Uploaded to: [yellow]{up.url}[/yellow][/green]") + console.print( + f"[green]Uploaded to: [yellow]{up.url}[/yellow][/green]" + ) id = re.search(r"(id=)(\d+)", urlparse(up.url).query).group(2) await self.download_new_torrent(id, torrent_path) else: console.print(data) console.print("\n\n") console.print(up.text) - raise UploadException(f"Upload to TTG Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') # noqa #F405 + raise UploadException( + f"Upload to TTG Failed: result URL {up.url} ({up.status_code}) was not expected", + "red", + ) # noqa #F405 return async def search_existing(self, meta, disctype): dupes = [] with requests.Session() as session: cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/TTG.pkl") - with open(cookiefile, 'rb') as cf: + with open(cookiefile, "rb") as cf: session.cookies.update(pickle.load(cf)) - if int(meta['imdb_id'].replace('tt', '')) != 0: + if int(meta["imdb_id"].replace("tt", "")) != 0: imdb = f"imdb{meta['imdb_id'].replace('tt', '')}" else: imdb = "" - if meta.get('is_disc', '') == "BDMV": + if meta.get("is_disc", "") == "BDMV": res_type = f"{meta['resolution']} Blu-ray" - elif meta.get('is_disc', '') == "DVD": + elif meta.get("is_disc", "") == "DVD": res_type = "DVD" else: - res_type = meta['resolution'] - search_url = f"https://totheglory.im/browse.php?search_field= {imdb} {res_type}" + res_type = meta["resolution"] + search_url = ( + f"https://totheglory.im/browse.php?search_field= {imdb} {res_type}" + ) r = session.get(search_url) await asyncio.sleep(0.5) - soup = BeautifulSoup(r.text, 'html.parser') - find = soup.find_all('a', href=True) + soup = BeautifulSoup(r.text, "html.parser") + find = 
soup.find_all("a", href=True) for each in find: - if each['href'].startswith('/t/'): + if each["href"].startswith("/t/"): release = re.search(r"()()?(.*)Logout""") != -1: @@ -246,79 +295,104 @@ async def validate_cookies(self, meta, cookiefile): async def login(self, cookiefile): url = "https://totheglory.im/takelogin.php" data = { - 'username': self.username, - 'password': self.password, - 'passid': self.passid, - 'passan': self.passan + "username": self.username, + "password": self.password, + "passid": self.passid, + "passan": self.passan, } with requests.Session() as session: response = session.post(url, data=data) await asyncio.sleep(0.5) - if response.url.endswith('2fa.php'): - soup = BeautifulSoup(response.text, 'html.parser') - auth_token = soup.find('input', {'name': 'authenticity_token'}).get('value') + if response.url.endswith("2fa.php"): + soup = BeautifulSoup(response.text, "html.parser") + auth_token = soup.find("input", {"name": "authenticity_token"}).get( + "value" + ) two_factor_data = { - 'otp': console.input('[yellow]TTG 2FA Code: '), - 'authenticity_token': auth_token, - 'uid': self.uid + "otp": console.input("[yellow]TTG 2FA Code: "), + "authenticity_token": auth_token, + "uid": self.uid, } two_factor_url = "https://totheglory.im/take2fa.php" response = session.post(two_factor_url, data=two_factor_data) await asyncio.sleep(0.5) - if response.url.endswith('my.php'): - console.print('[green]Successfully logged into TTG') - with open(cookiefile, 'wb') as cf: + if response.url.endswith("my.php"): + console.print("[green]Successfully logged into TTG") + with open(cookiefile, "wb") as cf: pickle.dump(session.cookies, cf) else: - console.print('[bold red]Something went wrong') + console.print("[bold red]Something went wrong") await asyncio.sleep(1) console.print(response.text) console.print(response.url) return async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as descfile: + base = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + with open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "w", + encoding="utf-8", + ) as descfile: from src.bbcode import BBCODE from src.trackers.COMMON import COMMON + common = COMMON(config=self.config) - if int(meta.get('imdb_id', '0').replace('tt', '')) != 0: + if int(meta.get("imdb_id", "0").replace("tt", "")) != 0: ptgen = await common.ptgen(meta) - if ptgen.strip() != '': + if ptgen.strip() != "": descfile.write(ptgen) # Add This line for all web-dls - if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '' and meta.get('description', None) is None: - descfile.write(f"[center][b][color=#ff00ff][size=3]{meta['service_longname']}的无损REMUX片源,没有转码/This release is sourced from {meta['service_longname']} and is not transcoded, just remuxed from the direct {meta['service_longname']} stream[/size][/color][/b][/center]") + if ( + meta["type"] == "WEBDL" + and meta.get("service_longname", "") != "" + and meta.get("description", None) is None + ): + descfile.write( + f"[center][b][color=#ff00ff][size=3]{meta['service_longname']}的无损REMUX片源,没有转码/This release is sourced from {meta['service_longname']} and is not transcoded, just remuxed from the direct {meta['service_longname']} stream[/size][/color][/b][/center]" + ) bbcode = BBCODE() - if meta.get('discs', []) != 
[]: - discs = meta['discs'] + if meta.get("discs", []) != []: + discs = meta["discs"] for each in discs: - if each['type'] == "BDMV": - descfile.write(f"[quote={each.get('name', 'BDINFO')}]{each['summary']}[/quote]\n") + if each["type"] == "BDMV": + descfile.write( + f"[quote={each.get('name', 'BDINFO')}]{each['summary']}[/quote]\n" + ) descfile.write("\n") pass - if each['type'] == "DVD": + if each["type"] == "DVD": descfile.write(f"{each['name']}:\n") - descfile.write(f"[quote={os.path.basename(each['vob'])}][{each['vob_mi']}[/quote] [quote={os.path.basename(each['ifo'])}][{each['ifo_mi']}[/quote]\n") + descfile.write( + f"[quote={os.path.basename(each['vob'])}][{each['vob_mi']}[/quote] [quote={os.path.basename(each['ifo'])}][{each['ifo_mi']}[/quote]\n" + ) descfile.write("\n") else: - mi = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8').read() + mi = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", + "r", + encoding="utf-8", + ).read() descfile.write(f"[quote=MediaInfo]{mi}[/quote]") descfile.write("\n") desc = base desc = bbcode.convert_code_to_quote(desc) desc = bbcode.convert_spoiler_to_hide(desc) desc = bbcode.convert_comparison_to_centered(desc, 1000) - desc = desc.replace('[img]', '[img]') + desc = desc.replace("[img]", "[img]") desc = re.sub(r"(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) descfile.write(desc) - images = meta['image_list'] + images = meta["image_list"] if len(images) > 0: descfile.write("[center]") - for each in range(len(images[:int(meta['screens'])])): - web_url = images[each]['web_url'] - img_url = images[each]['img_url'] + for each in range(len(images[: int(meta["screens"])])): + web_url = images[each]["web_url"] + img_url = images[each]["img_url"] descfile.write(f"[url={web_url}][img]{img_url}[/img][/url]") descfile.write("[/center]") if self.signature is not None: @@ -333,5 +407,7 @@ async def download_new_torrent(self, id, torrent_path): with open(torrent_path, "wb") as tor: tor.write(r.content) else: - console.print("[red]There was an issue downloading the new .torrent from TTG") + console.print( + "[red]There was an issue downloading the new .torrent from TTG" + ) console.print(r.text) diff --git a/src/trackers/ULCX.py b/src/trackers/ULCX.py index 39555dfd0..980f5af38 100644 --- a/src/trackers/ULCX.py +++ b/src/trackers/ULCX.py @@ -17,9 +17,36 @@ def __init__(self, config): self.search_url = "https://upload.cx/api/torrents/filter" self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = [ - "Tigole", "x0r", "Judas", "SPDVD", "MeGusta", "YIFY", "SWTYBLZ", "TAoE", "TSP", "TSPxL", "LAMA", "4K4U", "ION10", - "Will1869", "TGx", "Sicario", "QxR", "Hi10", "EMBER", "FGT", "AROMA", "d3g", "nikt0", "Grym", "RARBG", "iVy", "NuBz", - "NAHOM", "EDGE2020", "FnP", + "Tigole", + "x0r", + "Judas", + "SPDVD", + "MeGusta", + "YIFY", + "SWTYBLZ", + "TAoE", + "TSP", + "TSPxL", + "LAMA", + "4K4U", + "ION10", + "Will1869", + "TGx", + "Sicario", + "QxR", + "Hi10", + "EMBER", + "FGT", + "AROMA", + "d3g", + "nikt0", + "Grym", + "RARBG", + "iVy", + "NuBz", + "NAHOM", + "EDGE2020", + "FnP", ] async def get_cat_id(self, category_name): @@ -90,7 +117,8 @@ async def upload(self, meta): bd_dump = None desc = open( f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", - "r", encoding='utf-8', + "r", + encoding="utf-8", ).read() open_torrent = open( 
f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", diff --git a/src/trackers/UNIT3D_TEMPLATE.py b/src/trackers/UNIT3D_TEMPLATE.py index d3bc06777..63a101756 100644 --- a/src/trackers/UNIT3D_TEMPLATE.py +++ b/src/trackers/UNIT3D_TEMPLATE.py @@ -9,7 +9,7 @@ from src.console import console -class UNIT3D_TEMPLATE(): +class UNIT3D_TEMPLATE: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -26,46 +26,46 @@ class UNIT3D_TEMPLATE(): def __init__(self, config): self.config = config - self.tracker = 'Abbreviated' - self.source_flag = 'Source flag for .torrent' - self.upload_url = 'https://domain.tld/api/torrents/upload' - self.search_url = 'https://domain.tld/api/torrents/filter' + self.tracker = "Abbreviated" + self.source_flag = "Source flag for .torrent" + self.upload_url = "https://domain.tld/api/torrents/upload" + self.search_url = "https://domain.tld/api/torrents/filter" self.signature = None self.banned_groups = [""] pass async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') + "MOVIE": "1", + "TV": "2", + }.get(category_name, "0") return category_id async def get_type_id(self, type): type_id = { - 'DISC': '1', - 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - 'ENCODE': '3' - }.get(type, '0') + "DISC": "1", + "REMUX": "2", + "WEBDL": "4", + "WEBRIP": "5", + "HDTV": "6", + "ENCODE": "3", + }.get(type, "0") return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') + "8640p": "10", + "4320p": "1", + "2160p": "2", + "1440p": "3", + "1080p": "3", + "1080i": "4", + "720p": "5", + "576p": "6", + "576i": "7", + "480p": "8", + "480i": "9", + }.get(resolution, "10") return resolution_id ############################################################### @@ -75,71 +75,101 @@ async def get_res_id(self, resolution): async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) + cat_id = await self.get_cat_id(meta["category"]) + type_id = await self.get_type_id(meta["type"]) + resolution_id = await self.get_res_id(meta["resolution"]) await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + region_id = await common.unit3d_region_ids(meta.get("region")) + distributor_id = await common.unit3d_distributor_ids(meta.get("distributor")) + if ( + meta["anon"] == 0 + and bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + is False + ): anon = 0 else: anon = 1 - if meta['bdinfo'] is not None: + if meta["bdinfo"] is not None: mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + bd_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ).read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 
'r', encoding='utf-8').read() + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ).read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') - files = {'torrent': open_torrent} + desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + open_torrent = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", + "rb", + ) + files = {"torrent": open_torrent} data = { - 'name': meta['name'], - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb_id'].replace('tt', ''), - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, + "name": meta["name"], + "description": desc, + "mediainfo": mi_dump, + "bdinfo": bd_dump, + "category_id": cat_id, + "type_id": type_id, + "resolution_id": resolution_id, + "tmdb": meta["tmdb"], + "imdb": meta["imdb_id"].replace("tt", ""), + "tvdb": meta["tvdb_id"], + "mal": meta["mal_id"], + "igdb": 0, + "anonymous": anon, + "stream": meta["stream"], + "sd": meta["sd"], + "keywords": meta["keywords"], + "personal_release": int(meta.get("personalrelease", False)), + "internal": 0, + "featured": 0, + "free": 0, + "doubleup": 0, + "sticky": 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 + if self.config["TRACKERS"][self.tracker].get("internal", False) is True: + if meta["tag"] != "" and ( + meta["tag"][1:] + in self.config["TRACKERS"][self.tracker].get("internal_groups", []) + ): + data["internal"] = 1 if region_id != 0: - data['region_id'] = region_id + data["region_id"] = region_id if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') + data["distributor_id"] = distributor_id + if meta.get("category") == "TV": + data["season_number"] = meta.get("season_int", "0") + data["episode_number"] = meta.get("episode_int", "0") headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + "User-Agent": f"Upload Assistant/2.1 ({platform.system()} {platform.release()})" } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } - - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + params = {"api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip()} + + if meta["debug"] is False: + response = requests.post( + url=self.upload_url, + files=files, + data=data, + headers=headers, + params=params, + ) try: console.print(response.json()) except Exception: @@ -154,25 +184,27 @@ async def search_existing(self, meta, disctype): dupes = [] 
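            # The dupe search below queries the UNIT3D filter endpoint (self.search_url) with
            # api_token, tmdbId, categories[], types[] and resolutions[]; each hit's
            # ["attributes"]["name"] is appended to dupes so the caller can compare release names.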
console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" + "api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip(), + "tmdbId": meta["tmdb"], + "categories[]": await self.get_cat_id(meta["category"]), + "types[]": await self.get_type_id(meta["type"]), + "resolutions[]": await self.get_res_id(meta["resolution"]), + "name": "", } - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" + if meta.get("edition", "") != "": + params["name"] = params["name"] + f" {meta['edition']}" try: response = requests.get(url=self.search_url, params=params) response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] + for each in response["data"]: + result = [each][0]["attributes"]["name"] # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + console.print( + "[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect" + ) await asyncio.sleep(5) return dupes diff --git a/src/trackers/UTP.py b/src/trackers/UTP.py index 30d16fea3..ad9b9d108 100644 --- a/src/trackers/UTP.py +++ b/src/trackers/UTP.py @@ -9,7 +9,7 @@ from src.console import console -class UTP(): +class UTP: """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -17,13 +17,14 @@ class UTP(): Set type/category IDs Upload """ + def __init__(self, config): self.config = config - self.tracker = 'UTP' - self.source_flag = 'UTOPIA' - self.search_url = 'https://utp.to/api/torrents/filter' - self.torrent_url = 'https://utp.to/api/torrents/' - self.upload_url = 'https://utp.to/api/torrents/upload' + self.tracker = "UTP" + self.source_flag = "UTOPIA" + self.search_url = "https://utp.to/api/torrents/filter" + self.torrent_url = "https://utp.to/api/torrents/" + self.upload_url = "https://utp.to/api/torrents/upload" self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = [] pass @@ -31,71 +32,105 @@ def __init__(self, config): async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True) - cat_id = await self.get_cat_id(meta['category'], meta.get('edition', '')) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + await common.unit3d_edit_desc( + meta, self.tracker, self.signature, comparison=True + ) + cat_id = await self.get_cat_id(meta["category"], meta.get("edition", "")) + type_id = await self.get_type_id(meta["type"]) + resolution_id = await self.get_res_id(meta["resolution"]) + region_id = await 
common.unit3d_region_ids(meta.get("region")) + distributor_id = await common.unit3d_distributor_ids(meta.get("distributor")) + if ( + meta["anon"] == 0 + and bool( + str2bool( + str(self.config["TRACKERS"][self.tracker].get("anon", "False")) + ) + ) + is False + ): anon = 0 else: anon = 1 - if meta['bdinfo'] is not None: + if meta["bdinfo"] is not None: mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + bd_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ).read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ).read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[UTOPIA]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[UTOPIA]{meta['clean_name']}.torrent", 'rb') - files = {'torrent': ("placeholder.torrent", open_torrent, "application/x-bittorrent")} + desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[UTOPIA]DESCRIPTION.txt", + "r", + encoding="utf-8", + ).read() + open_torrent = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[UTOPIA]{meta['clean_name']}.torrent", + "rb", + ) + files = { + "torrent": ("placeholder.torrent", open_torrent, "application/x-bittorrent") + } data = { - 'name': meta['name'], - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb_id'].replace('tt', ''), - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, + "name": meta["name"], + "description": desc, + "mediainfo": mi_dump, + "bdinfo": bd_dump, + "category_id": cat_id, + "type_id": type_id, + "resolution_id": resolution_id, + "tmdb": meta["tmdb"], + "imdb": meta["imdb_id"].replace("tt", ""), + "tvdb": meta["tvdb_id"], + "mal": meta["mal_id"], + "igdb": 0, + "anonymous": anon, + "stream": meta["stream"], + "sd": meta["sd"], + "keywords": meta["keywords"], + "personal_release": int(meta.get("personalrelease", False)), + "internal": 0, + "featured": 0, + "free": 0, + "doubleup": 0, + "sticky": 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 + if self.config["TRACKERS"][self.tracker].get("internal", False) is True: + if meta["tag"] != "" and ( + meta["tag"][1:] + in self.config["TRACKERS"][self.tracker].get("internal_groups", []) + ): + data["internal"] = 1 if region_id != 0: - data['region_id'] = region_id + data["region_id"] = region_id if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') + data["distributor_id"] = distributor_id + if meta.get("category") == "TV": + data["season_number"] = meta.get("season_int", "0") + data["episode_number"] = meta.get("episode_int", "0") headers = { - 'User-Agent': f'Upload 
Assistant/2.1 ({platform.system()} {platform.release()})' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + "User-Agent": f"Upload Assistant/2.1 ({platform.system()} {platform.release()})" } + params = {"api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip()} - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + if meta["debug"] is False: + response = requests.post( + url=self.upload_url, + files=files, + data=data, + headers=headers, + params=params, + ) try: console.print(response.json()) except Exception: @@ -108,60 +143,59 @@ async def upload(self, meta, disctype): open_torrent.close() async def get_cat_id(self, category_name, edition): - category_id = { - 'MOVIE': '1', - 'TV': '2', - 'FANRES': '3' - }.get(category_name, '0') - if category_name == 'MOVIE' and 'FANRES' in edition: - category_id = '3' + category_id = {"MOVIE": "1", "TV": "2", "FANRES": "3"}.get(category_name, "0") + if category_name == "MOVIE" and "FANRES" in edition: + category_id = "3" return category_id async def get_type_id(self, type): type_id = { - 'DISC': '1', - 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - 'ENCODE': '3' - }.get(type, '0') + "DISC": "1", + "REMUX": "2", + "WEBDL": "4", + "WEBRIP": "5", + "HDTV": "6", + "ENCODE": "3", + }.get(type, "0") return type_id async def get_res_id(self, resolution): - resolution_id = { - '4320p': '1', - '2160p': '2', - '1080p': '3', - '1080i': '4' - }.get(resolution, '1') + resolution_id = {"4320p": "1", "2160p": "2", "1080p": "3", "1080i": "4"}.get( + resolution, "1" + ) return resolution_id async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category'], meta.get('edition', '')), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" + "api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip(), + "tmdbId": meta["tmdb"], + "categories[]": await self.get_cat_id( + meta["category"], meta.get("edition", "") + ), + "types[]": await self.get_type_id(meta["type"]), + "resolutions[]": await self.get_res_id(meta["resolution"]), + "name": "", } - if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + f" {meta['edition']}" + if meta["category"] == "TV": + params["name"] = ( + params["name"] + f" {meta.get('season', '')}{meta.get('episode', '')}" + ) + if meta.get("edition", "") != "": + params["name"] = params["name"] + f" {meta['edition']}" try: response = requests.get(url=self.search_url, params=params) response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] + for each in response["data"]: + result = [each][0]["attributes"]["name"] # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + console.print( + "[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect" + ) await asyncio.sleep(5) return dupes diff --git a/src/vs.py b/src/vs.py index 7fb918cfe..6b2931caf 100644 --- a/src/vs.py +++ b/src/vs.py @@ -20,7 +20,8 @@ def FrameProps(n, f, clip): def optimize_images(image, config): import platform  # Ensure platform is imported here - if config.get('optimize_images', True): + + if config.get("optimize_images", True): if os.path.exists(image): try: pyver = platform.python_version_tuple() @@ -35,9 +36,11 @@ def optimize_images(image, config): return -def vs_screengn(source, encode=None, filter_b_frames=False, num=5, dir=".", config=None): +def vs_screengn( + source, encode=None, filter_b_frames=False, num=5, dir=".", config=None +): if config is None: - config = {'optimize_images': True}  # Default configuration + config = {"optimize_images": True}  # Default configuration screens_file = os.path.join(dir, "screens.txt") diff --git a/upload.py b/upload.py index 2338d6f73..44df6366b 100644 --- a/upload.py +++ b/upload.py @@ -58,7 +58,7 @@ from rich.style import Style -cli_ui.setup(color='always', title="L4G's Upload Assistant") +cli_ui.setup(color="always", title="L4G's Upload Assistant") base_dir = os.path.abspath(os.path.dirname(__file__)) @@ -68,23 +68,54 @@ if not os.path.exists(os.path.abspath(f"{base_dir}/data/config.py")): try: if os.path.exists(os.path.abspath(f"{base_dir}/data/config.json")): - with open(f"{base_dir}/data/config.json", 'r', encoding='utf-8-sig') as f: + with open( + f"{base_dir}/data/config.json", "r", encoding="utf-8-sig" + ) as f: json_config = json.load(f) f.close() - with open(f"{base_dir}/data/config.py", 'w') as f: + with open(f"{base_dir}/data/config.py", "w") as f: f.write(f"config = {json.dumps(json_config, indent=4)}") f.close() - cli_ui.info(cli_ui.green, "Successfully updated config from .json to .py") - cli_ui.info(cli_ui.green, "It is now safe for you to delete", cli_ui.yellow, "data/config.json", "if you wish") + cli_ui.info( + cli_ui.green, "Successfully updated config from .json to .py" + ) + cli_ui.info( + cli_ui.green, + "It is now safe for you to delete", + cli_ui.yellow, + "data/config.json", + "if you wish", + ) from data.config import config else: raise NotImplementedError except Exception: - cli_ui.info(cli_ui.red, "We have switched from .json to .py for config to have a much more lenient experience") + cli_ui.info( + cli_ui.red, + "We have switched from .json to .py for config to have a much more lenient experience", + ) cli_ui.info(cli_ui.red, "Looks like the auto updater didn't work though") cli_ui.info(cli_ui.red, "Updating is just 2 easy steps:") - cli_ui.info(cli_ui.red, "1: Rename", cli_ui.yellow, os.path.abspath(f"{base_dir}/data/config.json"), cli_ui.red, "to", cli_ui.green, os.path.abspath(f"{base_dir}/data/config.py")) - cli_ui.info(cli_ui.red, "2: Add", cli_ui.green, "config = ", cli_ui.red, "to the beginning of", cli_ui.green, os.path.abspath(f"{base_dir}/data/config.py")) + cli_ui.info( + cli_ui.red, + "1: Rename", + cli_ui.yellow, + os.path.abspath(f"{base_dir}/data/config.json"), + cli_ui.red, + "to", + cli_ui.green, + os.path.abspath(f"{base_dir}/data/config.py"), + ) + cli_ui.info( + cli_ui.red, + "2: Add", + cli_ui.green, + "config = ", + cli_ui.red, + "to the beginning of", + cli_ui.green, + os.path.abspath(f"{base_dir}/data/config.py"), + ) exit() else: console.print(traceback.print_exc()) @@ -94,37 +125,43 @@ async def do_the_thing(base_dir): meta = dict() - meta['base_dir'] = base_dir + meta["base_dir"] = base_dir paths = []
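    # Collect literal filesystem paths from the front of argv; the loop breaks at the
    # first argument that is not an existing path, leaving the remaining flags for the
    # Args parser below.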
for each in sys.argv[1:]: if os.path.exists(each): paths.append(os.path.abspath(each)) else: break - meta, help, before_args = parser.parse(tuple(' '.join(sys.argv[1:]).split(' ')), meta) - if meta['cleanup'] and os.path.exists(f"{base_dir}/tmp"): + meta, help, before_args = parser.parse( + tuple(" ".join(sys.argv[1:]).split(" ")), meta + ) + if meta["cleanup"] and os.path.exists(f"{base_dir}/tmp"): shutil.rmtree(f"{base_dir}/tmp") console.print("[bold green]Successfully emptied tmp directory") - if not meta['path']: + if not meta["path"]: exit(0) - path = meta['path'] + path = meta["path"] path = os.path.abspath(path) if path.endswith('"'): path = path[:-1] queue = [] if os.path.exists(path): - meta, help, before_args = parser.parse(tuple(' '.join(sys.argv[1:]).split(' ')), meta) + meta, help, before_args = parser.parse( + tuple(" ".join(sys.argv[1:]).split(" ")), meta + ) queue = [path] else: # Search glob if dirname exists if os.path.exists(os.path.dirname(path)) and len(paths) <= 1: - escaped_path = path.replace('[', '[[]') + escaped_path = path.replace("[", "[[]") globs = glob.glob(escaped_path) queue = globs if len(queue) != 0: md_text = "\n - ".join(queue) - console.print("\n[bold green]Queuing these files:[/bold green]", end='') - console.print(Markdown(f"- {md_text.rstrip()}\n\n", style=Style(color='cyan'))) + console.print("\n[bold green]Queuing these files:[/bold green]", end="") + console.print( + Markdown(f"- {md_text.rstrip()}\n\n", style=Style(color="cyan")) + ) console.print("\n\n") else: console.print(f"[red]Path: [bold red]{path}[/bold red] does not exist") @@ -132,15 +169,19 @@ async def do_the_thing(base_dir): elif os.path.exists(os.path.dirname(path)) and len(paths) != 1: queue = paths md_text = "\n - ".join(queue) - console.print("\n[bold green]Queuing these files:[/bold green]", end='') - console.print(Markdown(f"- {md_text.rstrip()}\n\n", style=Style(color='cyan'))) + console.print("\n[bold green]Queuing these files:[/bold green]", end="") + console.print( + Markdown(f"- {md_text.rstrip()}\n\n", style=Style(color="cyan")) + ) console.print("\n\n") elif not os.path.exists(os.path.dirname(path)): split_path = path.split() p1 = split_path[0] for i, each in enumerate(split_path): try: - if os.path.exists(p1) and not os.path.exists(f"{p1} {split_path[i + 1]}"): + if os.path.exists(p1) and not os.path.exists( + f"{p1} {split_path[i + 1]}" + ): queue.append(p1) p1 = split_path[i + 1] else: @@ -149,31 +190,76 @@ if os.path.exists(p1): queue.append(p1) else: - console.print(f"[red]Path: [bold red]{p1}[/bold red] does not exist") + console.print( + f"[red]Path: [bold red]{p1}[/bold red] does not exist" + ) if len(queue) >= 1: md_text = "\n - ".join(queue) - console.print("\n[bold green]Queuing these files:[/bold green]", end='') - console.print(Markdown(f"- {md_text.rstrip()}\n\n", style=Style(color='cyan'))) + console.print("\n[bold green]Queuing these files:[/bold green]", end="") + console.print( + Markdown(f"- {md_text.rstrip()}\n\n", style=Style(color="cyan")) + ) console.print("\n\n") else: # Add Search Here - console.print("[red]There was an issue with your input. If you think this was not an issue, please make a report that includes the full command used.") + console.print( + "[red]There was an issue with your input. If you think this was not an issue, please make a report that includes the full command used."
+ ) exit() base_meta = {k: v for k, v in meta.items()} for path in queue: meta = {k: v for k, v in base_meta.items()} - meta['path'] = path - meta['uuid'] = None + meta["path"] = path + meta["uuid"] = None try: with open(f"{base_dir}/tmp/{os.path.basename(path)}/meta.json") as f: saved_meta = json.load(f) for key, value in saved_meta.items(): overwrite_list = [ - 'trackers', 'dupe', 'debug', 'anon', 'category', 'type', 'screens', 'nohash', 'manual_edition', 'imdb', 'tmdb_manual', 'mal', 'manual', - 'hdb', 'ptp', 'blu', 'no_season', 'no_aka', 'no_year', 'no_dub', 'no_tag', 'no_seed', 'client', 'desclink', 'descfile', 'desc', 'draft', 'modq', 'region', 'freeleech', - 'personalrelease', 'unattended', 'season', 'episode', 'torrent_creation', 'qbit_tag', 'qbit_cat', 'skip_imghost_upload', 'imghost', 'manual_source', 'webdv', 'hardcoded-subs' + "trackers", + "dupe", + "debug", + "anon", + "category", + "type", + "screens", + "nohash", + "manual_edition", + "imdb", + "tmdb_manual", + "mal", + "manual", + "hdb", + "ptp", + "blu", + "no_season", + "no_aka", + "no_year", + "no_dub", + "no_tag", + "no_seed", + "client", + "desclink", + "descfile", + "desc", + "draft", + "modq", + "region", + "freeleech", + "personalrelease", + "unattended", + "season", + "episode", + "torrent_creation", + "qbit_tag", + "qbit_cat", + "skip_imghost_upload", + "imghost", + "manual_source", + "webdv", + "hardcoded-subs", ] if meta.get(key, None) != value and key in overwrite_list: saved_meta[key] = meta[key] @@ -182,89 +268,187 @@ async def do_the_thing(base_dir): except FileNotFoundError: pass console.print(f"[green]Gathering info for {os.path.basename(path)}") - if meta['imghost'] is None: - meta['imghost'] = config['DEFAULT']['img_host_1'] - if not meta['unattended']: - ua = config['DEFAULT'].get('auto_mode', False) + if meta["imghost"] is None: + meta["imghost"] = config["DEFAULT"]["img_host_1"] + if not meta["unattended"]: + ua = config["DEFAULT"].get("auto_mode", False) if str(ua).lower() == "true": - meta['unattended'] = True + meta["unattended"] = True console.print("[yellow]Running in Auto Mode") - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config) - meta = await prep.gather_prep(meta=meta, mode='cli') - meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await prep.get_name(meta) - - if meta.get('image_list', False) in (False, []) and meta.get('skip_imghost_upload', False) is False: + prep = Prep(screens=meta["screens"], img_host=meta["imghost"], config=config) + meta = await prep.gather_prep(meta=meta, mode="cli") + ( + meta["name_notag"], + meta["name"], + meta["clean_name"], + meta["potential_missing"], + ) = await prep.get_name(meta) + + if ( + meta.get("image_list", False) in (False, []) + and meta.get("skip_imghost_upload", False) is False + ): return_dict = {} - meta['image_list'], dummy_var = prep.upload_screens(meta, meta['screens'], 1, 0, meta['screens'], [], return_dict) - if meta['debug']: - console.print(meta['image_list']) + meta["image_list"], dummy_var = prep.upload_screens( + meta, meta["screens"], 1, 0, meta["screens"], [], return_dict + ) + if meta["debug"]: + console.print(meta["image_list"]) # meta['uploaded_screens'] = True - elif meta.get('skip_imghost_upload', False) is True and meta.get('image_list', False) is False: - meta['image_list'] = [] - - if not os.path.exists(os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent")): + elif ( + meta.get("skip_imghost_upload", False) is True + and meta.get("image_list", 
False) is False + ): + meta["image_list"] = [] + + if not os.path.exists( + os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") + ): reuse_torrent = None - if meta.get('rehash', False) is False: + if meta.get("rehash", False) is False: reuse_torrent = await client.find_existing_torrent(meta) if reuse_torrent is not None: - prep.create_base_from_existing_torrent(reuse_torrent, meta['base_dir'], meta['uuid']) - if meta['nohash'] is False and reuse_torrent is None: - prep.create_torrent(meta, Path(meta['path']), "BASE") - if meta['nohash']: - meta['client'] = "none" - elif os.path.exists(os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent")) and meta.get('rehash', False) is True and meta['nohash'] is False: - prep.create_torrent(meta, Path(meta['path']), "BASE") - if int(meta.get('randomized', 0)) >= 1: - prep.create_random_torrents(meta['base_dir'], meta['uuid'], meta['randomized'], meta['path']) - - if meta.get('trackers', None) is not None: - trackers = meta['trackers'] + prep.create_base_from_existing_torrent( + reuse_torrent, meta["base_dir"], meta["uuid"] + ) + if meta["nohash"] is False and reuse_torrent is None: + prep.create_torrent(meta, Path(meta["path"]), "BASE") + if meta["nohash"]: + meta["client"] = "none" + elif ( + os.path.exists( + os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") + ) + and meta.get("rehash", False) is True + and meta["nohash"] is False + ): + prep.create_torrent(meta, Path(meta["path"]), "BASE") + if int(meta.get("randomized", 0)) >= 1: + prep.create_random_torrents( + meta["base_dir"], meta["uuid"], meta["randomized"], meta["path"] + ) + + if meta.get("trackers", None) is not None: + trackers = meta["trackers"] else: - trackers = config['TRACKERS']['default_trackers'] + trackers = config["TRACKERS"]["default_trackers"] if "," in trackers: - trackers = trackers.split(',') - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: + trackers = trackers.split(",") + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", "w") as f: json.dump(meta, f, indent=4) f.close() confirm = get_confirmation(meta) while confirm is False: # help.print_help() - editargs = cli_ui.ask_string("Input args that need correction e.g.(--tag NTb --category tv --tmdb 12345)") - editargs = (meta['path'],) + tuple(editargs.split()) - if meta['debug']: + editargs = cli_ui.ask_string( + "Input args that need correction e.g.(--tag NTb --category tv --tmdb 12345)" + ) + editargs = (meta["path"],) + tuple(editargs.split()) + if meta["debug"]: editargs = editargs + ("--debug",) meta, help, before_args = parser.parse(editargs, meta) # meta = await prep.tmdb_other_meta(meta) - meta['edit'] = True - meta = await prep.gather_prep(meta=meta, mode='cli') - meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await prep.get_name(meta) + meta["edit"] = True + meta = await prep.gather_prep(meta=meta, mode="cli") + ( + meta["name_notag"], + meta["name"], + meta["clean_name"], + meta["potential_missing"], + ) = await prep.get_name(meta) confirm = get_confirmation(meta) if isinstance(trackers, list) is False: trackers = [trackers] trackers = [s.strip().upper() for s in trackers] - if meta.get('manual', False): + if meta.get("manual", False): trackers.insert(0, "MANUAL") #################################### ####### Upload to Trackers ####### # noqa #F266 #################################### common = COMMON(config=config) - api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM', 'LCD', 
'HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', - 'OTW', 'FNP', 'CBR', 'UTP', 'AL', 'SHRI', 'LST', 'BHD', 'TL', 'TIK', 'PSS', 'ULCX'] - http_trackers = ['HDB', 'TTG', 'FL', 'PTER', 'HDT', 'MTV'] + api_trackers = [ + "BLU", + "AITHER", + "STC", + "R4E", + "STT", + "RF", + "ACM", + "LCD", + "HUNO", + "SN", + "LT", + "NBL", + "ANT", + "JPTV", + "TDC", + "OE", + "BHDTV", + "RTF", + "OTW", + "FNP", + "CBR", + "UTP", + "AL", + "SHRI", + "LST", + "BHD", + "TL", + "TIK", + "PSS", + "ULCX", + ] + http_trackers = ["HDB", "TTG", "FL", "PTER", "HDT", "MTV"] tracker_class_map = { - 'BLU': BLU, 'BHD': BHD, 'AITHER': AITHER, 'STC': STC, 'R4E': R4E, 'THR': THR, 'STT': STT, 'HP': HP, 'PTP': PTP, 'RF': RF, 'SN': SN, 'TIK': TIK, - 'ACM': ACM, 'HDB': HDB, 'LCD': LCD, 'TTG': TTG, 'LST': LST, 'HUNO': HUNO, 'FL': FL, 'LT': LT, 'NBL': NBL, 'ANT': ANT, 'PTER': PTER, 'JPTV': JPTV, - 'TL': TL, 'TDC': TDC, 'HDT': HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF': RTF, 'OTW': OTW, 'FNP': FNP, 'CBR': CBR, 'UTP': UTP, 'AL': AL, - 'SHRI': SHRI, 'PSS': PSS, 'ULCX': ULCX} + "BLU": BLU, + "BHD": BHD, + "AITHER": AITHER, + "STC": STC, + "R4E": R4E, + "THR": THR, + "STT": STT, + "HP": HP, + "PTP": PTP, + "RF": RF, + "SN": SN, + "TIK": TIK, + "ACM": ACM, + "HDB": HDB, + "LCD": LCD, + "TTG": TTG, + "LST": LST, + "HUNO": HUNO, + "FL": FL, + "LT": LT, + "NBL": NBL, + "ANT": ANT, + "PTER": PTER, + "JPTV": JPTV, + "TL": TL, + "TDC": TDC, + "HDT": HDT, + "MTV": MTV, + "OE": OE, + "BHDTV": BHDTV, + "RTF": RTF, + "OTW": OTW, + "FNP": FNP, + "CBR": CBR, + "UTP": UTP, + "AL": AL, + "SHRI": SHRI, + "PSS": PSS, + "ULCX": ULCX, + } tracker_capabilities = { - 'LST': {'mod_q': True, 'draft': True}, - 'BLU': {'mod_q': True, 'draft': False}, - 'AITHER': {'mod_q': True, 'draft': False}, - 'BHD': {'draft_live': True}, - 'ULCX': {'mod_q': True} + "LST": {"mod_q": True, "draft": True}, + "BLU": {"mod_q": True, "draft": False}, + "AITHER": {"mod_q": True, "draft": False}, + "BHD": {"draft_live": True}, + "ULCX": {"mod_q": True}, } async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): @@ -273,28 +457,28 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): tracker_caps = tracker_capabilities.get(tracker_class.tracker, {}) # Handle BHD specific draft/live logic - if tracker_class.tracker == 'BHD' and tracker_caps.get('draft_live'): + if tracker_class.tracker == "BHD" and tracker_caps.get("draft_live"): draft_int = await tracker_class.get_live(meta) draft = "Draft" if draft_int == 0 else "Live" # Handle mod_q and draft for other trackers else: - if tracker_caps.get('mod_q'): - modq = await tracker_class.get_flag(meta, 'modq') - modq = 'Yes' if modq else 'No' - if tracker_caps.get('draft'): - draft = await tracker_class.get_flag(meta, 'draft') - draft = 'Yes' if draft else 'No' + if tracker_caps.get("mod_q"): + modq = await tracker_class.get_flag(meta, "modq") + modq = "Yes" if modq else "No" + if tracker_caps.get("draft"): + draft = await tracker_class.get_flag(meta, "draft") + draft = "Yes" if draft else "No" return modq, draft for tracker in trackers: - disctype = meta.get('disctype', None) + disctype = meta.get("disctype", None) tracker = tracker.replace(" ", "").upper().strip() - if meta['name'].endswith('DUPE?'): - meta['name'] = meta['name'].replace(' DUPE?', '') + if meta["name"].endswith("DUPE?"): + meta["name"] = meta["name"].replace(" DUPE?", "") - if meta['debug']: + if meta["debug"]: debug = "(DEBUG)" else: debug = "" @@ -302,20 +486,22 @@ async def 
check_mod_q_and_draft(tracker_class, meta, debug, disctype): if tracker in api_trackers: tracker_class = tracker_class_map[tracker](config=config) - if meta['unattended']: + if meta["unattended"]: upload_to_tracker = True else: try: upload_to_tracker = cli_ui.ask_yes_no( f"Upload to {tracker_class.tracker}? {debug}", - default=meta['unattended'] + default=meta["unattended"], ) except (KeyboardInterrupt, EOFError): sys.exit(1) # Exit immediately if upload_to_tracker: # Get mod_q, draft, or draft/live depending on the tracker - modq, draft = await check_mod_q_and_draft(tracker_class, meta, debug, disctype) + modq, draft = await check_mod_q_and_draft( + tracker_class, meta, debug, disctype + ) # Print mod_q and draft info if relevant if modq is not None: @@ -326,7 +512,9 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): console.print(f"Uploading to {tracker_class.tracker}") # Check if the group is banned for the tracker - if check_banned_group(tracker_class.tracker, tracker_class.banned_groups, meta): + if check_banned_group( + tracker_class.tracker, tracker_class.banned_groups, meta + ): continue # Perform the existing checks for dupes except TL @@ -339,131 +527,158 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): meta = dupe_check(dupes, meta) # Proceed with upload if the meta is set to upload - if tracker == "TL" or meta.get('upload', False): + if tracker == "TL" or meta.get("upload", False): await tracker_class.upload(meta, disctype) - if tracker == 'SN': + if tracker == "SN": await asyncio.sleep(16) await client.add_to_client(meta, tracker_class.tracker) if tracker in http_trackers: tracker_class = tracker_class_map[tracker](config=config) - if meta['unattended']: + if meta["unattended"]: upload_to_tracker = True else: try: upload_to_tracker = cli_ui.ask_yes_no( f"Upload to {tracker_class.tracker}? 
{debug}", - default=meta['unattended'] + default=meta["unattended"], ) except (KeyboardInterrupt, EOFError): sys.exit(1) # Exit immediately if upload_to_tracker: console.print(f"Uploading to {tracker}") - if check_banned_group(tracker_class.tracker, tracker_class.banned_groups, meta): + if check_banned_group( + tracker_class.tracker, tracker_class.banned_groups, meta + ): continue if await tracker_class.validate_credentials(meta) is True: dupes = await tracker_class.search_existing(meta, disctype) dupes = await common.filter_dupes(dupes, meta) meta = dupe_check(dupes, meta) - if meta['upload'] is True: + if meta["upload"] is True: await tracker_class.upload(meta, disctype) await client.add_to_client(meta, tracker_class.tracker) if tracker == "MANUAL": - if meta['unattended']: + if meta["unattended"]: do_manual = True else: - do_manual = cli_ui.ask_yes_no("Get files for manual upload?", default=True) + do_manual = cli_ui.ask_yes_no( + "Get files for manual upload?", default=True + ) if do_manual: for manual_tracker in trackers: - if manual_tracker != 'MANUAL': - manual_tracker = manual_tracker.replace(" ", "").upper().strip() - tracker_class = tracker_class_map[manual_tracker](config=config) + if manual_tracker != "MANUAL": + manual_tracker = ( + manual_tracker.replace(" ", "").upper().strip() + ) + tracker_class = tracker_class_map[manual_tracker]( + config=config + ) if manual_tracker in api_trackers: - await common.unit3d_edit_desc(meta, tracker_class.tracker, tracker_class.signature) + await common.unit3d_edit_desc( + meta, tracker_class.tracker, tracker_class.signature + ) else: await tracker_class.edit_desc(meta) url = await prep.package(meta) if url is False: - console.print(f"[yellow]Unable to upload prep files, they can be found at `tmp/{meta['uuid']}") + console.print( + f"[yellow]Unable to upload prep files, they can be found at `tmp/{meta['uuid']}" + ) else: console.print(f"[green]{meta['name']}") - console.print(f"[green]Files can be found at: [yellow]{url}[/yellow]") + console.print( + f"[green]Files can be found at: [yellow]{url}[/yellow]" + ) if tracker == "THR": - if meta['unattended']: + if meta["unattended"]: upload_to_thr = True else: try: upload_to_ptp = cli_ui.ask_yes_no( - f"Upload to THR? {debug}", - default=meta['unattended'] + f"Upload to THR? 
{debug}", default=meta["unattended"] ) except (KeyboardInterrupt, EOFError): sys.exit(1) # Exit immediately if upload_to_thr: console.print("Uploading to THR") # nable to get IMDB id/Youtube Link - if meta.get('imdb_id', '0') == '0': - imdb_id = cli_ui.ask_string("Unable to find IMDB id, please enter e.g.(tt1234567)") - meta['imdb_id'] = imdb_id.replace('tt', '').zfill(7) - if meta.get('youtube', None) is None: - youtube = cli_ui.ask_string("Unable to find youtube trailer, please link one e.g.(https://www.youtube.com/watch?v=dQw4w9WgXcQ)") - meta['youtube'] = youtube + if meta.get("imdb_id", "0") == "0": + imdb_id = cli_ui.ask_string( + "Unable to find IMDB id, please enter e.g.(tt1234567)" + ) + meta["imdb_id"] = imdb_id.replace("tt", "").zfill(7) + if meta.get("youtube", None) is None: + youtube = cli_ui.ask_string( + "Unable to find youtube trailer, please link one e.g.(https://www.youtube.com/watch?v=dQw4w9WgXcQ)" + ) + meta["youtube"] = youtube thr = THR(config=config) try: with requests.Session() as session: console.print("[yellow]Logging in to THR") session = thr.login(session) console.print("[yellow]Searching for Dupes") - dupes = thr.search_existing(session, disctype, meta.get('imdb_id')) + dupes = thr.search_existing( + session, disctype, meta.get("imdb_id") + ) dupes = await common.filter_dupes(dupes, meta) meta = dupe_check(dupes, meta) - if meta['upload'] is True: + if meta["upload"] is True: await thr.upload(session, meta, disctype) await client.add_to_client(meta, "THR") except Exception: console.print(traceback.print_exc()) if tracker == "PTP": - if meta['unattended']: + if meta["unattended"]: upload_to_ptp = True else: try: upload_to_ptp = cli_ui.ask_yes_no( - f"Upload to {tracker}? {debug}", - default=meta['unattended'] + f"Upload to {tracker}? 
{debug}", default=meta["unattended"] ) except (KeyboardInterrupt, EOFError): sys.exit(1) # Exit immediately if upload_to_ptp: # Ensure the variable is defined before this check console.print(f"Uploading to {tracker}") - if meta.get('imdb_id', '0') == '0': - imdb_id = cli_ui.ask_string("Unable to find IMDB id, please enter e.g.(tt1234567)") - meta['imdb_id'] = imdb_id.replace('tt', '').zfill(7) + if meta.get("imdb_id", "0") == "0": + imdb_id = cli_ui.ask_string( + "Unable to find IMDB id, please enter e.g.(tt1234567)" + ) + meta["imdb_id"] = imdb_id.replace("tt", "").zfill(7) ptp = PTP(config=config) if check_banned_group("PTP", ptp.banned_groups, meta): continue try: console.print("[yellow]Searching for Group ID") - groupID = await ptp.get_group_by_imdb(meta['imdb_id']) + groupID = await ptp.get_group_by_imdb(meta["imdb_id"]) if groupID is None: console.print("[yellow]No Existing Group found") - if meta.get('youtube', None) is None or "youtube" not in str(meta.get('youtube', '')): - youtube = cli_ui.ask_string("Unable to find youtube trailer, please link one e.g.(https://www.youtube.com/watch?v=dQw4w9WgXcQ)", default="") - meta['youtube'] = youtube - meta['upload'] = True + if meta.get( + "youtube", None + ) is None or "youtube" not in str(meta.get("youtube", "")): + youtube = cli_ui.ask_string( + "Unable to find youtube trailer, please link one e.g.(https://www.youtube.com/watch?v=dQw4w9WgXcQ)", + default="", + ) + meta["youtube"] = youtube + meta["upload"] = True else: console.print("[yellow]Searching for Existing Releases") dupes = await ptp.search_existing(groupID, meta, disctype) dupes = await common.filter_dupes(dupes, meta) meta = dupe_check(dupes, meta) - if meta.get('imdb_info', {}) == {}: - meta['imdb_info'] = await prep.get_imdb_info(meta['imdb_id'], meta) - if meta['upload'] is True: + if meta.get("imdb_info", {}) == {}: + meta["imdb_info"] = await prep.get_imdb_info( + meta["imdb_id"], meta + ) + if meta["upload"] is True: ptpUrl, ptpData = await ptp.fill_upload_form(groupID, meta) await ptp.upload(meta, ptpUrl, ptpData, disctype) await asyncio.sleep(5) @@ -473,7 +688,7 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): def get_confirmation(meta): - if meta['debug'] is True: + if meta["debug"] is True: console.print("[bold red]DEBUG: True") console.print(f"Prep material saved to {meta['base_dir']}/tmp/{meta['uuid']}") console.print() @@ -483,46 +698,55 @@ def get_confirmation(meta): cli_ui.info(f"Overview: {meta['overview']}") console.print() cli_ui.info(f"Category: {meta['category']}") - if int(meta.get('tmdb', 0)) != 0: - cli_ui.info(f"TMDB: https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}") - if int(meta.get('imdb_id', '0')) != 0: + if int(meta.get("tmdb", 0)) != 0: + cli_ui.info( + f"TMDB: https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}" + ) + if int(meta.get("imdb_id", "0")) != 0: cli_ui.info(f"IMDB: https://www.imdb.com/title/tt{meta['imdb_id']}") - if int(meta.get('tvdb_id', '0')) != 0: + if int(meta.get("tvdb_id", "0")) != 0: cli_ui.info(f"TVDB: https://www.thetvdb.com/?id={meta['tvdb_id']}&tab=series") - if int(meta.get('mal_id', 0)) != 0: + if int(meta.get("mal_id", 0)) != 0: cli_ui.info(f"MAL : https://myanimelist.net/anime/{meta['mal_id']}") console.print() - if int(meta.get('freeleech', '0')) != 0: + if int(meta.get("freeleech", "0")) != 0: cli_ui.info(f"Freeleech: {meta['freeleech']}") - if meta['tag'] == "": + if meta["tag"] == "": tag = "" else: tag = f" / {meta['tag'][1:]}" - if meta['is_disc'] == 
"DVD": - res = meta['source'] + if meta["is_disc"] == "DVD": + res = meta["source"] else: - res = meta['resolution'] + res = meta["resolution"] cli_ui.info(f"{res} / {meta['type']}{tag}") - if meta.get('personalrelease', False) is True: + if meta.get("personalrelease", False) is True: cli_ui.info("Personal Release!") console.print() - if meta.get('unattended', False) is False: + if meta.get("unattended", False) is False: get_missing(meta) - ring_the_bell = "\a" if config['DEFAULT'].get("sfx_on_prompt", True) is True else "" # \a rings the bell + ring_the_bell = ( + "\a" if config["DEFAULT"].get("sfx_on_prompt", True) is True else "" + ) # \a rings the bell cli_ui.info(ring_the_bell) # Handle the 'keep_folder' logic based on 'is disc' and 'isdir' - if meta.get('is disc', False): - meta['keep_folder'] = False # Ensure 'keep_folder' is False if 'is disc' is True - - if meta['isdir']: - if 'keep_folder' in meta: - if meta['keep_folder']: + if meta.get("is disc", False): + meta["keep_folder"] = ( + False # Ensure 'keep_folder' is False if 'is disc' is True + ) + + if meta["isdir"]: + if "keep_folder" in meta: + if meta["keep_folder"]: cli_ui.info_section(cli_ui.yellow, "Uploading with --keep-folder") - kf_confirm = cli_ui.ask_yes_no("You specified --keep-folder. Uploading in folders might not be allowed. Are you sure you want to proceed?", default=False) + kf_confirm = cli_ui.ask_yes_no( + "You specified --keep-folder. Uploading in folders might not be allowed. Are you sure you want to proceed?", + default=False, + ) if not kf_confirm: - cli_ui.info('Aborting...') + cli_ui.info("Aborting...") exit() cli_ui.info_section(cli_ui.yellow, "Is this correct?") @@ -542,7 +766,7 @@ def get_confirmation(meta): def dupe_check(dupes, meta): if not dupes: console.print("[green]No dupes found") - meta['upload'] = True + meta["upload"] = True return meta else: console.print() @@ -550,45 +774,53 @@ def dupe_check(dupes, meta): console.print() cli_ui.info_section(cli_ui.bold, "Check if these are actually dupes!") cli_ui.info(dupe_text) - if meta['unattended']: - if meta.get('dupe', False) is False: - console.print("[red]Found potential dupes. Aborting. If this is not a dupe, or you would like to upload anyways, pass --skip-dupe-check") + if meta["unattended"]: + if meta.get("dupe", False) is False: + console.print( + "[red]Found potential dupes. Aborting. If this is not a dupe, or you would like to upload anyways, pass --skip-dupe-check" + ) upload = False else: - console.print("[yellow]Found potential dupes. --skip-dupe-check was passed. Uploading anyways") + console.print( + "[yellow]Found potential dupes. --skip-dupe-check was passed. Uploading anyways" + ) upload = True console.print() - if not meta['unattended']: - if meta.get('dupe', False) is False: + if not meta["unattended"]: + if meta.get("dupe", False) is False: upload = cli_ui.ask_yes_no("Upload Anyways?", default=False) else: upload = True if upload is False: - meta['upload'] = False + meta["upload"] = False else: - meta['upload'] = True + meta["upload"] = True for each in dupes: - if each == meta['name']: - meta['name'] = f"{meta['name']} DUPE?" + if each == meta["name"]: + meta["name"] = f"{meta['name']} DUPE?" 
return meta # Return True if banned group def check_banned_group(tracker, banned_group_list, meta): - if meta['tag'] == "": + if meta["tag"] == "": return False else: q = False for tag in banned_group_list: if isinstance(tag, list): - if meta['tag'][1:].lower() == tag[0].lower(): - console.print(f"[bold yellow]{meta['tag'][1:]}[/bold yellow][bold red] was found on [bold yellow]{tracker}'s[/bold yellow] list of banned groups.") + if meta["tag"][1:].lower() == tag[0].lower(): + console.print( + f"[bold yellow]{meta['tag'][1:]}[/bold yellow][bold red] was found on [bold yellow]{tracker}'s[/bold yellow] list of banned groups." + ) console.print(f"[bold red]NOTE: [bold yellow]{tag[1]}") q = True else: - if meta['tag'][1:].lower() == tag.lower(): - console.print(f"[bold yellow]{meta['tag'][1:]}[/bold yellow][bold red] was found on [bold yellow]{tracker}'s[/bold yellow] list of banned groups.") + if meta["tag"][1:].lower() == tag.lower(): + console.print( + f"[bold yellow]{meta['tag'][1:]}[/bold yellow][bold red] was found on [bold yellow]{tracker}'s[/bold yellow] list of banned groups." + ) q = True if q: if not cli_ui.ask_yes_no(cli_ui.red, "Upload Anyways?", default=False): @@ -598,27 +830,27 @@ def check_banned_group(tracker, banned_group_list, meta): def get_missing(meta): info_notes = { - 'edition': 'Special Edition/Release', - 'description': "Please include Remux/Encode Notes if possible (either here or edit your upload)", - 'service': "WEB Service e.g.(AMZN, NF)", - 'region': "Disc Region", - 'imdb': 'IMDb ID (tt1234567)', - 'distributor': "Disc Distributor e.g.(BFI, Criterion, etc)" + "edition": "Special Edition/Release", + "description": "Please include Remux/Encode Notes if possible (either here or edit your upload)", + "service": "WEB Service e.g.(AMZN, NF)", + "region": "Disc Region", + "imdb": "IMDb ID (tt1234567)", + "distributor": "Disc Distributor e.g.(BFI, Criterion, etc)", } missing = [] - if meta.get('imdb_id', '0') == '0': - meta['imdb_id'] = '0' - meta['potential_missing'].append('imdb_id') - if len(meta['potential_missing']) > 0: - for each in meta['potential_missing']: - if str(meta.get(each, '')).replace(' ', '') in ["", "None", "0"]: + if meta.get("imdb_id", "0") == "0": + meta["imdb_id"] = "0" + meta["potential_missing"].append("imdb_id") + if len(meta["potential_missing"]) > 0: + for each in meta["potential_missing"]: + if str(meta.get(each, "")).replace(" ", "") in ["", "None", "0"]: if each == "imdb_id": - each = 'imdb' + each = "imdb" missing.append(f"--{each} | {info_notes.get(each)}") if missing != []: cli_ui.info_section(cli_ui.yellow, "Potentially missing information:") for each in missing: - if each.split('|')[0].replace('--', '').strip() in ["imdb"]: + if each.split("|")[0].replace("--", "").strip() in ["imdb"]: cli_ui.info(cli_ui.red, each) else: cli_ui.info(each) @@ -627,10 +859,12 @@ def get_missing(meta): return -if __name__ == '__main__': +if __name__ == "__main__": pyver = platform.python_version_tuple() if int(pyver[0]) != 3 or int(pyver[1]) < 12: - console.print("[bold red]Python version is too low. Please use Python 3.12 or higher.") + console.print( + "[bold red]Python version is too low. Please use Python 3.12 or higher." + ) sys.exit(1) try: