diff --git a/README.md b/README.md index 57516e11..eadfde50 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ A simple tool to take the work out of uploading. - Can re-use existing torrents instead of hashing new - Generates proper name for your upload using Mediainfo/BDInfo and TMDb/IMDb conforming to site rules - Checks for existing releases already on site - - Uploads to ACM/Aither/AL/ANT/BHD/BHDTV/BLU/CBR/FNP/FL/HDB/HDT/HP/HUNO/JPTV/LCD/LST/LT/MTV/NBL/OE/OTW/PSS/PTP/PTER/PTT/RF/R4E(limited)/RTF/SHRI/SN/SPD/STC/STT/TLC/THR/TL/TVC/TTG/ULCX/UTP/YOINK + - Uploads to ACM/Aither/AL/ANT/BHD/BHDTV/BLU/CBR/FNP/FL/HDB/HDT/HHD/HP/HUNO/JPTV/LCD/LST/LT/MTV/NBL/OE/OTW/PSS/PTP/PTER/PTT/RF/R4E(limited)/RTF/SHRI/SN/SPD/STC/STT/TLC/THR/TL/TVC/TTG/ULCX/UTP/YOINK - Adds to your client with fast resume, seeding instantly (rtorrent/qbittorrent/deluge/watch folder) - ALL WITH MINIMAL INPUT! - Currently works with .mkv/.mp4/Blu-ray/DVD/HD-DVDs diff --git a/data/example-config.py b/data/example-config.py index 743075b6..69e16f25 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -25,10 +25,8 @@ # Number of screenshots to capture "screens": "6", - # Tonemap HDR screenshots and set task limit when tonemapping - # When tonemapping, out of memory errors are more likely to occur with higher task limits + # Tonemap HDR screenshots "tone_map": False, - "tone_task_limit": "1", # Number of cutoff screenshots # If there are at least this many screenshots already, perhaps pulled from existing @@ -36,10 +34,9 @@ "cutoff_screens": "3", # multi processing task limit - # When capturing/optimizing images, limit to this many concurrent tasks - # Causes issues on UNIX based OS when task_limit > 1 - # defaults to os.cpu_count() if thiss value not set - "task_limit": "1", + # When capturing/optimizing/uploading images, limit to this many concurrent tasks + # defaults to os.cpu_count() if this value not set + # "task_limit": "1", # Providing the option to change the size of the screenshot thumbnails where supported. # Default is 350, ie [img=350] @@ -88,10 +85,6 @@ # Play the bell sound effect when asking for confirmation "sfx_on_prompt": True, - # Run an API search after upload to find the permalink and insert as comment in torrent - # Needs a 5 second wait to ensure the API is updated - "get_permalink": False, - # How many trackers need to pass successfull checking to continue with the upload process # Default = 1. If 1 (or more) tracker/s pass banned_group and dupe checking, uploading will continue # If less than the number of trackers pass the checking, exit immediately. 
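For reference, a minimal sketch of how a "task_limit" value like the one documented above is typically consumed, assuming a config dict shaped like data/example-config.py; the helper names and the semaphore-based limiter are illustrative assumptions, not this project's actual code:

import os
import asyncio

def resolve_task_limit(config):
    # Hypothetical helper: fall back to os.cpu_count() when "task_limit" is unset,
    # mirroring the comment in example-config.py above.
    raw = str(config['DEFAULT'].get('task_limit', '')).strip()
    return int(raw) if raw.isdigit() and int(raw) > 0 else (os.cpu_count() or 1)

async def run_limited(coros, limit):
    # Bound concurrent capture/optimize/upload jobs with an asyncio.Semaphore.
    sem = asyncio.Semaphore(limit)
    async def guarded(coro):
        async with sem:
            return await coro
    return await asyncio.gather(*(guarded(c) for c in coros))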
diff --git a/src/bbcode.py b/src/bbcode.py index 97e8b215..d3ee0e90 100644 --- a/src/bbcode.py +++ b/src/bbcode.py @@ -185,7 +185,7 @@ def clean_ptp_description(self, desc, is_disc): desc = desc.replace(comp, f"COMPARISON_PLACEHOLDER-{i} ") comp_placeholders.append(comp) - # Remove Images in IMG tags: + # Remove Images in IMG tags desc = re.sub(r"\[img\][\s\S]*?\[\/img\]", "", desc, flags=re.IGNORECASE) desc = re.sub(r"\[img=[\s\S]*?\]", "", desc, flags=re.IGNORECASE) @@ -309,7 +309,9 @@ def clean_unit3d_description(self, desc, site): Auto\sUploader\[\/b\]\s*\[img=\d+\]https:\/\/blutopia\.xyz\/favicon\.ico\[\/img\]\s*\[\/center\]| \[center\]\s*\[b\]Uploaded\sUsing\s\[url=https:\/\/github\.com\/HDInnovations\/UNIT3D\]UNIT3D\[\/url\] \sAuto\sUploader\[\/b\]\s*\[\/center\]| - \[center\]\[url=https:\/\/github\.com\/z-ink\/uploadrr\]\[img=\d+\]https:\/\/i\.ibb\.co\/2NVWb0c\/uploadrr\.webp\[\/img\]\[\/url\]\[\/center\] + \[center\]\[url=https:\/\/github\.com\/z-ink\/uploadrr\]\[img=\d+\]https:\/\/i\.ibb\.co\/2NVWb0c\/uploadrr\.webp\[\/img\]\[\/url\]\[\/center\]| + \n\[center\]\[url=https:\/\/github\.com\/edge20200\/Only-Uploader\]Powered\sby\s + Only-Uploader\[\/url\]\[\/center\] """ desc = re.sub(bot_signature_regex, "", desc, flags=re.IGNORECASE | re.VERBOSE) desc = re.sub(r"\[center\].*Created by L4G's Upload Assistant.*\[\/center\]", "", desc, flags=re.IGNORECASE) diff --git a/src/exportmi.py b/src/exportmi.py new file mode 100644 index 00000000..f0e03f33 --- /dev/null +++ b/src/exportmi.py @@ -0,0 +1,246 @@ +from src.console import console +from pymediainfo import MediaInfo +import json +import os + + +async def mi_resolution(res, guess, width, scan, height, actual_height): + res_map = { + "3840x2160p": "2160p", "2160p": "2160p", + "2560x1440p": "1440p", "1440p": "1440p", + "1920x1080p": "1080p", "1080p": "1080p", + "1920x1080i": "1080i", "1080i": "1080i", + "1280x720p": "720p", "720p": "720p", + "1280x540p": "720p", "1280x576p": "720p", + "1024x576p": "576p", "576p": "576p", + "1024x576i": "576i", "576i": "576i", + "854x480p": "480p", "480p": "480p", + "854x480i": "480i", "480i": "480i", + "720x576p": "576p", "576p": "576p", + "720x576i": "576i", "576i": "576i", + "720x480p": "480p", "480p": "480p", + "720x480i": "480i", "480i": "480i", + "15360x8640p": "8640p", "8640p": "8640p", + "7680x4320p": "4320p", "4320p": "4320p", + "OTHER": "OTHER"} + resolution = res_map.get(res, None) + if actual_height == 540: + resolution = "OTHER" + if resolution is None: + try: + resolution = guess['screen_size'] + except Exception: + width_map = { + '3840p': '2160p', + '2560p': '1550p', + '1920p': '1080p', + '1920i': '1080i', + '1280p': '720p', + '1024p': '576p', + '1024i': '576i', + '854p': '480p', + '854i': '480i', + '720p': '576p', + '720i': '576i', + '15360p': '4320p', + 'OTHERp': 'OTHER' + } + resolution = width_map.get(f"{width}{scan}", "OTHER") + resolution = await mi_resolution(resolution, guess, width, scan, height, actual_height) + + return resolution + + +async def exportInfo(video, isdir, folder_id, base_dir, export_text): + def filter_mediainfo(data): + filtered = { + "creatingLibrary": data.get("creatingLibrary"), + "media": { + "@ref": data["media"]["@ref"], + "track": [] + } + } + + for track in data["media"]["track"]: + if track["@type"] == "General": + filtered["media"]["track"].append({ + "@type": track["@type"], + "UniqueID": track.get("UniqueID", {}), + "VideoCount": track.get("VideoCount", {}), + "AudioCount": track.get("AudioCount", {}), + "TextCount": track.get("TextCount", {}), + 
"MenuCount": track.get("MenuCount", {}), + "FileExtension": track.get("FileExtension", {}), + "Format": track.get("Format", {}), + "Format_Version": track.get("Format_Version", {}), + "FileSize": track.get("FileSize", {}), + "Duration": track.get("Duration", {}), + "OverallBitRate": track.get("OverallBitRate", {}), + "FrameRate": track.get("FrameRate", {}), + "FrameCount": track.get("FrameCount", {}), + "StreamSize": track.get("StreamSize", {}), + "IsStreamable": track.get("IsStreamable", {}), + "File_Created_Date": track.get("File_Created_Date", {}), + "File_Created_Date_Local": track.get("File_Created_Date_Local", {}), + "File_Modified_Date": track.get("File_Modified_Date", {}), + "File_Modified_Date_Local": track.get("File_Modified_Date_Local", {}), + "Encoded_Application": track.get("Encoded_Application", {}), + "Encoded_Library": track.get("Encoded_Library", {}), + }) + elif track["@type"] == "Video": + filtered["media"]["track"].append({ + "@type": track["@type"], + "StreamOrder": track.get("StreamOrder", {}), + "ID": track.get("ID", {}), + "UniqueID": track.get("UniqueID", {}), + "Format": track.get("Format", {}), + "Format_Profile": track.get("Format_Profile", {}), + "Format_Version": track.get("Format_Version", {}), + "Format_Level": track.get("Format_Level", {}), + "Format_Tier": track.get("Format_Tier", {}), + "HDR_Format": track.get("HDR_Format", {}), + "HDR_Format_Version": track.get("HDR_Format_Version", {}), + "HDR_Format_String": track.get("HDR_Format_String", {}), + "HDR_Format_Profile": track.get("HDR_Format_Profile", {}), + "HDR_Format_Level": track.get("HDR_Format_Level", {}), + "HDR_Format_Settings": track.get("HDR_Format_Settings", {}), + "HDR_Format_Compression": track.get("HDR_Format_Compression", {}), + "HDR_Format_Compatibility": track.get("HDR_Format_Compatibility", {}), + "CodecID": track.get("CodecID", {}), + "CodecID_Hint": track.get("CodecID_Hint", {}), + "Duration": track.get("Duration", {}), + "BitRate": track.get("BitRate", {}), + "Width": track.get("Width", {}), + "Height": track.get("Height", {}), + "Stored_Height": track.get("Stored_Height", {}), + "Sampled_Width": track.get("Sampled_Width", {}), + "Sampled_Height": track.get("Sampled_Height", {}), + "PixelAspectRatio": track.get("PixelAspectRatio", {}), + "DisplayAspectRatio": track.get("DisplayAspectRatio", {}), + "FrameRate_Mode": track.get("FrameRate_Mode", {}), + "FrameRate": track.get("FrameRate", {}), + "FrameRate_Num": track.get("FrameRate_Num", {}), + "FrameRate_Den": track.get("FrameRate_Den", {}), + "FrameCount": track.get("FrameCount", {}), + "Standard": track.get("Standard", {}), + "ColorSpace": track.get("ColorSpace", {}), + "ChromaSubsampling": track.get("ChromaSubsampling", {}), + "ChromaSubsampling_Position": track.get("ChromaSubsampling_Position", {}), + "BitDepth": track.get("BitDepth", {}), + "ScanType": track.get("ScanType", {}), + "ScanOrder": track.get("ScanOrder", {}), + "Delay": track.get("Delay", {}), + "Delay_Source": track.get("Delay_Source", {}), + "StreamSize": track.get("StreamSize", {}), + "Language": track.get("Language", {}), + "Default": track.get("Default", {}), + "Forced": track.get("Forced", {}), + "colour_description_present": track.get("colour_description_present", {}), + "colour_description_present_Source": track.get("colour_description_present_Source", {}), + "colour_range": track.get("colour_range", {}), + "colour_range_Source": track.get("colour_range_Source", {}), + "colour_primaries": track.get("colour_primaries", {}), + "colour_primaries_Source": 
track.get("colour_primaries_Source", {}), + "transfer_characteristics": track.get("transfer_characteristics", {}), + "transfer_characteristics_Source": track.get("transfer_characteristics_Source", {}), + "transfer_characteristics_Original": track.get("transfer_characteristics_Original", {}), + "matrix_coefficients": track.get("matrix_coefficients", {}), + "matrix_coefficients_Source": track.get("matrix_coefficients_Source", {}), + "MasteringDisplay_ColorPrimaries": track.get("MasteringDisplay_ColorPrimaries", {}), + "MasteringDisplay_ColorPrimaries_Source": track.get("MasteringDisplay_ColorPrimaries_Source", {}), + "MasteringDisplay_Luminance": track.get("MasteringDisplay_Luminance", {}), + "MasteringDisplay_Luminance_Source": track.get("MasteringDisplay_Luminance_Source", {}), + "MaxCLL": track.get("MaxCLL", {}), + "MaxCLL_Source": track.get("MaxCLL_Source", {}), + "MaxFALL": track.get("MaxFALL", {}), + "MaxFALL_Source": track.get("MaxFALL_Source", {}), + "Encoded_Library_Settings": track.get("Encoded_Library_Settings", {}), + }) + elif track["@type"] == "Audio": + filtered["media"]["track"].append({ + "@type": track["@type"], + "StreamOrder": track.get("StreamOrder", {}), + "ID": track.get("ID", {}), + "UniqueID": track.get("UniqueID", {}), + "Format": track.get("Format", {}), + "Format_Version": track.get("Format_Version", {}), + "Format_Profile": track.get("Format_Profile", {}), + "Format_Settings": track.get("Format_Settings", {}), + "Format_Commercial_IfAny": track.get("Format_Commercial_IfAny", {}), + "Format_Settings_Endianness": track.get("Format_Settings_Endianness", {}), + "Format_AdditionalFeatures": track.get("Format_AdditionalFeatures", {}), + "CodecID": track.get("CodecID", {}), + "Duration": track.get("Duration", {}), + "BitRate_Mode": track.get("BitRate_Mode", {}), + "BitRate": track.get("BitRate", {}), + "Channels": track.get("Channels", {}), + "ChannelPositions": track.get("ChannelPositions", {}), + "ChannelLayout": track.get("ChannelLayout", {}), + "Channels_Original": track.get("Channels_Original", {}), + "ChannelLayout_Original": track.get("ChannelLayout_Original", {}), + "SamplesPerFrame": track.get("SamplesPerFrame", {}), + "SamplingRate": track.get("SamplingRate", {}), + "SamplingCount": track.get("SamplingCount", {}), + "FrameRate": track.get("FrameRate", {}), + "FrameCount": track.get("FrameCount", {}), + "Compression_Mode": track.get("Compression_Mode", {}), + "Delay": track.get("Delay", {}), + "Delay_Source": track.get("Delay_Source", {}), + "Video_Delay": track.get("Video_Delay", {}), + "StreamSize": track.get("StreamSize", {}), + "Title": track.get("Title", {}), + "Language": track.get("Language", {}), + "ServiceKind": track.get("ServiceKind", {}), + "Default": track.get("Default", {}), + "Forced": track.get("Forced", {}), + "extra": track.get("extra", {}), + }) + elif track["@type"] == "Text": + filtered["media"]["track"].append({ + "@type": track["@type"], + "@typeorder": track.get("@typeorder", {}), + "StreamOrder": track.get("StreamOrder", {}), + "ID": track.get("ID", {}), + "UniqueID": track.get("UniqueID", {}), + "Format": track.get("Format", {}), + "CodecID": track.get("CodecID", {}), + "Duration": track.get("Duration", {}), + "BitRate": track.get("BitRate", {}), + "FrameRate": track.get("FrameRate", {}), + "FrameCount": track.get("FrameCount", {}), + "ElementCount": track.get("ElementCount", {}), + "StreamSize": track.get("StreamSize", {}), + "Title": track.get("Title", {}), + "Language": track.get("Language", {}), + "Default": track.get("Default", 
{}), + "Forced": track.get("Forced", {}), + }) + elif track["@type"] == "Menu": + filtered["media"]["track"].append({ + "@type": track["@type"], + "extra": track.get("extra", {}), + }) + return filtered + + if not os.path.exists(f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt") and export_text: + console.print("[bold yellow]Exporting MediaInfo...") + if not isdir: + os.chdir(os.path.dirname(video)) + media_info = MediaInfo.parse(video, output="STRING", full=False, mediainfo_options={'inform_version': '1'}) + with open(f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt", 'w', newline="", encoding='utf-8') as export: + export.write(media_info) + with open(f"{base_dir}/tmp/{folder_id}/MEDIAINFO_CLEANPATH.txt", 'w', newline="", encoding='utf-8') as export_cleanpath: + export_cleanpath.write(media_info.replace(video, os.path.basename(video))) + console.print("[bold green]MediaInfo Exported.") + + if not os.path.exists(f"{base_dir}/tmp/{folder_id}/MediaInfo.json.txt"): + media_info_json = MediaInfo.parse(video, output="JSON", mediainfo_options={'inform_version': '1'}) + media_info_dict = json.loads(media_info_json) + filtered_info = filter_mediainfo(media_info_dict) + with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", 'w', encoding='utf-8') as export: + json.dump(filtered_info, export, indent=4) + + with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", 'r', encoding='utf-8') as f: + mi = json.load(f) + + return mi diff --git a/src/getseasonep.py b/src/getseasonep.py new file mode 100644 index 00000000..b5dddaf1 --- /dev/null +++ b/src/getseasonep.py @@ -0,0 +1,223 @@ +from src.console import console +from guessit import guessit +import anitopy +from pathlib import Path +import asyncio +import requests +import os +import re +from difflib import SequenceMatcher +from src.tmdb import get_tmdb_id, tmdb_other_meta, daily_to_tmdb_season_episode, get_romaji +from src.exceptions import * # noqa: F403 + + +async def get_season_episode(video, meta): + if meta['category'] == 'TV': + filelist = meta['filelist'] + meta['tv_pack'] = 0 + is_daily = False + if meta['anime'] is False: + try: + daily_match = re.search(r"\d{4}[-\.]\d{2}[-\.]\d{2}", video) + if meta.get('manual_date') or daily_match: + # Handle daily episodes + # The user either provided the --daily argument or a date was found in the filename + + if meta.get('manual_date') is None and daily_match is not None: + meta['manual_date'] = daily_match.group().replace('.', '-') + is_daily = True + guess_date = meta.get('manual_date', guessit(video).get('date')) if meta.get('manual_date') else guessit(video).get('date') + season_int, episode_int = await daily_to_tmdb_season_episode(meta.get('tmdb'), guess_date) + + season = f"S{str(season_int).zfill(2)}" + episode = f"E{str(episode_int).zfill(2)}" + # For daily shows, pass the supplied date as the episode title + # Season and episode will be stripped later to conform with standard daily episode naming format + meta['episode_title'] = meta.get('manual_date') + + else: + try: + guess_year = guessit(video)['year'] + except Exception: + guess_year = "" + if guessit(video)["season"] == guess_year: + if f"s{guessit(video)['season']}" in video.lower(): + season_int = str(guessit(video)["season"]) + season = "S" + season_int.zfill(2) + else: + season_int = "1" + season = "S01" + else: + season_int = str(guessit(video)["season"]) + season = "S" + season_int.zfill(2) + + except Exception: + console.print_exception() + season_int = "1" + season = "S01" + + try: + if is_daily is not True: + episodes = "" + if len(filelist) 
== 1: + episodes = guessit(video)['episode'] + if isinstance(episodes, list): + episode = "" + for item in guessit(video)["episode"]: + ep = (str(item).zfill(2)) + episode += f"E{ep}" + episode_int = episodes[0] + else: + episode_int = str(episodes) + episode = "E" + str(episodes).zfill(2) + else: + episode = "" + episode_int = "0" + meta['tv_pack'] = 1 + except Exception: + episode = "" + episode_int = "0" + meta['tv_pack'] = 1 + + else: + # If Anime + parsed = anitopy.parse(Path(video).name) + romaji, mal_id, eng_title, seasonYear, anilist_episodes = await get_romaji(parsed['anime_title'], meta.get('mal', None)) + if mal_id: + meta['mal_id'] = mal_id + if meta.get('mal') is not None: + mal_id = meta.get('mal') + if meta.get('tmdb_manual', None) is None: + year = parsed.get('anime_year', str(seasonYear)) + meta = await get_tmdb_id(guessit(parsed['anime_title'], {"excludes": ["country", "language"]})['title'], year, meta, meta['category']) + meta = await tmdb_other_meta(meta) + if meta['category'] != "TV": + return meta + + tag = parsed.get('release_group', "") + if tag != "": + meta['tag'] = f"-{tag}" + if len(filelist) == 1: + try: + episodes = parsed.get('episode_number', guessit(video).get('episode', '1')) + if not isinstance(episodes, list) and not episodes.isnumeric(): + episodes = guessit(video)['episode'] + if isinstance(episodes, list): + episode_int = int(episodes[0]) # Always convert to integer + episode = "".join([f"E{str(int(item)).zfill(2)}" for item in episodes]) + else: + episode_int = int(episodes) # Convert to integer + episode = f"E{str(episode_int).zfill(2)}" + except Exception: + episode = "E01" + episode_int = 1 # Ensure it's an integer + console.print('[bold yellow]There was an error guessing the episode number. Guessing E01. 
Use [bold green]--episode #[/bold green] to correct if needed') + await asyncio.sleep(1.5) + else: + episode = "" + episode_int = 0 # Ensure it's an integer + meta['tv_pack'] = 1 + + try: + if meta.get('season_int'): + season_int = int(meta.get('season_int')) # Convert to integer + else: + season = parsed.get('anime_season', guessit(video).get('season', '1')) + season_int = int(season) # Convert to integer + season = f"S{str(season_int).zfill(2)}" + except Exception: + try: + if episode_int >= anilist_episodes: + params = { + 'id': str(meta['tvdb_id']), + 'origin': 'tvdb', + 'absolute': str(episode_int), + } + url = "https://thexem.info/map/single" + response = requests.post(url, params=params).json() + if response['result'] == "failure": + raise XEMNotFound # noqa: F405 + if meta['debug']: + console.log(f"[cyan]TheXEM Absolute -> Standard[/cyan]\n{response}") + season_int = int(response['data']['scene']['season']) # Convert to integer + season = f"S{str(season_int).zfill(2)}" + if len(filelist) == 1: + episode_int = int(response['data']['scene']['episode']) # Convert to integer + episode = f"E{str(episode_int).zfill(2)}" + else: + season_int = 1 # Default to 1 if error occurs + season = "S01" + names_url = f"https://thexem.info/map/names?origin=tvdb&id={str(meta['tvdb_id'])}" + names_response = requests.get(names_url).json() + if meta['debug']: + console.log(f'[cyan]Matching Season Number from TheXEM\n{names_response}') + difference = 0 + if names_response['result'] == "success": + for season_num, values in names_response['data'].items(): + for lang, names in values.items(): + if lang == "jp": + for name in names: + romaji_check = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", romaji.lower().replace(' ', '')) + name_check = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", name.lower().replace(' ', '')) + diff = SequenceMatcher(None, romaji_check, name_check).ratio() + if romaji_check in name_check and diff >= difference: + season_int = int(season_num) if season_num != "all" else 1 # Convert to integer + season = f"S{str(season_int).zfill(2)}" + difference = diff + if lang == "us": + for name in names: + eng_check = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", eng_title.lower().replace(' ', '')) + name_check = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", name.lower().replace(' ', '')) + diff = SequenceMatcher(None, eng_check, name_check).ratio() + if eng_check in name_check and diff >= difference: + season_int = int(season_num) if season_num != "all" else 1 # Convert to integer + season = f"S{str(season_int).zfill(2)}" + difference = diff + else: + raise XEMNotFound # noqa: F405 + except Exception: + if meta['debug']: + console.print_exception() + try: + season = guessit(video).get('season', '1') + season_int = int(season) # Convert to integer + except Exception: + season_int = 1 # Default to 1 if error occurs + season = "S01" + console.print(f"[bold yellow]{meta['title']} does not exist on thexem, guessing {season}") + console.print(f"[bold yellow]If [green]{season}[/green] is incorrect, use --season to correct") + await asyncio.sleep(3) + + if meta.get('manual_season', None) is None: + meta['season'] = season + else: + season_int = meta['manual_season'].lower().replace('s', '') + meta['season'] = f"S{meta['manual_season'].lower().replace('s', '').zfill(2)}" + if meta.get('manual_episode', None) is None: + meta['episode'] = episode + else: + episode_int = meta['manual_episode'].lower().replace('e', '') + meta['episode'] = f"E{meta['manual_episode'].lower().replace('e', '').zfill(2)}" + meta['tv_pack'] = 0 + + # if " COMPLETE " in 
Path(video).name.replace('.', ' '): + # meta['season'] = "COMPLETE" + meta['season_int'] = season_int + meta['episode_int'] = episode_int + + # Manual episode title + if 'manual_episode_title' in meta and meta['manual_episode_title'] == "": + meta['episode_title_storage'] = meta.get('manual_episode_title') + else: + meta['episode_title_storage'] = guessit(video, {"excludes": "part"}).get('episode_title', '') + + if meta['season'] == "S00" or meta['episode'] == "E00": + meta['episode_title'] = meta['episode_title_storage'] + + # Guess the part of the episode (if available) + meta['part'] = "" + if meta['tv_pack'] == 1: + part = guessit(os.path.dirname(video)).get('part') + meta['part'] = f"Part {part}" if part else "" + + return meta diff --git a/src/imdb.py b/src/imdb.py new file mode 100644 index 00000000..0ff3dcf9 --- /dev/null +++ b/src/imdb.py @@ -0,0 +1,228 @@ +import requests +from difflib import SequenceMatcher +from imdb import Cinemagoer +from src.console import console + + +async def get_imdb_aka_api(imdb_id, meta): + if imdb_id == "0": + return "", None + if not imdb_id.startswith("tt"): + imdb_id = f"tt{imdb_id}" + url = "https://api.graphql.imdb.com/" + query = { + "query": f""" + query {{ + title(id: "{imdb_id}") {{ + id + titleText {{ + text + isOriginalTitle + }} + originalTitleText {{ + text + }} + countriesOfOrigin {{ + countries {{ + id + }} + }} + }} + }} + """ + } + + headers = { + "Content-Type": "application/json", + } + + response = requests.post(url, headers=headers, json=query) + data = response.json() + + # Check if `data` and `title` exist + title_data = data.get("data", {}).get("title") + if title_data is None: + console.print("Title data is missing from response") + return "", None + + # Extract relevant fields from the response + aka = title_data.get("originalTitleText", {}).get("text", "") + is_original = title_data.get("titleText", {}).get("isOriginalTitle", False) + if meta.get('manual_language'): + original_language = meta.get('manual_language') + else: + original_language = None + + if not is_original and aka: + aka = f" AKA {aka}" + + return aka, original_language + + +async def safe_get(data, path, default=None): + for key in path: + if isinstance(data, dict): + data = data.get(key, default) + else: + return default + return data + + +async def get_imdb_info_api(imdbID, meta): + imdb_info = { + 'title': meta['title'], + 'year': meta['year'], + 'aka': '', + 'type': None, + 'runtime': meta.get('runtime', '60'), + 'cover': meta.get('poster'), + } + if len(meta.get('tmdb_directors', [])) >= 1: + imdb_info['directors'] = meta['tmdb_directors'] + + if imdbID == "0": + return imdb_info + else: + try: + if not imdbID.startswith("tt"): + imdbIDtt = f"tt{imdbID}" + else: + imdbIDtt = imdbID + except Exception: + return imdb_info + query = { + "query": f""" + query GetTitleInfo {{ + title(id: "{imdbIDtt}") {{ + id + titleText {{ + text + isOriginalTitle + }} + originalTitleText {{ + text + }} + releaseYear {{ + year + }} + titleType {{ + id + }} + plot {{ + plotText {{ + plainText + }} + }} + ratingsSummary {{ + aggregateRating + voteCount + }} + primaryImage {{ + url + }} + runtime {{ + displayableProperty {{ + value {{ + plainText + }} + }} + seconds + }} + titleGenres {{ + genres {{ + genre {{ + text + }} + }} + }} + principalCredits {{ + category {{ + text + id + }} + credits {{ + name {{ + id + nameText {{ + text + }} + }} + }} + }} + }} + }} + """ + } + + url = "https://api.graphql.imdb.com/" + headers = {"Content-Type": "application/json"} + + response = 
requests.post(url, json=query, headers=headers) + data = response.json() + + if response.status_code != 200: + return imdb_info + + title_data = await safe_get(data, ["data", "title"], {}) + if not data or "data" not in data or "title" not in data["data"]: + return imdb_info + + imdb_info['imdbID'] = imdbID + imdb_info['title'] = await safe_get(title_data, ['titleText', 'text'], meta['title']) + imdb_info['year'] = await safe_get(title_data, ['releaseYear', 'year'], meta['year']) + original_title = await safe_get(title_data, ['originalTitleText', 'text'], '') + imdb_info['aka'] = original_title if original_title and original_title != imdb_info['title'] else imdb_info['title'] + imdb_info['type'] = await safe_get(title_data, ['titleType', 'id'], None) + runtime_seconds = await safe_get(title_data, ['runtime', 'seconds'], 0) + imdb_info['runtime'] = str(runtime_seconds // 60 if runtime_seconds else 60) + imdb_info['cover'] = await safe_get(title_data, ['primaryImage', 'url'], meta.get('poster', '')) + imdb_info['plot'] = await safe_get(title_data, ['plot', 'plotText', 'plainText'], 'No plot available') + genres = await safe_get(title_data, ['titleGenres', 'genres'], []) + genre_list = [await safe_get(g, ['genre', 'text'], '') for g in genres] + imdb_info['genres'] = ', '.join(filter(None, genre_list)) + imdb_info['rating'] = await safe_get(title_data, ['ratingsSummary', 'aggregateRating'], 'N/A') + imdb_info['directors'] = [] + principal_credits = await safe_get(title_data, ['principalCredits'], []) + if isinstance(principal_credits, list): + for pc in principal_credits: + category_text = await safe_get(pc, ['category', 'text'], '') + if 'Direct' in category_text: + credits = await safe_get(pc, ['credits'], []) + for c in credits: + name_id = await safe_get(c, ['name', 'id'], '') + if name_id.startswith('nm'): + imdb_info['directors'].append(name_id) + break + if meta.get('manual_language'): + imdb_info['original_language'] = meta.get('manual_language') + + return imdb_info + + +async def search_imdb(filename, search_year): + imdbID = '0' + ia = Cinemagoer() + search = ia.search_movie(filename) + for movie in search: + if filename in movie.get('title', ''): + if movie.get('year') == search_year: + imdbID = str(movie.movieID).replace('tt', '') + return imdbID + + +async def imdb_other_meta(meta): + imdb_info = meta['imdb_info'] = await get_imdb_info_api(meta['imdb_id'], meta) + meta['title'] = imdb_info['title'] + meta['year'] = imdb_info['year'] + meta['aka'] = imdb_info['aka'] + meta['poster'] = imdb_info['cover'] + meta['original_language'] = imdb_info.get('original_language') + meta['overview'] = imdb_info['plot'] + meta['imdb_rating'] = imdb_info['rating'] + + difference = SequenceMatcher(None, meta['title'].lower(), meta['aka'][5:].lower()).ratio() + if difference >= 0.9 or meta['aka'][5:].strip() == "" or meta['aka'][5:].strip().lower() in meta['title'].lower(): + meta['aka'] = "" + if f"({meta['year']})" in meta['aka']: + meta['aka'] = meta['aka'].replace(f"({meta['year']})", "").strip() + return meta diff --git a/src/manualpackage.py b/src/manualpackage.py new file mode 100644 index 00000000..5160ff05 --- /dev/null +++ b/src/manualpackage.py @@ -0,0 +1,91 @@ +import shutil +import requests +import os +import json +import urllib.parse +import re +from torf import Torrent +import glob +from src.console import console +from src.uploadscreens import upload_screens + + +async def package(self, meta): + if meta['tag'] == "": + tag = "" + else: + tag = f" / 
{meta['tag'][1:]}" + if meta['is_disc'] == "DVD": + res = meta['source'] + else: + res = meta['resolution'] + + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/GENERIC_INFO.txt", 'w', encoding="utf-8") as generic: + generic.write(f"Name: {meta['name']}\n\n") + generic.write(f"Overview: {meta['overview']}\n\n") + generic.write(f"{res} / {meta['type']}{tag}\n\n") + generic.write(f"Category: {meta['category']}\n") + generic.write(f"TMDB: https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}\n") + if meta['imdb_id'] != "0": + generic.write(f"IMDb: https://www.imdb.com/title/tt{meta['imdb_id']}\n") + if meta['tvdb_id'] != "0": + generic.write(f"TVDB: https://www.thetvdb.com/?id={meta['tvdb_id']}&tab=series\n") + if "tvmaze_id" in meta and meta['tvmaze_id'] != "0": + generic.write(f"TVMaze: https://www.tvmaze.com/shows/{meta['tvmaze_id']}\n") + poster_img = f"{meta['base_dir']}/tmp/{meta['uuid']}/POSTER.png" + if meta.get('poster', None) not in ['', None] and not os.path.exists(poster_img): + if meta.get('rehosted_poster', None) is None: + r = requests.get(meta['poster'], stream=True) + if r.status_code == 200: + console.print("[bold yellow]Rehosting Poster") + r.raw.decode_content = True + with open(poster_img, 'wb') as f: + shutil.copyfileobj(r.raw, f) + poster, dummy = await upload_screens(meta, 1, 1, 0, 1, [poster_img], {}) + poster = poster[0] + generic.write(f"TMDB Poster: {poster.get('raw_url', poster.get('img_url'))}\n") + meta['rehosted_poster'] = poster.get('raw_url', poster.get('img_url')) + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as metafile: + json.dump(meta, metafile, indent=4) + metafile.close() + else: + console.print("[bold yellow]Poster could not be retrieved") + elif os.path.exists(poster_img) and meta.get('rehosted_poster') is not None: + generic.write(f"TMDB Poster: {meta.get('rehosted_poster')}\n") + if len(meta['image_list']) > 0: + generic.write("\nImage Webpage:\n") + for each in meta['image_list']: + generic.write(f"{each['web_url']}\n") + generic.write("\nThumbnail Image:\n") + for each in meta['image_list']: + generic.write(f"{each['img_url']}\n") + title = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", meta['title']) + archive = f"{meta['base_dir']}/tmp/{meta['uuid']}/{title}" + torrent_files = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", "*.torrent") + if isinstance(torrent_files, list) and len(torrent_files) > 1: + for each in torrent_files: + if not each.startswith(('BASE', '[RAND')): + os.remove(os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/{each}")) + try: + if os.path.exists(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent"): + base_torrent = Torrent.read(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") + manual_name = re.sub(r"[^0-9a-zA-Z\[\]\'\-]+", ".", os.path.basename(meta['path'])) + Torrent.copy(base_torrent).write(f"{meta['base_dir']}/tmp/{meta['uuid']}/{manual_name}.torrent", overwrite=True) + # shutil.copy(os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent"), os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['name'].replace(' ', '.')}.torrent").replace(' ', '.')) + filebrowser = self.config['TRACKERS'].get('MANUAL', {}).get('filebrowser', None) + shutil.make_archive(archive, 'tar', f"{meta['base_dir']}/tmp/{meta['uuid']}") + if filebrowser is not None: + url = '/'.join(s.strip('/') for s in (filebrowser, f"/tmp/{meta['uuid']}")) + url = urllib.parse.quote(url, safe="https://") + else: + files = { + "files[]": (f"{meta['title']}.tar", open(f"{archive}.tar", 'rb')) 
+ } + response = requests.post("https://uguu.se/upload.php", files=files).json() + if meta['debug']: + console.print(f"[cyan]{response}") + url = response['files'][0]['url'] + return url + except Exception: + return False + return diff --git a/src/prep.py b/src/prep.py index ff6ef0e3..2e791ea7 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2,61 +2,40 @@ from src.args import Args from src.console import console from src.exceptions import * # noqa: F403 -from src.trackers.PTP import PTP # noqa F401 -from src.trackers.BLU import BLU # noqa F401 -from src.trackers.AITHER import AITHER # noqa F401 -from src.trackers.LST import LST # noqa F401 -from src.trackers.OE import OE # noqa F401 -from src.trackers.HDB import HDB # noqa F401 -from src.trackers.TIK import TIK # noqa F401 -from src.trackers.COMMON import COMMON from src.clients import Clients from data.config import config from src.uphelper import UploadHelper from src.trackersetup import TRACKER_SETUP, tracker_class_map +from src.takescreens import disc_screenshots, dvd_screenshots, screenshots +from src.tvmaze import search_tvmaze +from src.imdb import get_imdb_info_api, search_imdb, imdb_other_meta +from src.trackermeta import update_metadata_from_tracker +from src.tmdb import tmdb_other_meta, get_tmdb_imdb_from_mediainfo, get_tmdb_from_imdb, get_tmdb_id +from src.region import get_region, get_distributor, get_service +from src.exportmi import exportInfo, mi_resolution +from src.getseasonep import get_season_episode +from src.trackerstatus import process_all_trackers try: import traceback from src.discparse import DiscParse - import multiprocessing - from multiprocessing import get_context - from tqdm import tqdm import os import re - import math from str2bool import str2bool - import asyncio from guessit import guessit import ntpath from pathlib import Path import urllib import urllib.parse - import ffmpeg - import random import json import glob import requests - import pyimgbox from pymediainfo import MediaInfo import tmdbsimple as tmdb - from datetime import datetime - from difflib import SequenceMatcher - import torf - from torf import Torrent - import base64 import time - import anitopy - import shutil - from imdb import Cinemagoer import itertools import cli_ui - from rich.progress import Progress, TextColumn, BarColumn, TimeRemainingColumn # noqa F401 - import platform import aiohttp - from PIL import Image - import io - from io import BytesIO - import sys except ModuleNotFoundError: console.print(traceback.print_exc()) console.print('[bold red]Missing Module Found. 
Please reinstall required dependancies.') @@ -80,346 +59,6 @@ def __init__(self, screens, img_host, config): self.img_host = img_host.lower() tmdb.API_KEY = config['DEFAULT']['tmdb_api'] - async def prompt_user_for_confirmation(self, message: str) -> bool: - try: - response = input(f"{message} (Y/n): ").strip().lower() - if response in ["y", "yes", ""]: - return True - return False - except EOFError: - sys.exit(1) - - async def check_images_concurrently(self, imagelist, meta): - approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb'] - invalid_host_found = False # Track if any image is on a non-approved host - - # Ensure meta['image_sizes'] exists - if 'image_sizes' not in meta: - meta['image_sizes'] = {} - - # Map fixed resolution names to vertical resolutions - resolution_map = { - '8640p': 8640, - '4320p': 4320, - '2160p': 2160, - '1440p': 1440, - '1080p': 1080, - '1080i': 1080, - '720p': 720, - '576p': 576, - '576i': 576, - '480p': 480, - '480i': 480, - } - - # Get expected vertical resolution - expected_resolution_name = meta.get('resolution', None) - expected_vertical_resolution = resolution_map.get(expected_resolution_name, None) - - # If no valid resolution is found, skip processing - if expected_vertical_resolution is None: - console.print("[red]Meta resolution is invalid or missing. Skipping all images.[/red]") - return [] - - # Function to check each image's URL, host, and log resolution - async def check_and_collect(image_dict): - img_url = image_dict.get('raw_url') - if not img_url: - return None - - if "ptpimg.me" in img_url and img_url.startswith("http://"): - img_url = img_url.replace("http://", "https://") - image_dict['raw_url'] = img_url - image_dict['web_url'] = img_url - - # Verify the image link - if await self.check_image_link(img_url): - # Check if the image is hosted on an approved image host - if not any(host in img_url for host in approved_image_hosts): - nonlocal invalid_host_found - invalid_host_found = True # Mark that we found an invalid host - - async with aiohttp.ClientSession() as session: - async with session.get(img_url) as response: - if response.status == 200: - image_content = await response.read() - - try: - image = Image.open(BytesIO(image_content)) - vertical_resolution = image.height - lower_bound = expected_vertical_resolution * 0.70 # 30% below - if meta['is_disc'] == "DVD": - upper_bound = expected_vertical_resolution * 1.30 - else: - upper_bound = expected_vertical_resolution * 1.00 - - if not (lower_bound <= vertical_resolution <= upper_bound): - console.print( - f"[red]Image {img_url} resolution ({vertical_resolution}p) " - f"is outside the allowed range ({int(lower_bound)}-{int(upper_bound)}p). Skipping.[/red]" - ) - return None - - meta['image_sizes'][img_url] = len(image_content) - console.print( - f"Valid image {img_url} with resolution {image.width}x{image.height} " - f"and size {len(image_content) / 1024:.2f} KiB" - ) - except Exception as e: - console.print(f"[red]Failed to process image {img_url}: {e}") - return None - else: - console.print(f"[red]Failed to fetch image {img_url}. 
Skipping.") - - return image_dict - else: - return None - - # Run image verification concurrently - tasks = [check_and_collect(image_dict) for image_dict in imagelist] - results = await asyncio.gather(*tasks) - - # Collect valid images - valid_images = [image for image in results if image is not None] - - # Convert default_trackers string into a list - default_trackers = self.config['TRACKERS'].get('default_trackers', '') - trackers_list = [tracker.strip() for tracker in default_trackers.split(',')] - - # Ensure meta['trackers'] is a list - if meta.get('trackers') is not None: - if isinstance(meta.get('trackers', ''), str): - meta['trackers'] = [tracker.strip() for tracker in meta['trackers'].split(',')] - if 'MTV' in meta.get('trackers', []): - if invalid_host_found: - console.print( - "[red]Warning: Some images are not hosted on an MTV-approved image host. MTV will need new images later.[/red]" - ) - # Issue warning if any valid image is on an unapproved host and MTV is in the trackers list - elif 'MTV' in trackers_list: - if invalid_host_found: - console.print("[red]Warning: Some images are not hosted on an MTV-approved image host. MTV will need new images later.[/red]") - - return valid_images - - async def check_image_link(self, url): - async with aiohttp.ClientSession() as session: - try: - async with session.get(url) as response: - if response.status == 200: - content_type = response.headers.get('Content-Type', '').lower() - if 'image' in content_type: - # Attempt to load the image - image_data = await response.read() - try: - image = Image.open(io.BytesIO(image_data)) - image.verify() # This will check if the image is broken - console.print(f"[green]Image verified successfully: {url}[/green]") - return True - except (IOError, SyntaxError) as e: # noqa #F841 - console.print(f"[red]Image verification failed (corrupt image): {url}[/red]") - return False - else: - console.print(f"[red]Content type is not an image: {url}[/red]") - return False - else: - console.print(f"[red]Failed to retrieve image: {url} (status code: {response.status})[/red]") - return False - except Exception as e: - console.print(f"[red]Exception occurred while checking image: {url} - {str(e)}[/red]") - return False - - async def update_meta_with_unit3d_data(self, meta, tracker_data, tracker_name): - # Unpack the expected 9 elements, ignoring any additional ones - tmdb, imdb, tvdb, mal, desc, category, infohash, imagelist, filename, *rest = tracker_data - - if tmdb not in [None, '0']: - meta['tmdb_manual'] = tmdb - if imdb not in [None, '0']: - meta['imdb'] = str(imdb).zfill(7) - if tvdb not in [None, '0']: - meta['tvdb_id'] = tvdb - if mal not in [None, '0']: - meta['mal'] = mal - if desc not in [None, '0', '']: - meta['description'] = desc - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: - description.write((desc or "") + "\n") - if category.upper() in ['MOVIE', 'TV SHOW', 'FANRES']: - meta['category'] = 'TV' if category.upper() == 'TV SHOW' else category.upper() - - if not meta.get('image_list'): # Only handle images if image_list is not already populated - if imagelist: # Ensure imagelist is not empty before setting - valid_images = await self.check_images_concurrently(imagelist, meta) - if valid_images: - meta['image_list'] = valid_images - if meta.get('image_list'): # Double-check if image_list is set before handling it - if not (meta.get('blu') or meta.get('aither') or meta.get('lst') or meta.get('oe') or meta.get('tik')) or 
meta['unattended']: - await self.handle_image_list(meta, tracker_name) - - if filename: - meta[f'{tracker_name.lower()}_filename'] = filename - - console.print(f"[green]{tracker_name} data successfully updated in meta[/green]") - - async def update_metadata_from_tracker(self, tracker_name, tracker_instance, meta, search_term, search_file_folder): - tracker_key = tracker_name.lower() - manual_key = f"{tracker_key}_manual" - found_match = False - - if tracker_name in ["BLU", "AITHER", "LST", "OE", "TIK"]: - if meta.get(tracker_key) is not None: - console.print(f"[cyan]{tracker_name} ID found in meta, reusing existing ID: {meta[tracker_key]}[/cyan]") - tracker_data = await COMMON(self.config).unit3d_torrent_info( - tracker_name, - tracker_instance.torrent_url, - tracker_instance.search_url, - meta, - id=meta[tracker_key] - ) - else: - console.print(f"[yellow]No ID found in meta for {tracker_name}, searching by file name[/yellow]") - tracker_data = await COMMON(self.config).unit3d_torrent_info( - tracker_name, - tracker_instance.torrent_url, - tracker_instance.search_url, - meta, - file_name=search_term - ) - - if any(item not in [None, '0'] for item in tracker_data[:3]): # Check for valid tmdb, imdb, or tvdb - console.print(f"[green]Valid data found on {tracker_name}, setting meta values[/green]") - await self.update_meta_with_unit3d_data(meta, tracker_data, tracker_name) - found_match = True - else: - console.print(f"[yellow]No valid data found on {tracker_name}[/yellow]") - found_match = False - - elif tracker_name == "PTP": - imdb_id = None - if meta.get('ptp') is None: - imdb_id, ptp_torrent_id, ptp_torrent_hash = await tracker_instance.get_ptp_id_imdb(search_term, search_file_folder, meta) - if ptp_torrent_id: - meta['imdb'] = str(imdb_id).zfill(7) if imdb_id else None - console.print(f"[green]{tracker_name} IMDb ID found: tt{meta['imdb']}[/green]") - - if not meta['unattended']: - if await self.prompt_user_for_confirmation("Do you want to use this ID data from PTP?"): - found_match = True - meta['ptp'] = ptp_torrent_id - ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(ptp_torrent_id, meta, meta.get('is_disc', False)) - meta['description'] = ptp_desc - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: - description.write((ptp_desc or "") + "\n") - - if not meta.get('image_list'): - valid_images = await self.check_images_concurrently(ptp_imagelist, meta) - if valid_images: - meta['image_list'] = valid_images - await self.handle_image_list(meta, tracker_name) - - else: - found_match = False - meta['imdb'] = None - - else: - found_match = True - ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(ptp_torrent_id, meta, meta.get('is_disc', False)) - meta['description'] = ptp_desc - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: - description.write((ptp_desc or "") + "\n") - meta['saved_description'] = True - - if not meta.get('image_list'): - valid_images = await self.check_images_concurrently(ptp_imagelist, meta) - if valid_images: - meta['image_list'] = valid_images - else: - console.print("[yellow]Skipping PTP as no match found[/yellow]") - found_match = False - - else: - ptp_torrent_id = meta['ptp'] - console.print("[cyan]Using specified PTP ID to get IMDb ID[/cyan]") - imdb_id, _, meta['ext_torrenthash'] = await tracker_instance.get_imdb_from_torrent_id(ptp_torrent_id) - if imdb_id: - meta['imdb'] = 
str(imdb_id).zfill(7) - console.print(f"[green]IMDb ID found: tt{meta['imdb']}[/green]") - found_match = True - meta['skipit'] = True - ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(meta['ptp'], meta, meta.get('is_disc', False)) - meta['description'] = ptp_desc - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: - description.write(ptp_desc + "\n") - meta['saved_description'] = True - if not meta.get('image_list'): # Only handle images if image_list is not already populated - valid_images = await self.check_images_concurrently(ptp_imagelist, meta) - if valid_images: - meta['image_list'] = valid_images - console.print("[green]PTP images added to metadata.[/green]") - else: - console.print(f"[yellow]Could not find IMDb ID using PTP ID: {ptp_torrent_id}[/yellow]") - found_match = False - - elif tracker_name == "HDB": - if meta.get('hdb') is not None: - meta[manual_key] = meta[tracker_key] - console.print(f"[cyan]{tracker_name} ID found in meta, reusing existing ID: {meta[tracker_key]}[/cyan]") - - # Use get_info_from_torrent_id function if ID is found in meta - imdb, tvdb_id, hdb_name, meta['ext_torrenthash'] = await tracker_instance.get_info_from_torrent_id(meta[tracker_key]) - - meta['tvdb_id'] = str(tvdb_id) if tvdb_id else meta.get('tvdb_id') - meta['hdb_name'] = hdb_name - found_match = True - - # Skip user confirmation if searching by ID - console.print(f"[green]{tracker_name} data found: IMDb ID: {imdb}, TVDb ID: {meta['tvdb_id']}, HDB Name: {meta['hdb_name']}[/green]") - else: - console.print("[yellow]No ID found in meta for HDB, searching by file name[/yellow]") - - # Use search_filename function if ID is not found in meta - imdb, tvdb_id, hdb_name, meta['ext_torrenthash'], tracker_id = await tracker_instance.search_filename(search_term, search_file_folder, meta) - - meta['tvdb_id'] = str(tvdb_id) if tvdb_id else meta.get('tvdb_id') - meta['hdb_name'] = hdb_name - if tracker_id: - meta[tracker_key] = tracker_id - found_match = True - - if found_match: - if imdb or tvdb_id or hdb_name: - console.print(f"[green]{tracker_name} data found: IMDb ID: {imdb}, TVDb ID: {meta['tvdb_id']}, HDB Name: {meta['hdb_name']}[/green]") - if await self.prompt_user_for_confirmation(f"Do you want to use the ID's found on {tracker_name}?"): - console.print(f"[green]{tracker_name} data retained.[/green]") - else: - console.print(f"[yellow]{tracker_name} data discarded.[/yellow]") - meta[tracker_key] = None - meta['tvdb_id'] = None - meta['hdb_name'] = None - found_match = False - else: - found_match = False - - return meta, found_match - - async def handle_image_list(self, meta, tracker_name): - if meta.get('image_list'): - console.print(f"[cyan]Found the following images from {tracker_name}:") - for img in meta['image_list']: - console.print(f"[blue]{img}[/blue]") - - if meta['unattended']: - keep_images = True - else: - keep_images = await self.prompt_user_for_confirmation(f"Do you want to keep the images found on {tracker_name}?") - if not keep_images: - meta['image_list'] = [] - meta['image_sizes'] = {} - console.print(f"[yellow]Images discarded from {tracker_name}.") - else: - console.print(f"[green]Images retained from {tracker_name}.") - async def gather_prep(self, meta, mode): meta['cutoff'] = int(self.config['DEFAULT'].get('cutoff_screens', 3)) task_limit = self.config['DEFAULT'].get('task_limit', "0") @@ -449,7 +88,7 @@ async def gather_prep(self, meta, mode): # console.print(f"Debug: meta['filelist'] 
before population: {meta.get('filelist', 'Not Set')}") if meta['is_disc'] == "BDMV": - video, meta['scene'], meta['imdb'] = self.is_scene(meta['path'], meta, meta.get('imdb', None)) + video, meta['scene'], meta['imdb'] = await self.is_scene(meta['path'], meta, meta.get('imdb', None)) meta['filelist'] = [] # No filelist for discs, use path search_term = os.path.basename(meta['path']) search_file_folder = 'folder' @@ -471,13 +110,13 @@ async def gather_prep(self, meta, mode): meta['search_year'] = "" if meta.get('resolution', None) is None: - meta['resolution'] = self.mi_resolution(bdinfo['video'][0]['res'], guessit(video), width="OTHER", scan="p", height="OTHER", actual_height=0) - meta['sd'] = self.is_sd(meta['resolution']) + meta['resolution'] = await mi_resolution(bdinfo['video'][0]['res'], guessit(video), width="OTHER", scan="p", height="OTHER", actual_height=0) + meta['sd'] = await self.is_sd(meta['resolution']) mi = None elif meta['is_disc'] == "DVD": - video, meta['scene'], meta['imdb'] = self.is_scene(meta['path'], meta, meta.get('imdb', None)) + video, meta['scene'], meta['imdb'] = await self.is_scene(meta['path'], meta, meta.get('imdb', None)) meta['filelist'] = [] search_term = os.path.basename(meta['path']) search_file_folder = 'folder' @@ -489,17 +128,17 @@ async def gather_prep(self, meta, mode): except Exception: meta['search_year'] = "" if not meta.get('edit', False): - mi = self.exportInfo(f"{meta['discs'][0]['path']}/VTS_{meta['discs'][0]['main_set'][0][:2]}_1.VOB", False, meta['uuid'], meta['base_dir'], export_text=False) + mi = await exportInfo(f"{meta['discs'][0]['path']}/VTS_{meta['discs'][0]['main_set'][0][:2]}_1.VOB", False, meta['uuid'], meta['base_dir'], export_text=False) meta['mediainfo'] = mi else: mi = meta['mediainfo'] meta['dvd_size'] = await self.get_dvd_size(meta['discs'], meta.get('manual_dvds')) - meta['resolution'] = self.get_resolution(guessit(video), meta['uuid'], base_dir) - meta['sd'] = self.is_sd(meta['resolution']) + meta['resolution'] = await self.get_resolution(guessit(video), meta['uuid'], base_dir) + meta['sd'] = await self.is_sd(meta['resolution']) elif meta['is_disc'] == "HDDVD": - video, meta['scene'], meta['imdb'] = self.is_scene(meta['path'], meta, meta.get('imdb', None)) + video, meta['scene'], meta['imdb'] = await self.is_scene(meta['path'], meta, meta.get('imdb', None)) meta['filelist'] = [] search_term = os.path.basename(meta['path']) search_file_folder = 'folder' @@ -512,18 +151,18 @@ async def gather_prep(self, meta, mode): except Exception: meta['search_year'] = "" if not meta.get('edit', False): - mi = self.exportInfo(meta['discs'][0]['largest_evo'], False, meta['uuid'], meta['base_dir'], export_text=False) + mi = await exportInfo(meta['discs'][0]['largest_evo'], False, meta['uuid'], meta['base_dir'], export_text=False) meta['mediainfo'] = mi else: mi = meta['mediainfo'] - meta['resolution'] = self.get_resolution(guessit(video), meta['uuid'], base_dir) + meta['resolution'] = await self.get_resolution(guessit(video), meta['uuid'], base_dir) meta['sd'] = self.is_sd(meta['resolution']) else: - videopath, meta['filelist'] = self.get_video(videoloc, meta.get('mode', 'discord')) + videopath, meta['filelist'] = await self.get_video(videoloc, meta.get('mode', 'discord')) search_term = os.path.basename(meta['filelist'][0]) if meta['filelist'] else None search_file_folder = 'file' - video, meta['scene'], meta['imdb'] = self.is_scene(videopath, meta, meta.get('imdb', None)) + video, meta['scene'], meta['imdb'] = await self.is_scene(videopath, 
meta, meta.get('imdb', None)) guess_name = ntpath.basename(video).replace('-', ' ') filename = guessit(re.sub(r"[^0-9a-zA-Z\[\\]]+", " ", guess_name), {"excludes": ["country", "language"]}).get("title", guessit(re.sub("[^0-9a-zA-Z]+", " ", guess_name), {"excludes": ["country", "language"]})["title"]) untouched_filename = os.path.basename(video) @@ -533,14 +172,14 @@ async def gather_prep(self, meta, mode): meta['search_year'] = "" if not meta.get('edit', False): - mi = self.exportInfo(videopath, meta['isdir'], meta['uuid'], base_dir, export_text=True) + mi = await exportInfo(videopath, meta['isdir'], meta['uuid'], base_dir, export_text=True) meta['mediainfo'] = mi else: mi = meta['mediainfo'] if meta.get('resolution', None) is None: - meta['resolution'] = self.get_resolution(guessit(video), meta['uuid'], base_dir) - meta['sd'] = self.is_sd(meta['resolution']) + meta['resolution'] = await self.get_resolution(guessit(video), meta['uuid'], base_dir) + meta['sd'] = await self.is_sd(meta['resolution']) if " AKA " in filename.replace('.', ' '): filename = filename.split('AKA')[0] @@ -563,7 +202,7 @@ async def gather_prep(self, meta, mode): client = Clients(config=config) if meta.get('infohash') is not None: meta = await client.get_ptp_from_hash(meta) - + tracker_setup = TRACKER_SETUP(config=config) if not meta.get('image_list'): # Reuse information from trackers with fallback found_match = False @@ -579,18 +218,18 @@ async def gather_prep(self, meta, mode): 'oe': 'OE', 'tik': 'TIK', } - specific_tracker = next((tracker_keys[key] for key in tracker_keys if meta.get(key)), None) + + specific_tracker = next((tracker_keys[key] for key in tracker_keys if meta.get(key) is not None), None) async def process_tracker(tracker_name, meta): nonlocal found_match - tracker_class = globals().get(tracker_name) - if tracker_class is None: + if tracker_class_map is None: print(f"Tracker class for {tracker_name} not found.") return meta - tracker_instance = tracker_class(config=self.config) + tracker_instance = tracker_class_map[tracker_name](config=config) try: - updated_meta, match = await self.update_metadata_from_tracker( + updated_meta, match = await update_metadata_from_tracker( tracker_name, tracker_instance, meta, search_term, search_file_folder ) if match: @@ -629,43 +268,43 @@ async def process_tracker(tracker_name, meta): if meta.get('manual_language'): meta['original_langauge'] = meta.get('manual_language').lower() meta['tmdb'] = meta.get('tmdb_manual', None) - meta['type'] = self.get_type(video, meta['scene'], meta['is_disc'], meta) + meta['type'] = await self.get_type(video, meta['scene'], meta['is_disc'], meta) if meta.get('category', None) is None: - meta['category'] = self.get_cat(video) + meta['category'] = await self.get_cat(video) else: meta['category'] = meta['category'].upper() if meta.get('tmdb', None) is None and meta.get('imdb', None) is None: - meta['category'], meta['tmdb'], meta['imdb'] = self.get_tmdb_imdb_from_mediainfo(mi, meta['category'], meta['is_disc'], meta['tmdb'], meta['imdb']) + meta['category'], meta['tmdb'], meta['imdb'] = await get_tmdb_imdb_from_mediainfo(mi, meta['category'], meta['is_disc'], meta['tmdb'], meta['imdb']) if meta.get('tmdb', None) is None and meta.get('imdb', None) is None: - meta = await self.get_tmdb_id(filename, meta['search_year'], meta, meta['category'], untouched_filename) + meta = await get_tmdb_id(filename, meta['search_year'], meta, meta['category'], untouched_filename) elif meta.get('imdb', None) is not None and meta.get('tmdb_manual', None) is 
None: meta['imdb_id'] = str(meta['imdb']).replace('tt', '') - meta = await self.get_tmdb_from_imdb(meta, filename) + meta = await get_tmdb_from_imdb(meta, filename) else: meta['tmdb_manual'] = meta.get('tmdb', None) # If no tmdb, use imdb for meta if int(meta['tmdb']) == 0: - meta = await self.imdb_other_meta(meta) + meta = await imdb_other_meta(meta) else: - meta = await self.tmdb_other_meta(meta) + meta = await tmdb_other_meta(meta) # Search tvmaze if meta['category'] == "TV": - meta['tvmaze_id'], meta['imdb_id'], meta['tvdb_id'] = await self.search_tvmaze(filename, meta['search_year'], meta.get('imdb_id', '0'), meta.get('tvdb_id', 0), meta) + meta['tvmaze_id'], meta['imdb_id'], meta['tvdb_id'] = await search_tvmaze(filename, meta['search_year'], meta.get('imdb_id', '0'), meta.get('tvdb_id', 0), meta) else: meta.setdefault('tvmaze_id', '0') # If no imdb, search for it if meta.get('imdb_id', None) is None: - meta['imdb_id'] = await self.search_imdb(filename, meta['search_year']) + meta['imdb_id'] = await search_imdb(filename, meta['search_year']) if meta.get('imdb_info', None) is None and int(meta['imdb_id']) != 0: - meta['imdb_info'] = await self.get_imdb_info_api(meta['imdb_id'], meta) + meta['imdb_info'] = await get_imdb_info_api(meta['imdb_id'], meta) if meta.get('tag', None) is None: - meta['tag'] = self.get_tag(video, meta) + meta['tag'] = await self.get_tag(video, meta) else: if not meta['tag'].startswith('-') and meta['tag'] != "": meta['tag'] = f"-{meta['tag']}" if meta['category'] == "TV": - meta = await self.get_season_episode(video, meta) + meta = await get_season_episode(video, meta) meta = await self.tag_override(meta) if meta.get('tag') == "-SubsPlease": # SubsPlease-specific tracks = meta.get('mediainfo').get('media', {}).get('track', []) # Get all tracks @@ -677,32 +316,32 @@ async def process_tracker(tracker_name, meta): elif (bitrate.isdigit() or bitrate_oldMediaInfo.isdigit()): # Only assign if at least one bitrate is present, otherwise leave it to user meta['service'] = "HIDI" meta['video'] = video - meta['audio'], meta['channels'], meta['has_commentary'] = self.get_audio_v2(mi, meta, bdinfo) + meta['audio'], meta['channels'], meta['has_commentary'] = await self.get_audio_v2(mi, meta, bdinfo) if meta['tag'][1:].startswith(meta['channels']): meta['tag'] = meta['tag'].replace(f"-{meta['channels']}", '') if meta.get('no_tag', False): meta['tag'] = "" - meta['3D'] = self.is_3d(mi, bdinfo) + meta['3D'] = await self.is_3d(mi, bdinfo) if meta.get('manual_source', None): meta['source'] = meta['manual_source'] - _, meta['type'] = self.get_source(meta['type'], video, meta['path'], meta['is_disc'], meta, folder_id, base_dir) + _, meta['type'] = await self.get_source(meta['type'], video, meta['path'], meta['is_disc'], meta, folder_id, base_dir) else: - meta['source'], meta['type'] = self.get_source(meta['type'], video, meta['path'], meta['is_disc'], meta, folder_id, base_dir) + meta['source'], meta['type'] = await self.get_source(meta['type'], video, meta['path'], meta['is_disc'], meta, folder_id, base_dir) if meta.get('service', None) in (None, ''): - meta['service'], meta['service_longname'] = self.get_service(video, meta.get('tag', ''), meta['audio'], meta['filename']) + meta['service'], meta['service_longname'] = await get_service(video, meta.get('tag', ''), meta['audio'], meta['filename']) elif meta.get('service'): - services = self.get_service(get_services_only=True) + services = await get_service(get_services_only=True) meta['service_longname'] = max((k for k, v in 
services.items() if v == meta['service']), key=len, default=meta['service']) - meta['uhd'] = self.get_uhd(meta['type'], guessit(meta['path']), meta['resolution'], meta['path']) - meta['hdr'] = self.get_hdr(mi, bdinfo) - meta['distributor'] = self.get_distributor(meta['distributor']) + meta['uhd'] = await self.get_uhd(meta['type'], guessit(meta['path']), meta['resolution'], meta['path']) + meta['hdr'] = await self.get_hdr(mi, bdinfo) + meta['distributor'] = await get_distributor(meta['distributor']) if meta.get('is_disc', None) == "BDMV": # Blu-ray Specific - meta['region'] = self.get_region(bdinfo, meta.get('region', None)) - meta['video_codec'] = self.get_video_codec(bdinfo) + meta['region'] = await get_region(bdinfo, meta.get('region', None)) + meta['video_codec'] = await self.get_video_codec(bdinfo) else: - meta['video_encode'], meta['video_codec'], meta['has_encode_settings'], meta['bit_depth'] = self.get_video_encode(mi, meta['type'], bdinfo) + meta['video_encode'], meta['video_codec'], meta['has_encode_settings'], meta['bit_depth'] = await self.get_video_encode(mi, meta['type'], bdinfo) if meta.get('no_edition') is False: - meta['edition'], meta['repack'] = self.get_edition(meta['path'], bdinfo, meta['filelist'], meta.get('manual_edition')) + meta['edition'], meta['repack'] = await self.get_edition(meta['path'], bdinfo, meta['filelist'], meta.get('manual_edition')) if "REPACK" in meta.get('edition', ""): meta['repack'] = re.search(r"REPACK[\d]?", meta['edition'])[0] meta['edition'] = re.sub(r"REPACK[\d]?", "", meta['edition']).strip().replace(' ', ' ') @@ -714,14 +353,12 @@ async def process_tracker(tracker_name, meta): console.print(f"Metadata processed in {meta_finish_time - meta_start_time:.2f} seconds") parser = Args(config) helper = UploadHelper() - common = COMMON(config=config) - tracker_setup = TRACKER_SETUP(config=config) enabled_trackers = tracker_setup.trackers_enabled(meta) if "saved_trackers" not in meta: meta['trackers'] = enabled_trackers else: meta['trackers'] = meta['saved_trackers'] - confirm = helper.get_confirmation(meta) + confirm = await helper.get_confirmation(meta) while confirm is False: with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: json.dump(meta, f, indent=4) @@ -734,119 +371,14 @@ async def process_tracker(tracker_name, meta): meta['edit'] = True meta = await self.gather_prep(meta=meta, mode='cli') meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await self.get_name(meta) - confirm = helper.get_confirmation(meta) - - tracker_status = {} - successful_trackers = 0 - - for tracker_name in meta['trackers']: - disctype = meta.get('disctype', None) - tracker_name = tracker_name.replace(" ", "").upper().strip() - - if meta['name'].endswith('DUPE?'): - meta['name'] = meta['name'].replace(' DUPE?', '') - - if tracker_name in tracker_class_map: - tracker_class = tracker_class_map[tracker_name](config=config) - tracker_status[tracker_name] = {'banned': False, 'skipped': False, 'dupe': False, 'upload': False} - - if tracker_name in {"THR", "PTP"}: - if meta.get('imdb_id', '0') == '0': - imdb_id = cli_ui.ask_string("Unable to find IMDB id, please enter e.g.(tt1234567)") - meta['imdb_id'] = imdb_id.replace('tt', '').zfill(7) - if tracker_name == "PTP": - console.print("[yellow]Searching for Group ID") - ptp = PTP(config=config) - groupID = await ptp.get_group_by_imdb(meta['imdb_id']) - if groupID is None: - console.print("[yellow]No Existing Group found") - if meta.get('youtube', None) is None or "youtube" not 
in str(meta.get('youtube', '')): - youtube = cli_ui.ask_string("Unable to find youtube trailer, please link one e.g.(https://www.youtube.com/watch?v=dQw4w9WgXcQ)", default="") - meta['youtube'] = youtube - meta['ptp_groupID'] = groupID - - if tracker_name == "THR": - youtube = cli_ui.ask_string("Unable to find youtube trailer, please link one e.g.(https://www.youtube.com/watch?v=dQw4w9WgXcQ)") - meta['youtube'] = youtube - - if tracker_setup.check_banned_group(tracker_class.tracker, tracker_class.banned_groups, meta): - console.print(f"[red]Tracker '{tracker_name}' is banned. Skipping.[/red]") - tracker_status[tracker_name]['banned'] = True - continue - - if tracker_name not in {"THR", "PTP", "TL"}: - dupes = await tracker_class.search_existing(meta, disctype) - elif tracker_name == "PTP": - dupes = await ptp.search_existing(groupID, meta, disctype) - if 'skipping' not in meta or meta['skipping'] is None: - dupes = await common.filter_dupes(dupes, meta) - meta, is_dupe = helper.dupe_check(dupes, meta, tracker_name) - if is_dupe: - console.print(f"[red]Skipping upload on {tracker_name}[/red]") - print() - tracker_status[tracker_name]['dupe'] = True - elif meta['skipping']: - tracker_status[tracker_name]['skipped'] = True - if tracker_name == "MTV": - if not tracker_status[tracker_name]['banned'] and not tracker_status[tracker_name]['skipped'] and not tracker_status[tracker_name]['dupe']: - tracker_config = self.config['TRACKERS'].get(tracker_name, {}) - if str(tracker_config.get('prefer_mtv_torrent', 'false')).lower() == "true": - meta['prefer_small_pieces'] = True - else: - meta['prefer_small_pieces'] = False - if str(tracker_config.get('skip_if_rehash', 'false')).lower() == "true": - torrent_path = os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") - if not os.path.exists(torrent_path): - check_torrent = await client.find_existing_torrent(meta) - if check_torrent: - console.print(f"[yellow]Existing torrent found on {check_torrent}[/yellow]") - self.create_base_from_existing_torrent(check_torrent, meta['base_dir'], meta['uuid']) - torrent = Torrent.read(torrent_path) - if torrent.piece_size > 8388608: - console.print("[yellow]No existing torrent found with piece size lesser than 8MB[/yellow]") - tracker_status[tracker_name]['skipped'] = True - elif os.path.exists(torrent_path): - torrent = Torrent.read(torrent_path) - if torrent.piece_size > 8388608: - console.print("[yellow]Existing torrent found with piece size greater than 8MB[/yellow]") - tracker_status[tracker_name]['skipped'] = True - if meta.get('skipping') is None and not is_dupe and tracker_name == "PTP": - if meta.get('imdb_info', {}) == {}: - meta['imdb_info'] = await self.get_imdb_info_api(meta['imdb_id'], meta) - if not meta['debug']: - if not tracker_status[tracker_name]['banned'] and not tracker_status[tracker_name]['skipped'] and not tracker_status[tracker_name]['dupe']: - console.print(f"[bold yellow]Tracker '{tracker_name}' passed all checks.") - if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): - edit_choice = input("Enter 'y' to upload, or press enter to skip uploading:") - if edit_choice.lower() == 'y': - tracker_status[tracker_name]['upload'] = True - successful_trackers += 1 - else: - tracker_status[tracker_name]['upload'] = False - else: - tracker_status[tracker_name]['upload'] = True - successful_trackers += 1 - else: - tracker_status[tracker_name]['upload'] = True - successful_trackers += 1 - meta['skipping'] = None - else: - if tracker_name == 
"MANUAL": - successful_trackers += 1 - - meta['tracker_status'] = tracker_status + confirm = await helper.get_confirmation(meta) if meta['debug']: - console.print("\n[bold]Tracker Processing Summary:[/bold]") - for t_name, status in tracker_status.items(): - banned_status = 'Yes' if status['banned'] else 'No' - skipped_status = 'Yes' if status['skipped'] else 'No' - dupe_status = 'Yes' if status['dupe'] else 'No' - upload_status = 'Yes' if status['upload'] else 'No' - if meta['debug']: - console.print(f"Tracker: {t_name} | Banned: {banned_status} | Skipped: {skipped_status} | Dupe: {dupe_status} | [yellow]Upload:[/yellow] {upload_status}") + dupe_start_time = time.time() + successful_trackers = await process_all_trackers(meta) if meta['debug']: - console.print(f"\n[bold]Trackers Passed all Checks:[/bold] {successful_trackers}") + dupe_finish_time = time.time() + console.print(f"Dupe checking processed in {dupe_finish_time - dupe_start_time:.2f} seconds") meta['skip_uploading'] = int(self.config['DEFAULT'].get('tracker_pass_checks', 1)) if not meta['debug']: @@ -864,44 +396,38 @@ async def process_tracker(tracker_name, meta): manual_frames = meta['manual_frames'] # Take Screenshots if meta['is_disc'] == "BDMV": - if meta.get('edit', False) is False: - if meta.get('vapoursynth', False) is True: - use_vs = True - else: - use_vs = False + if not meta.get('edit', False): + use_vs = meta.get('vapoursynth', False) try: - ds = multiprocessing.Process(target=self.disc_screenshots, args=(meta, filename, bdinfo, meta['uuid'], base_dir, use_vs, meta.get('image_list', []), meta.get('ffdebug', False), None)) - ds.start() - while ds.is_alive() is True: - await asyncio.sleep(1) - except KeyboardInterrupt: - ds.terminate() + disc_screenshots( + meta, filename, bdinfo, meta['uuid'], base_dir, use_vs, + meta.get('image_list', []), meta.get('ffdebug', False), None + ) + except Exception as e: + print(f"Error during BDMV screenshot capture: {e}") + elif meta['is_disc'] == "DVD": - if meta.get('edit', False) is False: + if not meta.get('edit', False): try: - ds = multiprocessing.Process(target=self.dvd_screenshots, args=(meta, 0, None, None)) - ds.start() - while ds.is_alive() is True: - await asyncio.sleep(1) - except KeyboardInterrupt: - ds.terminate() + dvd_screenshots( + meta, 0, None, None + ) + except Exception as e: + print(f"Error during DVD screenshot capture: {e}") + else: - if meta.get('edit', False) is False: + if not meta.get('edit', False): try: - s = multiprocessing.Process( - target=self.screenshots, - args=(videopath, filename, meta['uuid'], base_dir, meta), # Positional arguments - kwargs={'manual_frames': manual_frames} # Keyword argument + screenshots( + videopath, filename, meta['uuid'], base_dir, meta, + manual_frames=manual_frames # Pass additional kwargs directly ) - s.start() - while s.is_alive() is True: - await asyncio.sleep(3) - except KeyboardInterrupt: - s.terminate() + except Exception as e: + print(f"Error during generic screenshot capture: {e}") # WORK ON THIS meta.get('stream', False) - meta['stream'] = self.stream_optimized(meta['stream']) + meta['stream'] = await self.stream_optimized(meta['stream']) meta.get('anon', False) meta['anon'] = self.is_anon(meta['anon']) if meta['saved_description'] is False: @@ -977,7 +503,7 @@ async def get_disc(self, meta): Get video files """ - def get_video(self, videoloc, mode): + async def get_video(self, videoloc, mode): filelist = [] videoloc = os.path.abspath(videoloc) if os.path.isdir(videoloc): @@ -997,208 +523,11 @@ def get_video(self, 
videoloc, mode): filelist = sorted(filelist) return video, filelist - """ - Get and parse mediainfo - """ - def exportInfo(self, video, isdir, folder_id, base_dir, export_text): - def filter_mediainfo(data): - filtered = { - "creatingLibrary": data.get("creatingLibrary"), - "media": { - "@ref": data["media"]["@ref"], - "track": [] - } - } - - for track in data["media"]["track"]: - if track["@type"] == "General": - filtered["media"]["track"].append({ - "@type": track["@type"], - "UniqueID": track.get("UniqueID", {}), - "VideoCount": track.get("VideoCount", {}), - "AudioCount": track.get("AudioCount", {}), - "TextCount": track.get("TextCount", {}), - "MenuCount": track.get("MenuCount", {}), - "FileExtension": track.get("FileExtension", {}), - "Format": track.get("Format", {}), - "Format_Version": track.get("Format_Version", {}), - "FileSize": track.get("FileSize", {}), - "Duration": track.get("Duration", {}), - "OverallBitRate": track.get("OverallBitRate", {}), - "FrameRate": track.get("FrameRate", {}), - "FrameCount": track.get("FrameCount", {}), - "StreamSize": track.get("StreamSize", {}), - "IsStreamable": track.get("IsStreamable", {}), - "File_Created_Date": track.get("File_Created_Date", {}), - "File_Created_Date_Local": track.get("File_Created_Date_Local", {}), - "File_Modified_Date": track.get("File_Modified_Date", {}), - "File_Modified_Date_Local": track.get("File_Modified_Date_Local", {}), - "Encoded_Application": track.get("Encoded_Application", {}), - "Encoded_Library": track.get("Encoded_Library", {}), - }) - elif track["@type"] == "Video": - filtered["media"]["track"].append({ - "@type": track["@type"], - "StreamOrder": track.get("StreamOrder", {}), - "ID": track.get("ID", {}), - "UniqueID": track.get("UniqueID", {}), - "Format": track.get("Format", {}), - "Format_Profile": track.get("Format_Profile", {}), - "Format_Version": track.get("Format_Version", {}), - "Format_Level": track.get("Format_Level", {}), - "Format_Tier": track.get("Format_Tier", {}), - "HDR_Format": track.get("HDR_Format", {}), - "HDR_Format_Version": track.get("HDR_Format_Version", {}), - "HDR_Format_String": track.get("HDR_Format_String", {}), - "HDR_Format_Profile": track.get("HDR_Format_Profile", {}), - "HDR_Format_Level": track.get("HDR_Format_Level", {}), - "HDR_Format_Settings": track.get("HDR_Format_Settings", {}), - "HDR_Format_Compression": track.get("HDR_Format_Compression", {}), - "HDR_Format_Compatibility": track.get("HDR_Format_Compatibility", {}), - "CodecID": track.get("CodecID", {}), - "CodecID_Hint": track.get("CodecID_Hint", {}), - "Duration": track.get("Duration", {}), - "BitRate": track.get("BitRate", {}), - "Width": track.get("Width", {}), - "Height": track.get("Height", {}), - "Stored_Height": track.get("Stored_Height", {}), - "Sampled_Width": track.get("Sampled_Width", {}), - "Sampled_Height": track.get("Sampled_Height", {}), - "PixelAspectRatio": track.get("PixelAspectRatio", {}), - "DisplayAspectRatio": track.get("DisplayAspectRatio", {}), - "FrameRate_Mode": track.get("FrameRate_Mode", {}), - "FrameRate": track.get("FrameRate", {}), - "FrameRate_Num": track.get("FrameRate_Num", {}), - "FrameRate_Den": track.get("FrameRate_Den", {}), - "FrameCount": track.get("FrameCount", {}), - "Standard": track.get("Standard", {}), - "ColorSpace": track.get("ColorSpace", {}), - "ChromaSubsampling": track.get("ChromaSubsampling", {}), - "ChromaSubsampling_Position": track.get("ChromaSubsampling_Position", {}), - "BitDepth": track.get("BitDepth", {}), - "ScanType": track.get("ScanType", {}), - 
"ScanOrder": track.get("ScanOrder", {}), - "Delay": track.get("Delay", {}), - "Delay_Source": track.get("Delay_Source", {}), - "StreamSize": track.get("StreamSize", {}), - "Language": track.get("Language", {}), - "Default": track.get("Default", {}), - "Forced": track.get("Forced", {}), - "colour_description_present": track.get("colour_description_present", {}), - "colour_description_present_Source": track.get("colour_description_present_Source", {}), - "colour_range": track.get("colour_range", {}), - "colour_range_Source": track.get("colour_range_Source", {}), - "colour_primaries": track.get("colour_primaries", {}), - "colour_primaries_Source": track.get("colour_primaries_Source", {}), - "transfer_characteristics": track.get("transfer_characteristics", {}), - "transfer_characteristics_Source": track.get("transfer_characteristics_Source", {}), - "transfer_characteristics_Original": track.get("transfer_characteristics_Original", {}), - "matrix_coefficients": track.get("matrix_coefficients", {}), - "matrix_coefficients_Source": track.get("matrix_coefficients_Source", {}), - "MasteringDisplay_ColorPrimaries": track.get("MasteringDisplay_ColorPrimaries", {}), - "MasteringDisplay_ColorPrimaries_Source": track.get("MasteringDisplay_ColorPrimaries_Source", {}), - "MasteringDisplay_Luminance": track.get("MasteringDisplay_Luminance", {}), - "MasteringDisplay_Luminance_Source": track.get("MasteringDisplay_Luminance_Source", {}), - "MaxCLL": track.get("MaxCLL", {}), - "MaxCLL_Source": track.get("MaxCLL_Source", {}), - "MaxFALL": track.get("MaxFALL", {}), - "MaxFALL_Source": track.get("MaxFALL_Source", {}), - "Encoded_Library_Settings": track.get("Encoded_Library_Settings", {}), - }) - elif track["@type"] == "Audio": - filtered["media"]["track"].append({ - "@type": track["@type"], - "StreamOrder": track.get("StreamOrder", {}), - "ID": track.get("ID", {}), - "UniqueID": track.get("UniqueID", {}), - "Format": track.get("Format", {}), - "Format_Version": track.get("Format_Version", {}), - "Format_Profile": track.get("Format_Profile", {}), - "Format_Settings": track.get("Format_Settings", {}), - "Format_Commercial_IfAny": track.get("Format_Commercial_IfAny", {}), - "Format_Settings_Endianness": track.get("Format_Settings_Endianness", {}), - "Format_AdditionalFeatures": track.get("Format_AdditionalFeatures", {}), - "CodecID": track.get("CodecID", {}), - "Duration": track.get("Duration", {}), - "BitRate_Mode": track.get("BitRate_Mode", {}), - "BitRate": track.get("BitRate", {}), - "Channels": track.get("Channels", {}), - "ChannelPositions": track.get("ChannelPositions", {}), - "ChannelLayout": track.get("ChannelLayout", {}), - "Channels_Original": track.get("Channels_Original", {}), - "ChannelLayout_Original": track.get("ChannelLayout_Original", {}), - "SamplesPerFrame": track.get("SamplesPerFrame", {}), - "SamplingRate": track.get("SamplingRate", {}), - "SamplingCount": track.get("SamplingCount", {}), - "FrameRate": track.get("FrameRate", {}), - "FrameCount": track.get("FrameCount", {}), - "Compression_Mode": track.get("Compression_Mode", {}), - "Delay": track.get("Delay", {}), - "Delay_Source": track.get("Delay_Source", {}), - "Video_Delay": track.get("Video_Delay", {}), - "StreamSize": track.get("StreamSize", {}), - "Title": track.get("Title", {}), - "Language": track.get("Language", {}), - "ServiceKind": track.get("ServiceKind", {}), - "Default": track.get("Default", {}), - "Forced": track.get("Forced", {}), - "extra": track.get("extra", {}), - }) - elif track["@type"] == "Text": - 
filtered["media"]["track"].append({ - "@type": track["@type"], - "@typeorder": track.get("@typeorder", {}), - "StreamOrder": track.get("StreamOrder", {}), - "ID": track.get("ID", {}), - "UniqueID": track.get("UniqueID", {}), - "Format": track.get("Format", {}), - "CodecID": track.get("CodecID", {}), - "Duration": track.get("Duration", {}), - "BitRate": track.get("BitRate", {}), - "FrameRate": track.get("FrameRate", {}), - "FrameCount": track.get("FrameCount", {}), - "ElementCount": track.get("ElementCount", {}), - "StreamSize": track.get("StreamSize", {}), - "Title": track.get("Title", {}), - "Language": track.get("Language", {}), - "Default": track.get("Default", {}), - "Forced": track.get("Forced", {}), - }) - elif track["@type"] == "Menu": - filtered["media"]["track"].append({ - "@type": track["@type"], - "extra": track.get("extra", {}), - }) - return filtered - - if not os.path.exists(f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt") and export_text: - console.print("[bold yellow]Exporting MediaInfo...") - if not isdir: - os.chdir(os.path.dirname(video)) - media_info = MediaInfo.parse(video, output="STRING", full=False, mediainfo_options={'inform_version': '1'}) - with open(f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt", 'w', newline="", encoding='utf-8') as export: - export.write(media_info) - with open(f"{base_dir}/tmp/{folder_id}/MEDIAINFO_CLEANPATH.txt", 'w', newline="", encoding='utf-8') as export_cleanpath: - export_cleanpath.write(media_info.replace(video, os.path.basename(video))) - console.print("[bold green]MediaInfo Exported.") - - if not os.path.exists(f"{base_dir}/tmp/{folder_id}/MediaInfo.json.txt"): - media_info_json = MediaInfo.parse(video, output="JSON", mediainfo_options={'inform_version': '1'}) - media_info_dict = json.loads(media_info_json) - filtered_info = filter_mediainfo(media_info_dict) - with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", 'w', encoding='utf-8') as export: - json.dump(filtered_info, export, indent=4) - - with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", 'r', encoding='utf-8') as f: - mi = json.load(f) - - return mi - """ Get Resolution """ - def get_resolution(self, guess, folder_id, base_dir): + async def get_resolution(self, guess, folder_id, base_dir): with open(f'{base_dir}/tmp/{folder_id}/MediaInfo.json', 'r', encoding='utf-8') as f: mi = json.load(f) try: @@ -1227,14 +556,14 @@ def get_resolution(self, guess, folder_id, base_dir): scan = "i" # Default to interlaced if no indicators are found width_list = [3840, 2560, 1920, 1280, 1024, 854, 720, 15360, 7680, 0] height_list = [2160, 1440, 1080, 720, 576, 540, 480, 8640, 4320, 0] - width = self.closest(width_list, int(width)) + width = await self.closest(width_list, int(width)) actual_height = int(height) - height = self.closest(height_list, int(height)) + height = await self.closest(height_list, int(height)) res = f"{width}x{height}{scan}" - resolution = self.mi_resolution(res, guess, width, scan, height, actual_height) + resolution = await mi_resolution(res, guess, width, scan, height, actual_height) return resolution - def closest(self, lst, K): + async def closest(self, lst, K): # Get closest, but not over lst = sorted(lst) mi_input = K @@ -1249,53 +578,7 @@ def closest(self, lst, K): # return lst[min(range(len(lst)), key = lambda i: abs(lst[i]-K))] - def mi_resolution(self, res, guess, width, scan, height, actual_height): - res_map = { - "3840x2160p": "2160p", "2160p": "2160p", - "2560x1440p": "1440p", "1440p": "1440p", - "1920x1080p": "1080p", "1080p": "1080p", - "1920x1080i": 
"1080i", "1080i": "1080i", - "1280x720p": "720p", "720p": "720p", - "1280x540p": "720p", "1280x576p": "720p", - "1024x576p": "576p", "576p": "576p", - "1024x576i": "576i", "576i": "576i", - "854x480p": "480p", "480p": "480p", - "854x480i": "480i", "480i": "480i", - "720x576p": "576p", "576p": "576p", - "720x576i": "576i", "576i": "576i", - "720x480p": "480p", "480p": "480p", - "720x480i": "480i", "480i": "480i", - "15360x8640p": "8640p", "8640p": "8640p", - "7680x4320p": "4320p", "4320p": "4320p", - "OTHER": "OTHER"} - resolution = res_map.get(res, None) - if actual_height == 540: - resolution = "OTHER" - if resolution is None: - try: - resolution = guess['screen_size'] - except Exception: - width_map = { - '3840p': '2160p', - '2560p': '1550p', - '1920p': '1080p', - '1920i': '1080i', - '1280p': '720p', - '1024p': '576p', - '1024i': '576i', - '854p': '480p', - '854i': '480i', - '720p': '576p', - '720i': '576i', - '15360p': '4320p', - 'OTHERp': 'OTHER' - } - resolution = width_map.get(f"{width}{scan}", "OTHER") - resolution = self.mi_resolution(resolution, guess, width, scan, height, actual_height) - - return resolution - - def is_sd(self, resolution): + async def is_sd(self, resolution): if resolution in ("480i", "480p", "576i", "576p", "540p"): sd = 1 else: @@ -1305,7 +588,7 @@ def is_sd(self, resolution): """ Is a scene release? """ - def is_scene(self, video, meta, imdb=None): + async def is_scene(self, video, meta, imdb=None): scene = False base = os.path.basename(video) base = os.path.splitext(base)[0] @@ -1368,810 +651,11 @@ def is_scene(self, video, meta, imdb=None): return video, scene, imdb - """ - Generate Screenshots - """ - def sanitize_filename(self, filename): - # Replace invalid characters like colons with an underscore - return re.sub(r'[<>:"/\\|?*]', '_', filename) - - def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs, image_list, ffdebug, num_screens=None, force_screenshots=False): - if 'image_list' not in meta: - meta['image_list'] = [] - existing_images = [img for img in meta['image_list'] if isinstance(img, dict) and img.get('img_url', '').startswith('http')] - - if len(existing_images) >= meta.get('cutoff') and not force_screenshots: - console.print("[yellow]There are already at least {} images in the image list. 
Skipping additional screenshots.".format(meta.get('cutoff'))) - return - - if num_screens is None: - num_screens = self.screens - if num_screens == 0 or len(image_list) >= num_screens: - return - - sanitized_filename = self.sanitize_filename(filename) - length = 0 - file = None - frame_rate = None - for each in bdinfo['files']: - int_length = sum(int(float(x)) * 60 ** i for i, x in enumerate(reversed(each['length'].split(':')))) - if int_length > length: - length = int_length - for root, dirs, files in os.walk(bdinfo['path']): - for name in files: - if name.lower() == each['file'].lower(): - file = os.path.join(root, name) - - if 'video' in bdinfo and bdinfo['video']: - fps_string = bdinfo['video'][0].get('fps', None) - if fps_string: - try: - frame_rate = float(fps_string.split(' ')[0]) # Extract and convert to float - except ValueError: - console.print("[red]Error: Unable to parse frame rate from bdinfo['video'][0]['fps']") - - keyframe = 'nokey' if "VC-1" in bdinfo['video'][0]['codec'] or bdinfo['video'][0]['hdr_dv'] != "" else 'none' - - os.chdir(f"{base_dir}/tmp/{folder_id}") - existing_screens = glob.glob(f"{sanitized_filename}-*.png") - total_existing = len(existing_screens) + len(existing_images) - if not force_screenshots: - num_screens = max(0, self.screens - total_existing) - else: - num_screens = num_screens - - if num_screens == 0 and not force_screenshots: - console.print('[bold green]Reusing existing screenshots. No additional screenshots needed.') - return - - if meta['debug'] and not force_screenshots: - console.print(f"[bold yellow]Saving Screens... Total needed: {self.screens}, Existing: {total_existing}, To capture: {num_screens}") - - tone_map = meta.get('tone_map', False) - if tone_map and "HDR" in meta['hdr']: - hdr_tonemap = True - else: - hdr_tonemap = False - - capture_tasks = [] - capture_results = [] - if hdr_tonemap: - task_limit = int(meta.get('tone_task_limit')) - else: - task_limit = int(meta.get('task_limit', os.cpu_count())) - - if use_vs: - from src.vs import vs_screengn - vs_screengn(source=file, encode=None, filter_b_frames=False, num=num_screens, dir=f"{base_dir}/tmp/{folder_id}/") - else: - if meta.get('ffdebug', False): - loglevel = 'verbose' - else: - loglevel = 'quiet' - - ss_times = self.valid_ss_time([], num_screens + 1, length, frame_rate) - existing_indices = {int(p.split('-')[-1].split('.')[0]) for p in existing_screens} - capture_tasks = [ - ( - file, - ss_times[i], - os.path.abspath(f"{base_dir}/tmp/{folder_id}/{sanitized_filename}-{len(existing_indices) + i}.png"), - keyframe, - loglevel, - hdr_tonemap - ) - for i in range(num_screens + 1) - ] - - with get_context("spawn").Pool(processes=min(len(capture_tasks), task_limit)) as pool: - try: - capture_results = list( - tqdm( - pool.imap_unordered(self.capture_disc_task, capture_tasks), - total=len(capture_tasks), - desc="Capturing Screenshots", - ascii=True, - dynamic_ncols=False - ) - ) - finally: - pool.close() - pool.join() - - if capture_results: - if len(capture_tasks) > num_screens: - smallest = min(capture_results, key=os.path.getsize) - if meta['debug']: - console.print(f"[yellow]Removing smallest image: {smallest} ({os.path.getsize(smallest)} bytes)[/yellow]") - os.remove(smallest) - capture_results.remove(smallest) - optimized_results = [] - optimize_tasks = [(result, self.config) for result in capture_results if result and os.path.exists(result)] - with get_context("spawn").Pool(processes=min(len(optimize_tasks), task_limit)) as pool: - try: - optimized_results = list( - tqdm( - 
pool.imap_unordered(self.optimize_image_task, optimize_tasks), - total=len(optimize_tasks), - desc="Optimizing Images", - ascii=True, - dynamic_ncols=False - ) - ) - finally: - pool.close() - pool.join() - - valid_results = [] - remaining_retakes = [] - for image_path in optimized_results: - if "Error" in image_path: - console.print(f"[red]{image_path}") - continue - - retake = False - image_size = os.path.getsize(image_path) - if image_size <= 75000: - console.print(f"[yellow]Image {image_path} is incredibly small, retaking.") - retake = True - time.sleep(1) - elif image_size <= 31000000 and self.img_host == "imgbb" and not retake: - pass - elif image_size <= 10000000 and self.img_host in ["imgbox", "pixhost"] and not retake: - pass - elif self.img_host in ["ptpimg", "lensdump", "ptscreens", "oeimg"] and not retake: - pass - elif not retake: - console.print("[red]Image too large for your image host, retaking.") - retake = True - time.sleep(1) - - if retake: - retry_attempts = 3 - for attempt in range(1, retry_attempts + 1): - console.print(f"[yellow]Retaking screenshot for: {image_path} (Attempt {attempt}/{retry_attempts})[/yellow]") - try: - os.remove(image_path) - random_time = random.uniform(0, length) - self.capture_disc_task((file, random_time, image_path, keyframe, loglevel, hdr_tonemap)) - self.optimize_image_task((image_path, config)) - new_size = os.path.getsize(image_path) - valid_image = False - - if new_size > 75000 and new_size <= 31000000 and self.img_host == "imgbb": - console.print(f"[green]Successfully retaken screenshot for: {image_path} ({new_size} bytes)[/green]") - valid_image = True - elif new_size > 75000 and new_size <= 10000000 and self.img_host in ["imgbox", "pixhost"]: - console.print(f"[green]Successfully retaken screenshot for: {image_path} ({new_size} bytes)[/green]") - valid_image = True - elif new_size > 75000 and self.img_host in ["ptpimg", "lensdump", "ptscreens", "oeimg"]: - console.print(f"[green]Successfully retaken screenshot for: {image_path} ({new_size} bytes)[/green]") - valid_image = True - - if valid_image: - valid_results.append(image_path) - break - else: - console.print(f"[red]Retaken image {image_path} does not meet the size requirements for {self.img_host}. Retrying...[/red]") - except Exception as e: - console.print(f"[red]Error retaking screenshot for {image_path}: {e}[/red]") - else: - console.print(f"[red]All retry attempts failed for {image_path}. 
Skipping.[/red]") - remaining_retakes.append(image_path) - else: - valid_results.append(image_path) - - if remaining_retakes: - console.print(f"[red]The following images could not be retaken successfully: {remaining_retakes}[/red]") - - for image_path in valid_results: - img_dict = { - 'img_url': image_path, - 'raw_url': image_path, - 'web_url': image_path - } - meta['image_list'].append(img_dict) - - console.print(f"[green]Successfully captured {len(valid_results)} screenshots.") - - def capture_disc_task(self, task): - file, ss_time, image_path, keyframe, loglevel, hdr_tonemap = task - try: - ff = ffmpeg.input(file, ss=ss_time, skip_frame=keyframe) - - if hdr_tonemap: - ff = ( - ff - .filter('zscale', transfer='linear') - .filter('tonemap', tonemap='mobius', desat=8.0) - .filter('zscale', transfer='bt709') - .filter('format', 'rgb24') - ) - - command = ( - ff - .output(image_path, vframes=1, pix_fmt="rgb24") - .overwrite_output() - .global_args('-loglevel', loglevel) - ) - - command.run(capture_stdout=True, capture_stderr=True) - - return image_path - except ffmpeg.Error as e: - error_output = e.stderr.decode('utf-8') - console.print(f"[red]FFmpeg error capturing screenshot: {error_output}[/red]") - return None - except Exception as e: - console.print(f"[red]Error capturing screenshot: {e}[/red]") - return None - - def dvd_screenshots(self, meta, disc_num, num_screens=None, retry_cap=None): - if 'image_list' not in meta: - meta['image_list'] = [] - existing_images = [img for img in meta['image_list'] if isinstance(img, dict) and img.get('img_url', '').startswith('http')] - - if len(existing_images) >= meta.get('cutoff') and not retry_cap: - console.print("[yellow]There are already at least {} images in the image list. Skipping additional screenshots.".format(meta.get('cutoff'))) - return - - if num_screens is None: - num_screens = self.screens - len(existing_images) - if num_screens == 0 or (len(meta.get('image_list', [])) >= self.screens and disc_num == 0): - return - - if len(glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][disc_num]['name']}-*.png")) >= num_screens: - i = num_screens - console.print('[bold green]Reusing screenshots') - return - - ifo_mi = MediaInfo.parse(f"{meta['discs'][disc_num]['path']}/VTS_{meta['discs'][disc_num]['main_set'][0][:2]}_0.IFO", mediainfo_options={'inform_version': '1'}) - sar = 1 - for track in ifo_mi.tracks: - if track.track_type == "Video": - if isinstance(track.duration, str): - durations = [float(d) for d in track.duration.split(' / ')] - length = max(durations) / 1000 # Use the longest duration - else: - length = float(track.duration) / 1000 # noqa #F841 # Convert to seconds - - par = float(track.pixel_aspect_ratio) - dar = float(track.display_aspect_ratio) - width = float(track.width) - height = float(track.height) - frame_rate = float(track.frame_rate) - if par < 1: - new_height = dar * height - sar = width / new_height - w_sar = 1 - h_sar = sar - else: - sar = par - w_sar = sar - h_sar = 1 - - def _is_vob_good(n, loops, num_screens): - max_loops = 6 - fallback_duration = 300 - valid_tracks = [] - - while loops < max_loops: - try: - vob_mi = MediaInfo.parse( - f"{meta['discs'][disc_num]['path']}/VTS_{main_set[n]}", - output='JSON' - ) - vob_mi = json.loads(vob_mi) - - for track in vob_mi.get('media', {}).get('track', []): - duration = float(track.get('Duration', 0)) - width = track.get('Width') - height = track.get('Height') - - if duration > 1 and width and height: # Minimum 1-second track - valid_tracks.append({ - 'duration': 
duration, - 'track_index': n - }) - - if valid_tracks: - # Sort by duration, take longest track - longest_track = max(valid_tracks, key=lambda x: x['duration']) - return longest_track['duration'], longest_track['track_index'] - - except Exception as e: - console.print(f"[red]Error parsing VOB {n}: {e}") - - n = (n + 1) % len(main_set) - loops += 1 - - return fallback_duration, 0 - - main_set = meta['discs'][disc_num]['main_set'][1:] if len(meta['discs'][disc_num]['main_set']) > 1 else meta['discs'][disc_num]['main_set'] - os.chdir(f"{meta['base_dir']}/tmp/{meta['uuid']}") - voblength, n = _is_vob_good(0, 0, num_screens) - ss_times = self.valid_ss_time([], num_screens + 1, voblength, frame_rate) - tasks = [] - task_limit = int(meta.get('task_limit', os.cpu_count())) - for i in range(num_screens + 1): - image = f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][disc_num]['name']}-{i}.png" - input_file = f"{meta['discs'][disc_num]['path']}/VTS_{main_set[i % len(main_set)]}" - tasks.append((input_file, image, ss_times[i], meta, width, height, w_sar, h_sar)) - - with get_context("spawn").Pool(processes=min(num_screens + 1, task_limit)) as pool: - try: - results = list(tqdm(pool.imap_unordered(self.capture_dvd_screenshot, tasks), total=len(tasks), desc="Capturing Screenshots", ascii=True, dynamic_ncols=False)) - finally: - pool.close() - pool.join() - - if len(glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}/", f"{meta['discs'][disc_num]['name']}-*")) > num_screens: - smallest = None - smallest_size = float('inf') - for screens in glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}/", f"{meta['discs'][disc_num]['name']}-*"): - screen_path = os.path.join(f"{meta['base_dir']}/tmp/{meta['uuid']}/", screens) - try: - screen_size = os.path.getsize(screen_path) - if screen_size < smallest_size: - smallest_size = screen_size - smallest = screen_path - except FileNotFoundError: - console.print(f"[red]File not found: {screen_path}[/red]") # Handle potential edge cases - continue - - if smallest: - if meta['debug']: - console.print(f"[yellow]Removing smallest image: {smallest} ({smallest_size} bytes)[/yellow]") - os.remove(smallest) - - optimize_tasks = [(image, self.config) for image in results if image and os.path.exists(image)] - - with get_context("spawn").Pool(processes=min(len(optimize_tasks), task_limit)) as pool: - try: - optimize_results = list( # noqa F841 - tqdm( - pool.imap_unordered(self.optimize_image_task, optimize_tasks), - total=len(optimize_tasks), - desc="Optimizing Images", - ascii=True, - dynamic_ncols=False - ) - ) - finally: - pool.close() - pool.join() - - valid_results = [] - retry_attempts = 3 - - for image in optimize_results: - if "Error" in image: - console.print(f"[red]{image}") - continue - - retry_cap = False - image_size = os.path.getsize(image) - if image_size <= 120000: - console.print(f"[yellow]Image {image} is incredibly small, retaking.") - retry_cap = True - time.sleep(1) - - if retry_cap: - for attempt in range(1, retry_attempts + 1): - console.print(f"[yellow]Retaking screenshot for: {image} (Attempt {attempt}/{retry_attempts})[/yellow]") - try: - os.remove(image) - except Exception as e: - console.print(f"[red]Failed to delete {image}: {e}[/red]") - break - - image_index = int(image.rsplit('-', 1)[-1].split('.')[0]) - input_file = f"{meta['discs'][disc_num]['path']}/VTS_{main_set[image_index % len(main_set)]}" - adjusted_time = random.uniform(0, voblength) - - try: - self.capture_dvd_screenshot((input_file, image, adjusted_time, meta, width, height, w_sar, 
h_sar)) - retaken_size = os.path.getsize(image) - - if retaken_size > 75000: - console.print(f"[green]Successfully retaken screenshot for: {image} ({retaken_size} bytes)[/green]") - valid_results.append(image) - break - else: - console.print(f"[red]Retaken image {image} is still too small. Retrying...[/red]") - except Exception as e: - console.print(f"[red]Error capturing screenshot for {input_file} at {adjusted_time}: {e}[/red]") - - else: - console.print(f"[red]All retry attempts failed for {image}. Skipping.[/red]") - else: - valid_results.append(image) - - for image in valid_results: - img_dict = { - 'img_url': image, - 'raw_url': image, - 'web_url': image - } - meta['image_list'].append(img_dict) - - console.print(f"[green]Successfully captured {len(optimize_results)} screenshots.") - - def capture_dvd_screenshot(self, task): - input_file, image, seek_time, meta, width, height, w_sar, h_sar = task - - if os.path.exists(image): - console.print(f"[green]Screenshot already exists: {image}[/green]") - return image - - try: - loglevel = 'verbose' if meta.get('ffdebug', False) else 'quiet' - media_info = MediaInfo.parse(input_file) - video_duration = next((track.duration for track in media_info.tracks if track.track_type == "Video"), None) - - if video_duration and seek_time > video_duration: - seek_time = max(0, video_duration - 1) - - ff = ffmpeg.input(input_file, ss=seek_time) - if w_sar != 1 or h_sar != 1: - ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) - - try: - ff.output(image, vframes=1, pix_fmt="rgb24").overwrite_output().global_args('-loglevel', loglevel, '-accurate_seek').run() - except ffmpeg._run.Error as e: - stderr_output = e.stderr.decode() if e.stderr else "No stderr output available" - console.print(f"[red]Error capturing screenshot for {input_file} at {seek_time}s: {stderr_output}[/red]") - if os.path.exists(image): - return image - else: - console.print(f"[red]Screenshot creation failed for {image}[/red]") - return None - - except Exception as e: - console.print(f"[red]Error capturing screenshot for {input_file} at {seek_time}s: {e}[/red]") - return None - - def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=None, force_screenshots=False, manual_frames=None): - def use_tqdm(): - """Check if the environment supports TTY (interactive progress bar).""" - return sys.stdout.isatty() - - if meta['debug']: - start_time = time.time() - if 'image_list' not in meta: - meta['image_list'] = [] - - existing_images = [img for img in meta['image_list'] if isinstance(img, dict) and img.get('img_url', '').startswith('http')] - - if len(existing_images) >= meta.get('cutoff') and not force_screenshots: - console.print("[yellow]There are already at least {} images in the image list. 
Skipping additional screenshots.".format(meta.get('cutoff'))) - return - - if num_screens is None: - num_screens = self.screens - len(existing_images) - if num_screens <= 0: - return - - try: - with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", encoding='utf-8') as f: - mi = json.load(f) - video_track = mi['media']['track'][1] - length = video_track.get('Duration', mi['media']['track'][0]['Duration']) - width = float(video_track.get('Width')) - height = float(video_track.get('Height')) - par = float(video_track.get('PixelAspectRatio', 1)) - dar = float(video_track.get('DisplayAspectRatio')) - frame_rate = float(video_track.get('FrameRate', 24.0)) - - if par == 1: - sar = w_sar = h_sar = 1 - elif par < 1: - new_height = dar * height - sar = width / new_height - w_sar = 1 - h_sar = sar - else: - sar = w_sar = par - h_sar = 1 - length = round(float(length)) - except (FileNotFoundError, KeyError, ValueError) as e: - console.print(f"[red]Error processing MediaInfo.json: {e}") - return - - loglevel = 'verbose' if meta.get('ffdebug', False) else 'quiet' - os.chdir(f"{base_dir}/tmp/{folder_id}") - - if manual_frames: - if meta['debug']: - console.print(f"[yellow]Using manual frames: {manual_frames}") - manual_frames = [int(frame) for frame in manual_frames.split(',')] - ss_times = [frame / frame_rate for frame in manual_frames] - else: - ss_times = [] - - ss_times = self.valid_ss_time( - ss_times, - num_screens, - length, - frame_rate, - exclusion_zone=500 - ) - if meta['debug']: - console.print(f"[green]Final list of frames for screenshots: {ss_times}") - - tone_map = meta.get('tone_map', False) - if tone_map and "HDR" in meta['hdr']: - hdr_tonemap = True - else: - hdr_tonemap = False - - capture_tasks = [] - capture_results = [] - if hdr_tonemap: - task_limit = int(meta.get('tone_task_limit')) - else: - task_limit = int(meta.get('task_limit', os.cpu_count())) - - existing_images = 0 - for i in range(num_screens): - image_path = os.path.abspath(f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png") - if os.path.exists(image_path) and not meta.get('retake', False): - existing_images += 1 - - if existing_images == num_screens and not meta.get('retake', False): - console.print("[yellow]The correct number of screenshots already exists. Skipping capture process.") - else: - for i in range(num_screens + 1): - image_path = os.path.abspath(f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png") - if not os.path.exists(image_path) or meta.get('retake', False): - capture_tasks.append((path, ss_times[i], image_path, width, height, w_sar, h_sar, loglevel, hdr_tonemap)) - elif meta['debug']: - console.print(f"[yellow]Skipping existing screenshot: {image_path}") - - if not capture_tasks: - console.print("[yellow]All screenshots already exist. Skipping capture process.") - else: - if use_tqdm(): - with tqdm(total=len(capture_tasks), desc="Capturing Screenshots", ascii=True, dynamic_ncols=False) as pbar: - with get_context("spawn").Pool(processes=min(len(capture_tasks), task_limit)) as pool: - try: - for result in pool.imap_unordered(self.capture_screenshot, capture_tasks): - if isinstance(result, str) and result.startswith("Error:"): - console.print(f"[red]Capture Error: {result}") - else: - capture_results.append(result) - pbar.update(1) - finally: - pool.close() - pool.join() - else: - console.print("[blue]Non-TTY environment detected. 
Progress bar disabled.") - with get_context("spawn").Pool(processes=min(len(capture_tasks), task_limit)) as pool: - try: - for i, result in enumerate(pool.imap_unordered(self.capture_screenshot, capture_tasks), 1): - capture_results.append(result) - console.print(f"Processed {i}/{len(capture_tasks)} screenshots") - finally: - pool.close() - pool.join() - - if capture_results and (len(capture_results) + existing_images) > num_screens and not force_screenshots: - smallest = min(capture_results, key=os.path.getsize) - if meta['debug']: - console.print(f"[yellow]Removing smallest image: {smallest} ({os.path.getsize(smallest)} bytes)[/yellow]") - os.remove(smallest) - capture_results.remove(smallest) - - optimize_tasks = [(result, self.config) for result in capture_results if "Error" not in result] - optimize_results = [] - if optimize_tasks: - if use_tqdm(): - with tqdm(total=len(optimize_tasks), desc="Optimizing Images", ascii=True, dynamic_ncols=False) as pbar: - with get_context("spawn").Pool(processes=min(len(optimize_tasks), task_limit)) as pool: - try: - for result in pool.imap_unordered(self.optimize_image_task, optimize_tasks): - optimize_results.append(result) - pbar.update(1) - finally: - pool.close() - pool.join() - else: - with get_context("spawn").Pool(processes=min(len(optimize_tasks), task_limit)) as pool: - try: - for i, result in enumerate(pool.imap_unordered(self.optimize_image_task, optimize_tasks), 1): - optimize_results.append(result) - console.print(f"Optimized {i}/{len(optimize_tasks)} images") - finally: - pool.close() - pool.join() - - valid_results = [] - remaining_retakes = [] - for image_path in optimize_results: - if "Error" in image_path: - console.print(f"[red]{image_path}") - continue - - retake = False - image_size = os.path.getsize(image_path) - if not manual_frames: - if image_size <= 75000: - console.print(f"[yellow]Image {image_path} is incredibly small, retaking.") - retake = True - time.sleep(1) - elif image_size <= 31000000 and self.img_host == "imgbb" and not retake: - pass - elif image_size <= 10000000 and self.img_host in ["imgbox", "pixhost"] and not retake: - pass - elif self.img_host in ["ptpimg", "lensdump", "ptscreens", "oeimg"] and not retake: - pass - elif not retake: - console.print("[red]Image too large for your image host, retaking.") - retake = True - time.sleep(1) - - if retake: - retry_attempts = 3 - for attempt in range(1, retry_attempts + 1): - console.print(f"[yellow]Retaking screenshot for: {image_path} (Attempt {attempt}/{retry_attempts})[/yellow]") - try: - os.remove(image_path) - random_time = random.uniform(0, length) - self.capture_screenshot((path, random_time, image_path, width, height, w_sar, h_sar, loglevel, hdr_tonemap)) - self.optimize_image_task((image_path, config)) - new_size = os.path.getsize(image_path) - valid_image = False - - if new_size > 75000 and new_size <= 31000000 and self.img_host == "imgbb": - console.print(f"[green]Successfully retaken screenshot for: {image_path} ({new_size} bytes)[/green]") - valid_image = True - elif new_size > 75000 and new_size <= 10000000 and self.img_host in ["imgbox", "pixhost"]: - console.print(f"[green]Successfully retaken screenshot for: {image_path} ({new_size} bytes)[/green]") - valid_image = True - elif new_size > 75000 and self.img_host in ["ptpimg", "lensdump", "ptscreens", "oeimg"]: - console.print(f"[green]Successfully retaken screenshot for: {image_path} ({new_size} bytes)[/green]") - valid_image = True - - if valid_image: - valid_results.append(image_path) - break - 
else: - console.print(f"[red]Retaken image {image_path} does not meet the size requirements for {self.img_host}. Retrying...[/red]") - except Exception as e: - console.print(f"[red]Error retaking screenshot for {image_path}: {e}[/red]") - else: - console.print(f"[red]All retry attempts failed for {image_path}. Skipping.[/red]") - remaining_retakes.append(image_path) - else: - valid_results.append(image_path) - - if remaining_retakes: - console.print(f"[red]The following images could not be retaken successfully: {remaining_retakes}[/red]") - - for image_path in valid_results: - img_dict = { - 'img_url': image_path, - 'raw_url': image_path, - 'web_url': image_path - } - meta['image_list'].append(img_dict) - - console.print(f"[green]Successfully captured {len(valid_results)} screenshots.") - - if meta['debug']: - finish_time = time.time() - console.print(f"Screenshots processed in {finish_time - start_time:.4f} seconds") - - def valid_ss_time(self, ss_times, num_screens, length, frame_rate, exclusion_zone=None): - total_screens = num_screens + 1 - - if exclusion_zone is None: - exclusion_zone = max(length / (3 * total_screens), length / 15) - - result_times = ss_times.copy() - section_size = (round(4 * length / 5) - round(length / 5)) / total_screens * 1.3 - section_starts = [round(length / 5) + i * (section_size * 0.9) for i in range(total_screens)] - - for section_index in range(total_screens): - valid_time = False - attempts = 0 - start_frame = round(section_starts[section_index] * frame_rate) - end_frame = round((section_starts[section_index] + section_size) * frame_rate) - - while not valid_time and attempts < 50: - attempts += 1 - frame = random.randint(start_frame, end_frame) - time = frame / frame_rate - - if all(abs(frame - existing_time * frame_rate) > exclusion_zone * frame_rate for existing_time in result_times): - result_times.append(time) - valid_time = True - - if not valid_time: - midpoint_frame = (start_frame + end_frame) // 2 - result_times.append(midpoint_frame / frame_rate) - - result_times = sorted(result_times) - - return result_times - - def capture_screenshot(self, args): - path, ss_time, image_path, width, height, w_sar, h_sar, loglevel, hdr_tonemap = args - try: - if width <= 0 or height <= 0: - return "Error: Invalid width or height for scaling" - - if ss_time < 0: - return f"Error: Invalid timestamp {ss_time}" - - ff = ffmpeg.input(path, ss=ss_time) - if w_sar != 1 or h_sar != 1: - ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) - - if hdr_tonemap: - ff = ( - ff - .filter('zscale', transfer='linear') - .filter('tonemap', tonemap='mobius', desat=8.0) - .filter('zscale', transfer='bt709') - .filter('format', 'rgb24') - ) - - command = ( - ff - .output( - image_path, - vframes=1, - pix_fmt="rgb24" - ) - .overwrite_output() - .global_args('-loglevel', loglevel) - ) - - try: - command.run(capture_stdout=True, capture_stderr=True) - except ffmpeg.Error as e: - error_output = e.stderr.decode('utf-8') - return f"Error: {error_output}" - - if not os.path.exists(image_path) or os.path.getsize(image_path) == 0: - return f"Error: Screenshot not generated or is empty at {image_path}" - - return image_path - except Exception as e: - return f"Error: {str(e)}" - - def optimize_image_task(self, args): - image, config = args - try: - # Extract shared_seedbox and optimize_images from config - optimize_images = config['DEFAULT'].get('optimize_images', True) - shared_seedbox = config['DEFAULT'].get('shared_seedbox', True) - - if optimize_images: - if 
shared_seedbox: - # Limit the number of threads for oxipng - num_cores = multiprocessing.cpu_count() - max_threads = num_cores // 2 - os.environ['RAYON_NUM_THREADS'] = str(max_threads) - - if os.path.exists(image): - pyver = platform.python_version_tuple() - if int(pyver[0]) == 3 and int(pyver[1]) >= 7: - import oxipng - if os.path.getsize(image) >= 16000000: - oxipng.optimize(image, level=6) - else: - oxipng.optimize(image, level=2) - return image # Return image path if successful - except (KeyboardInterrupt, Exception) as e: - return f"Error: {e}" # Return error message - """ Get type and category """ - def get_type(self, video, scene, is_disc, meta): + async def get_type(self, video, scene, is_disc, meta): if meta.get('manual_type'): type = meta.get('manual_type') else: @@ -2195,7 +679,7 @@ def get_type(self, video, scene, is_disc, meta): type = "ENCODE" return type - def get_cat(self, video): + async def get_cat(self, video): # if category is None: category = guessit(video.replace('1.0', ''))['type'] if category.lower() == "movie": @@ -2206,372 +690,10 @@ def get_cat(self, video): category = "MOVIE" return category - async def get_tmdb_from_imdb(self, meta, filename): - if meta.get('tmdb_manual') is not None: - meta['tmdb'] = meta['tmdb_manual'] - return meta - imdb_id = meta['imdb'] - if str(imdb_id)[:2].lower() != "tt": - imdb_id = f"tt{imdb_id}" - find = tmdb.Find(id=imdb_id) - info = find.info(external_source="imdb_id") - if len(info['movie_results']) >= 1: - meta['category'] = "MOVIE" - meta['tmdb'] = info['movie_results'][0]['id'] - meta['original_language'] = info['movie_results'][0].get('original_language') - elif len(info['tv_results']) >= 1: - meta['category'] = "TV" - meta['tmdb'] = info['tv_results'][0]['id'] - meta['original_language'] = info['tv_results'][0].get('original_language') - else: - imdb_info = await self.get_imdb_info_api(imdb_id.replace('tt', ''), meta) - title = imdb_info.get("title") - if title is None: - title = filename - year = imdb_info.get('year') - if year is None: - year = meta['search_year'] - console.print(f"[yellow]TMDb was unable to find anything with that IMDb, searching TMDb for {title}") - meta = await self.get_tmdb_id(title, year, meta, meta['category'], imdb_info.get('original title', imdb_info.get('localized title', meta['uuid']))) - if meta.get('tmdb') in ('None', '', None, 0, '0'): - if meta.get('mode', 'discord') == 'cli': - console.print('[yellow]Unable to find a matching TMDb entry') - tmdb_id = console.input("Please enter tmdb id: ") - parser = Args(config=self.config) - meta['category'], meta['tmdb'] = parser.parse_tmdb_id(id=tmdb_id, category=meta.get('category')) - await asyncio.sleep(2) - return meta - - async def get_tmdb_id(self, filename, search_year, meta, category, untouched_filename="", attempted=0): - search = tmdb.Search() - try: - if category == "MOVIE": - search.movie(query=filename, year=search_year) - elif category == "TV": - search.tv(query=filename, first_air_date_year=search_year) - if meta.get('tmdb_manual') is not None: - meta['tmdb'] = meta['tmdb_manual'] - else: - meta['tmdb'] = search.results[0]['id'] - meta['category'] = category - except IndexError: - try: - if category == "MOVIE": - search.movie(query=filename) - elif category == "TV": - search.tv(query=filename) - meta['tmdb'] = search.results[0]['id'] - meta['category'] = category - except IndexError: - if category == "MOVIE": - category = "TV" - else: - category = "MOVIE" - if attempted <= 1: - attempted += 1 - meta = await self.get_tmdb_id(filename, 
search_year, meta, category, untouched_filename, attempted) - elif attempted == 2: - attempted += 1 - meta = await self.get_tmdb_id(anitopy.parse(guessit(untouched_filename, {"excludes": ["country", "language"]})['title'])['anime_title'], search_year, meta, meta['category'], untouched_filename, attempted) - if meta['tmdb'] in (None, ""): - console.print(f"[red]Unable to find TMDb match for {filename}") - if meta.get('mode', 'discord') == 'cli': - tmdb_id = cli_ui.ask_string("Please enter tmdb id in this format: tv/12345 or movie/12345") - parser = Args(config=self.config) - meta['category'], meta['tmdb'] = parser.parse_tmdb_id(id=tmdb_id, category=meta.get('category')) - meta['tmdb_manual'] = meta['tmdb'] - return meta - - return meta - - async def tmdb_other_meta(self, meta): - - if meta['tmdb'] == "0": - try: - title = guessit(meta['path'], {"excludes": ["country", "language"]})['title'].lower() - title = title.split('aka')[0] - meta = await self.get_tmdb_id(guessit(title, {"excludes": ["country", "language"]})['title'], meta['search_year'], meta) - if meta['tmdb'] == "0": - meta = await self.get_tmdb_id(title, "", meta, meta['category']) - except Exception: - if meta.get('mode', 'discord') == 'cli': - console.print("[bold red]Unable to find tmdb entry. Exiting.") - exit() - else: - console.print("[bold red]Unable to find tmdb entry") - return meta - if meta['category'] == "MOVIE": - movie = tmdb.Movies(meta['tmdb']) - response = movie.info() - meta['title'] = response['title'] - if response['release_date']: - meta['year'] = datetime.strptime(response['release_date'], '%Y-%m-%d').year - else: - console.print('[yellow]TMDB does not have a release date, using year from filename instead (if it exists)') - meta['year'] = meta['search_year'] - external = movie.external_ids() - if meta.get('imdb', None) is None: - imdb_id = external.get('imdb_id', "0") - if imdb_id == "" or imdb_id is None: - meta['imdb_id'] = '0' - else: - meta['imdb_id'] = str(int(imdb_id.replace('tt', ''))).zfill(7) - else: - meta['imdb_id'] = str(meta['imdb']).replace('tt', '').zfill(7) - if meta.get('tvdb_manual'): - meta['tvdb_id'] = meta['tvdb_manual'] - else: - if meta.get('tvdb_id', '0') in ['', ' ', None, 'None', '0']: - meta['tvdb_id'] = external.get('tvdb_id', '0') - if meta['tvdb_id'] in ["", None, " ", "None"]: - meta['tvdb_id'] = '0' - try: - videos = movie.videos() - for each in videos.get('results', []): - if each.get('site', "") == 'YouTube' and each.get('type', "") == "Trailer": - meta['youtube'] = f"https://www.youtube.com/watch?v={each.get('key')}" - break - except Exception: - console.print('[yellow]Unable to grab videos from TMDb.') - - meta['aka'], original_language = await self.get_imdb_aka_api(meta['imdb_id'], meta) - if original_language is not None: - meta['original_language'] = original_language - else: - meta['original_language'] = response['original_language'] - - meta['original_title'] = response.get('original_title', meta['title']) - meta['keywords'] = self.get_keywords(movie) - meta['genres'] = self.get_genres(response) - meta['tmdb_directors'] = self.get_directors(movie) - if meta.get('anime', False) is False: - meta['mal_id'], meta['aka'], meta['anime'] = self.get_anime(response, meta) - if meta.get('mal') is not None: - meta['mal_id'] = meta['mal'] - meta['poster'] = response.get('poster_path', "") - meta['tmdb_poster'] = response.get('poster_path', "") - meta['overview'] = response['overview'] - meta['tmdb_type'] = 'Movie' - meta['runtime'] = response.get('episode_run_time', 60) - elif 
meta['category'] == "TV": - tv = tmdb.TV(meta['tmdb']) - response = tv.info() - meta['title'] = response['name'] - if response['first_air_date']: - meta['year'] = datetime.strptime(response['first_air_date'], '%Y-%m-%d').year - else: - console.print('[yellow]TMDB does not have a release date, using year from filename instead (if it exists)') - meta['year'] = meta['search_year'] - external = tv.external_ids() - if meta.get('imdb', None) is None: - imdb_id = external.get('imdb_id', "0") - if imdb_id == "" or imdb_id is None: - meta['imdb_id'] = '0' - else: - meta['imdb_id'] = str(int(imdb_id.replace('tt', ''))).zfill(7) - else: - meta['imdb_id'] = str(int(meta['imdb'].replace('tt', ''))).zfill(7) - if meta.get('tvdb_manual'): - meta['tvdb_id'] = meta['tvdb_manual'] - else: - if meta.get('tvdb_id', '0') in ['', ' ', None, 'None', '0']: - meta['tvdb_id'] = external.get('tvdb_id', '0') - if meta['tvdb_id'] in ["", None, " ", "None"]: - meta['tvdb_id'] = '0' - try: - videos = tv.videos() - for each in videos.get('results', []): - if each.get('site', "") == 'YouTube' and each.get('type', "") == "Trailer": - meta['youtube'] = f"https://www.youtube.com/watch?v={each.get('key')}" - break - except Exception: - console.print('[yellow]Unable to grab videos from TMDb.') - - # meta['aka'] = f" AKA {response['original_name']}" - meta['aka'], original_language = await self.get_imdb_aka_api(meta['imdb_id'], meta) - if original_language is not None: - meta['original_language'] = original_language - else: - meta['original_language'] = response['original_language'] - meta['original_title'] = response.get('original_name', meta['title']) - meta['keywords'] = self.get_keywords(tv) - meta['genres'] = self.get_genres(response) - meta['tmdb_directors'] = self.get_directors(tv) - meta['mal_id'], meta['aka'], meta['anime'] = self.get_anime(response, meta) - if meta.get('mal') is not None: - meta['mal_id'] = meta['mal'] - meta['poster'] = response.get('poster_path', '') - meta['overview'] = response['overview'] - - meta['tmdb_type'] = response.get('type', 'Scripted') - runtime = response.get('episode_run_time', [60]) - if runtime == []: - runtime = [60] - meta['runtime'] = runtime[0] - if meta['poster'] not in (None, ''): - meta['poster'] = f"https://image.tmdb.org/t/p/original{meta['poster']}" - - difference = SequenceMatcher(None, meta['title'].lower(), meta['aka'][5:].lower()).ratio() - if difference >= 0.9 or meta['aka'][5:].strip() == "" or meta['aka'][5:].strip().lower() in meta['title'].lower(): - meta['aka'] = "" - if f"({meta['year']})" in meta['aka']: - meta['aka'] = meta['aka'].replace(f"({meta['year']})", "").strip() - - return meta - - def get_keywords(self, tmdb_info): - if tmdb_info is not None: - tmdb_keywords = tmdb_info.keywords() - if tmdb_keywords.get('keywords') is not None: - keywords = [f"{keyword['name'].replace(',', ' ')}" for keyword in tmdb_keywords.get('keywords')] - elif tmdb_keywords.get('results') is not None: - keywords = [f"{keyword['name'].replace(',', ' ')}" for keyword in tmdb_keywords.get('results')] - return (', '.join(keywords)) - else: - return '' - - def get_genres(self, tmdb_info): - if tmdb_info is not None: - tmdb_genres = tmdb_info.get('genres', []) - if tmdb_genres is not []: - genres = [f"{genre['name'].replace(',', ' ')}" for genre in tmdb_genres] - return (', '.join(genres)) - else: - return '' - - def get_directors(self, tmdb_info): - if tmdb_info is not None: - tmdb_credits = tmdb_info.credits() - directors = [] - if tmdb_credits.get('cast', []) != []: - for each in 
tmdb_credits['cast']: - if each.get('known_for_department', '') == "Directing": - directors.append(each.get('original_name', each.get('name'))) - return directors - else: - return '' - - def get_anime(self, response, meta): - tmdb_name = meta['title'] - if meta.get('aka', "") == "": - alt_name = "" - else: - alt_name = meta['aka'] - anime = False - animation = False - for each in response['genres']: - if each['id'] == 16: - animation = True - if response['original_language'] == 'ja' and animation is True: - romaji, mal_id, eng_title, season_year, episodes = self.get_romaji(tmdb_name, meta.get('mal', None)) - alt_name = f" AKA {romaji}" - - anime = True - # mal = AnimeSearch(romaji) - # mal_id = mal.results[0].mal_id - else: - mal_id = 0 - if meta.get('mal_id', 0) != 0: - mal_id = meta.get('mal_id') - if meta.get('mal') is not None: - mal_id = meta.get('mal') - return mal_id, alt_name, anime - - def get_romaji(self, tmdb_name, mal): - if mal is None: - mal = 0 - tmdb_name = tmdb_name.replace('-', "").replace("The Movie", "") - tmdb_name = ' '.join(tmdb_name.split()) - query = ''' - query ($search: String) { - Page (page: 1) { - pageInfo { - total - } - media (search: $search, type: ANIME, sort: SEARCH_MATCH) { - id - idMal - title { - romaji - english - native - } - seasonYear - episodes - } - } - } - ''' - # Define our query variables and values that will be used in the query request - variables = { - 'search': tmdb_name - } - else: - query = ''' - query ($search: Int) { - Page (page: 1) { - pageInfo { - total - } - media (idMal: $search, type: ANIME, sort: SEARCH_MATCH) { - id - idMal - title { - romaji - english - native - } - seasonYear - episodes - } - } - } - ''' - # Define our query variables and values that will be used in the query request - variables = { - 'search': mal - } - - # Make the HTTP Api request - url = 'https://graphql.anilist.co' - try: - response = requests.post(url, json={'query': query, 'variables': variables}) - json = response.json() - media = json['data']['Page']['media'] - except Exception: - console.print('[red]Failed to get anime specific info from anilist. 
Continuing without it...') - media = [] - if media not in (None, []): - result = {'title': {}} - difference = 0 - for anime in media: - search_name = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", tmdb_name.lower().replace(' ', '')) - for title in anime['title'].values(): - if title is not None: - title = re.sub(u'[\u3000-\u303f\u3040-\u309f\u30a0-\u30ff\uff00-\uff9f\u4e00-\u9faf\u3400-\u4dbf]+ (?=[A-Za-z ]+–)', "", title.lower().replace(' ', ''), re.U) - diff = SequenceMatcher(None, title, search_name).ratio() - if diff >= difference: - result = anime - difference = diff - - romaji = result['title'].get('romaji', result['title'].get('english', "")) - mal_id = result.get('idMal', 0) - eng_title = result['title'].get('english', result['title'].get('romaji', "")) - season_year = result.get('season_year', "") - episodes = result.get('episodes', 0) - else: - romaji = eng_title = season_year = "" - episodes = mal_id = 0 - if mal_id in [None, 0]: - mal_id = mal - if not episodes: - episodes = 0 - return romaji, mal_id, eng_title, season_year, episodes - """ Mediainfo/Bdinfo > meta """ - def get_audio_v2(self, mi, meta, bdinfo): + async def get_audio_v2(self, mi, meta, bdinfo): extra = dual = "" has_commentary = False @@ -2758,7 +880,7 @@ def get_audio_v2(self, mi, meta, bdinfo): audio = ' '.join(audio.split()) return audio, chan, has_commentary - def is_3d(self, mi, bdinfo): + async def is_3d(self, mi, bdinfo): if bdinfo is not None: if bdinfo['video'][0]['3d'] != "": return "3D" @@ -2767,7 +889,7 @@ def is_3d(self, mi, bdinfo): else: return "" - def get_tag(self, video, meta): + async def get_tag(self, video, meta): try: parsed = guessit(video) release_group = parsed.get('release_group') @@ -2790,7 +912,7 @@ def get_tag(self, video, meta): return tag - def get_source(self, type, video, path, is_disc, meta, folder_id, base_dir): + async def get_source(self, type, video, path, is_disc, meta, folder_id, base_dir): try: with open(f'{base_dir}/tmp/{folder_id}/MediaInfo.json', 'r', encoding='utf-8') as f: mi = json.load(f) @@ -2865,7 +987,7 @@ def get_source(self, type, video, path, is_disc, meta, folder_id, base_dir): return source, type - def get_uhd(self, type, guess, resolution, path): + async def get_uhd(self, type, guess, resolution, path): try: source = guess['Source'] other = guess['Other'] @@ -2885,7 +1007,7 @@ def get_uhd(self, type, guess, resolution, path): return uhd - def get_hdr(self, mi, bdinfo): + async def get_hdr(self, mi, bdinfo): hdr = "" dv = "" if bdinfo is not None: # Disks @@ -2929,81 +1051,7 @@ def get_hdr(self, mi, bdinfo): hdr = f"{dv} {hdr}".strip() return hdr - def get_region(self, bdinfo, region=None): - label = bdinfo.get('label', bdinfo.get('title', bdinfo.get('path', ''))).replace('.', ' ') - if region is not None: - region = region.upper() - else: - regions = { - 'AFG': 'AFG', 'AIA': 'AIA', 'ALA': 'ALA', 'ALG': 'ALG', 'AND': 'AND', 'ANG': 'ANG', 'ARG': 'ARG', - 'ARM': 'ARM', 'ARU': 'ARU', 'ASA': 'ASA', 'ATA': 'ATA', 'ATF': 'ATF', 'ATG': 'ATG', 'AUS': 'AUS', - 'AUT': 'AUT', 'AZE': 'AZE', 'BAH': 'BAH', 'BAN': 'BAN', 'BDI': 'BDI', 'BEL': 'BEL', 'BEN': 'BEN', - 'BER': 'BER', 'BES': 'BES', 'BFA': 'BFA', 'BHR': 'BHR', 'BHU': 'BHU', 'BIH': 'BIH', 'BLM': 'BLM', - 'BLR': 'BLR', 'BLZ': 'BLZ', 'BOL': 'BOL', 'BOT': 'BOT', 'BRA': 'BRA', 'BRB': 'BRB', 'BRU': 'BRU', - 'BVT': 'BVT', 'CAM': 'CAM', 'CAN': 'CAN', 'CAY': 'CAY', 'CCK': 'CCK', 'CEE': 'CEE', 'CGO': 'CGO', - 'CHA': 'CHA', 'CHI': 'CHI', 'CHN': 'CHN', 'CIV': 'CIV', 'CMR': 'CMR', 'COD': 'COD', 'COK': 'COK', - 'COL': 'COL', 'COM': 'COM', 
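# Hedged sketch of the AniList lookup performed by the removed get_romaji() helper above:
# a GraphQL POST to https://graphql.anilist.co returning romaji/english titles, the MAL id,
# season year and episode count. `search_title` is an illustrative parameter name.
import requests

def anilist_lookup(search_title):
    query = '''
    query ($search: String) {
      Page(page: 1) {
        media(search: $search, type: ANIME, sort: SEARCH_MATCH) {
          idMal
          title { romaji english native }
          seasonYear
          episodes
        }
      }
    }
    '''
    resp = requests.post(
        'https://graphql.anilist.co',
        json={'query': query, 'variables': {'search': search_title}},
        timeout=30,
    )
    resp.raise_for_status()
    return resp.json()['data']['Page']['media']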
'CPV': 'CPV', 'CRC': 'CRC', 'CRO': 'CRO', 'CTA': 'CTA', 'CUB': 'CUB', - 'CUW': 'CUW', 'CXR': 'CXR', 'CYP': 'CYP', 'DJI': 'DJI', 'DMA': 'DMA', 'DOM': 'DOM', 'ECU': 'ECU', - 'EGY': 'EGY', 'ENG': 'ENG', 'EQG': 'EQG', 'ERI': 'ERI', 'ESH': 'ESH', 'ESP': 'ESP', 'ETH': 'ETH', - 'FIJ': 'FIJ', 'FLK': 'FLK', 'FRA': 'FRA', 'FRO': 'FRO', 'FSM': 'FSM', 'GAB': 'GAB', 'GAM': 'GAM', - 'GBR': 'GBR', 'GEO': 'GEO', 'GER': 'GER', 'GGY': 'GGY', 'GHA': 'GHA', 'GIB': 'GIB', 'GLP': 'GLP', - 'GNB': 'GNB', 'GRE': 'GRE', 'GRL': 'GRL', 'GRN': 'GRN', 'GUA': 'GUA', 'GUF': 'GUF', 'GUI': 'GUI', - 'GUM': 'GUM', 'GUY': 'GUY', 'HAI': 'HAI', 'HKG': 'HKG', 'HMD': 'HMD', 'HON': 'HON', 'HUN': 'HUN', - 'IDN': 'IDN', 'IMN': 'IMN', 'IND': 'IND', 'IOT': 'IOT', 'IRL': 'IRL', 'IRN': 'IRN', 'IRQ': 'IRQ', - 'ISL': 'ISL', 'ISR': 'ISR', 'ITA': 'ITA', 'JAM': 'JAM', 'JEY': 'JEY', 'JOR': 'JOR', 'JPN': 'JPN', - 'KAZ': 'KAZ', 'KEN': 'KEN', 'KGZ': 'KGZ', 'KIR': 'KIR', 'KNA': 'KNA', 'KOR': 'KOR', 'KSA': 'KSA', - 'KUW': 'KUW', 'KVX': 'KVX', 'LAO': 'LAO', 'LBN': 'LBN', 'LBR': 'LBR', 'LBY': 'LBY', 'LCA': 'LCA', - 'LES': 'LES', 'LIE': 'LIE', 'LKA': 'LKA', 'LUX': 'LUX', 'MAC': 'MAC', 'MAD': 'MAD', 'MAF': 'MAF', - 'MAR': 'MAR', 'MAS': 'MAS', 'MDA': 'MDA', 'MDV': 'MDV', 'MEX': 'MEX', 'MHL': 'MHL', 'MKD': 'MKD', - 'MLI': 'MLI', 'MLT': 'MLT', 'MNG': 'MNG', 'MNP': 'MNP', 'MON': 'MON', 'MOZ': 'MOZ', 'MRI': 'MRI', - 'MSR': 'MSR', 'MTN': 'MTN', 'MTQ': 'MTQ', 'MWI': 'MWI', 'MYA': 'MYA', 'MYT': 'MYT', 'NAM': 'NAM', - 'NCA': 'NCA', 'NCL': 'NCL', 'NEP': 'NEP', 'NFK': 'NFK', 'NIG': 'NIG', 'NIR': 'NIR', 'NIU': 'NIU', - 'NLD': 'NLD', 'NOR': 'NOR', 'NRU': 'NRU', 'NZL': 'NZL', 'OMA': 'OMA', 'PAK': 'PAK', 'PAN': 'PAN', - 'PAR': 'PAR', 'PCN': 'PCN', 'PER': 'PER', 'PHI': 'PHI', 'PLE': 'PLE', 'PLW': 'PLW', 'PNG': 'PNG', - 'POL': 'POL', 'POR': 'POR', 'PRK': 'PRK', 'PUR': 'PUR', 'QAT': 'QAT', 'REU': 'REU', 'ROU': 'ROU', - 'RSA': 'RSA', 'RUS': 'RUS', 'RWA': 'RWA', 'SAM': 'SAM', 'SCO': 'SCO', 'SDN': 'SDN', 'SEN': 'SEN', - 'SEY': 'SEY', 'SGS': 'SGS', 'SHN': 'SHN', 'SIN': 'SIN', 'SJM': 'SJM', 'SLE': 'SLE', 'SLV': 'SLV', - 'SMR': 'SMR', 'SOL': 'SOL', 'SOM': 'SOM', 'SPM': 'SPM', 'SRB': 'SRB', 'SSD': 'SSD', 'STP': 'STP', - 'SUI': 'SUI', 'SUR': 'SUR', 'SWZ': 'SWZ', 'SXM': 'SXM', 'SYR': 'SYR', 'TAH': 'TAH', 'TAN': 'TAN', - 'TCA': 'TCA', 'TGA': 'TGA', 'THA': 'THA', 'TJK': 'TJK', 'TKL': 'TKL', 'TKM': 'TKM', 'TLS': 'TLS', - 'TOG': 'TOG', 'TRI': 'TRI', 'TUN': 'TUN', 'TUR': 'TUR', 'TUV': 'TUV', 'TWN': 'TWN', 'UAE': 'UAE', - 'UGA': 'UGA', 'UKR': 'UKR', 'UMI': 'UMI', 'URU': 'URU', 'USA': 'USA', 'UZB': 'UZB', 'VAN': 'VAN', - 'VAT': 'VAT', 'VEN': 'VEN', 'VGB': 'VGB', 'VIE': 'VIE', 'VIN': 'VIN', 'VIR': 'VIR', 'WAL': 'WAL', - 'WLF': 'WLF', 'YEM': 'YEM', 'ZAM': 'ZAM', 'ZIM': 'ZIM', "EUR": "EUR" - } - for key, value in regions.items(): - if f" {key} " in label: - region = value - - if region is None: - region = "" - return region - - def get_distributor(self, distributor_in): - distributor_list = [ - '01 DISTRIBUTION', '100 DESTINATIONS TRAVEL FILM', '101 FILMS', '1FILMS', '2 ENTERTAIN VIDEO', '20TH CENTURY FOX', '2L', '3D CONTENT HUB', '3D MEDIA', '3L FILM', '4DIGITAL', '4DVD', '4K ULTRA HD MOVIES', '4K UHD', '8-FILMS', '84 ENTERTAINMENT', '88 FILMS', '@ANIME', 'ANIME', 'A CONTRACORRIENTE', 'A CONTRACORRIENTE FILMS', 'A&E HOME VIDEO', 'A&E', 'A&M RECORDS', 'A+E NETWORKS', 'A+R', 'A-FILM', 'AAA', 'AB VIDÉO', 'AB VIDEO', 'ABC - (AUSTRALIAN BROADCASTING CORPORATION)', 'ABC', 'ABKCO', 'ABSOLUT MEDIEN', 'ABSOLUTE', 'ACCENT FILM ENTERTAINMENT', 'ACCENTUS', 'ACORN MEDIA', 'AD VITAM', 'ADA', 'ADITYA 
VIDEOS', 'ADSO FILMS', 'AFM RECORDS', 'AGFA', 'AIX RECORDS', - 'ALAMODE FILM', 'ALBA RECORDS', 'ALBANY RECORDS', 'ALBATROS', 'ALCHEMY', 'ALIVE', 'ALL ANIME', 'ALL INTERACTIVE ENTERTAINMENT', 'ALLEGRO', 'ALLIANCE', 'ALPHA MUSIC', 'ALTERDYSTRYBUCJA', 'ALTERED INNOCENCE', 'ALTITUDE FILM DISTRIBUTION', 'ALUCARD RECORDS', 'AMAZING D.C.', 'AMAZING DC', 'AMMO CONTENT', 'AMUSE SOFT ENTERTAINMENT', 'ANCONNECT', 'ANEC', 'ANIMATSU', 'ANIME HOUSE', 'ANIME LTD', 'ANIME WORKS', 'ANIMEIGO', 'ANIPLEX', 'ANOLIS ENTERTAINMENT', 'ANOTHER WORLD ENTERTAINMENT', 'AP INTERNATIONAL', 'APPLE', 'ARA MEDIA', 'ARBELOS', 'ARC ENTERTAINMENT', 'ARP SÉLECTION', 'ARP SELECTION', 'ARROW', 'ART SERVICE', 'ART VISION', 'ARTE ÉDITIONS', 'ARTE EDITIONS', 'ARTE VIDÉO', - 'ARTE VIDEO', 'ARTHAUS MUSIK', 'ARTIFICIAL EYE', 'ARTSPLOITATION FILMS', 'ARTUS FILMS', 'ASCOT ELITE HOME ENTERTAINMENT', 'ASIA VIDEO', 'ASMIK ACE', 'ASTRO RECORDS & FILMWORKS', 'ASYLUM', 'ATLANTIC FILM', 'ATLANTIC RECORDS', 'ATLAS FILM', 'AUDIO VISUAL ENTERTAINMENT', 'AURO-3D CREATIVE LABEL', 'AURUM', 'AV VISIONEN', 'AV-JET', 'AVALON', 'AVENTI', 'AVEX TRAX', 'AXIOM', 'AXIS RECORDS', 'AYNGARAN', 'BAC FILMS', 'BACH FILMS', 'BANDAI VISUAL', 'BARCLAY', 'BBC', 'BRITISH BROADCASTING CORPORATION', 'BBI FILMS', 'BBI', 'BCI HOME ENTERTAINMENT', 'BEGGARS BANQUET', 'BEL AIR CLASSIQUES', 'BELGA FILMS', 'BELVEDERE', 'BENELUX FILM DISTRIBUTORS', 'BENNETT-WATT MEDIA', 'BERLIN CLASSICS', 'BERLINER PHILHARMONIKER RECORDINGS', 'BEST ENTERTAINMENT', 'BEYOND HOME ENTERTAINMENT', 'BFI VIDEO', 'BFI', 'BRITISH FILM INSTITUTE', 'BFS ENTERTAINMENT', 'BFS', 'BHAVANI', 'BIBER RECORDS', 'BIG HOME VIDEO', 'BILDSTÖRUNG', - 'BILDSTORUNG', 'BILL ZEBUB', 'BIRNENBLATT', 'BIT WEL', 'BLACK BOX', 'BLACK HILL PICTURES', 'BLACK HILL', 'BLACK HOLE RECORDINGS', 'BLACK HOLE', 'BLAQOUT', 'BLAUFIELD MUSIC', 'BLAUFIELD', 'BLOCKBUSTER ENTERTAINMENT', 'BLOCKBUSTER', 'BLU PHASE MEDIA', 'BLU-RAY ONLY', 'BLU-RAY', 'BLURAY ONLY', 'BLURAY', 'BLUE GENTIAN RECORDS', 'BLUE KINO', 'BLUE UNDERGROUND', 'BMG/ARISTA', 'BMG', 'BMGARISTA', 'BMG ARISTA', 'ARISTA', 'ARISTA/BMG', 'ARISTABMG', 'ARISTA BMG', 'BONTON FILM', 'BONTON', 'BOOMERANG PICTURES', 'BOOMERANG', 'BQHL ÉDITIONS', 'BQHL EDITIONS', 'BQHL', 'BREAKING GLASS', 'BRIDGESTONE', 'BRINK', 'BROAD GREEN PICTURES', 'BROAD GREEN', 'BUSCH MEDIA GROUP', 'BUSCH', 'C MAJOR', 'C.B.S.', 'CAICHANG', 'CALIFÓRNIA FILMES', 'CALIFORNIA FILMES', 'CALIFORNIA', 'CAMEO', 'CAMERA OBSCURA', 'CAMERATA', 'CAMP MOTION PICTURES', 'CAMP MOTION', 'CAPELIGHT PICTURES', 'CAPELIGHT', 'CAPITOL', 'CAPITOL RECORDS', 'CAPRICCI', 'CARGO RECORDS', 'CARLOTTA FILMS', 'CARLOTTA', 'CARLOTA', 'CARMEN FILM', 'CASCADE', 'CATCHPLAY', 'CAULDRON FILMS', 'CAULDRON', 'CBS TELEVISION STUDIOS', 'CBS', 'CCTV', 'CCV ENTERTAINMENT', 'CCV', 'CD BABY', 'CD LAND', 'CECCHI GORI', 'CENTURY MEDIA', 'CHUAN XUN SHI DAI MULTIMEDIA', 'CINE-ASIA', 'CINÉART', 'CINEART', 'CINEDIGM', 'CINEFIL IMAGICA', 'CINEMA EPOCH', 'CINEMA GUILD', 'CINEMA LIBRE STUDIOS', 'CINEMA MONDO', 'CINEMATIC VISION', 'CINEPLOIT RECORDS', 'CINESTRANGE EXTREME', 'CITEL VIDEO', 'CITEL', 'CJ ENTERTAINMENT', 'CJ', 'CLASSIC MEDIA', 'CLASSICFLIX', 'CLASSICLINE', 'CLAUDIO RECORDS', 'CLEAR VISION', 'CLEOPATRA', 'CLOSE UP', 'CMS MEDIA LIMITED', 'CMV LASERVISION', 'CN ENTERTAINMENT', 'CODE RED', 'COHEN MEDIA GROUP', 'COHEN', 'COIN DE MIRE CINÉMA', 'COIN DE MIRE CINEMA', 'COLOSSEO FILM', 'COLUMBIA', 'COLUMBIA PICTURES', 'COLUMBIA/TRI-STAR', 'TRI-STAR', 'COMMERCIAL MARKETING', 'CONCORD MUSIC GROUP', 'CONCORDE VIDEO', 'CONDOR', 'CONSTANTIN FILM', 'CONSTANTIN', 
'CONSTANTINO FILMES', 'CONSTANTINO', 'CONSTRUCTIVE MEDIA SERVICE', 'CONSTRUCTIVE', 'CONTENT ZONE', 'CONTENTS GATE', 'COQUEIRO VERDE', 'CORNERSTONE MEDIA', 'CORNERSTONE', 'CP DIGITAL', 'CREST MOVIES', 'CRITERION', 'CRITERION COLLECTION', 'CC', 'CRYSTAL CLASSICS', 'CULT EPICS', 'CULT FILMS', 'CULT VIDEO', 'CURZON FILM WORLD', 'D FILMS', "D'AILLY COMPANY", 'DAILLY COMPANY', 'D AILLY COMPANY', "D'AILLY", 'DAILLY', 'D AILLY', 'DA CAPO', 'DA MUSIC', "DALL'ANGELO PICTURES", 'DALLANGELO PICTURES', "DALL'ANGELO", 'DALL ANGELO PICTURES', 'DALL ANGELO', 'DAREDO', 'DARK FORCE ENTERTAINMENT', 'DARK FORCE', 'DARK SIDE RELEASING', 'DARK SIDE', 'DAZZLER MEDIA', 'DAZZLER', 'DCM PICTURES', 'DCM', 'DEAPLANETA', 'DECCA', 'DEEPJOY', 'DEFIANT SCREEN ENTERTAINMENT', 'DEFIANT SCREEN', 'DEFIANT', 'DELOS', 'DELPHIAN RECORDS', 'DELPHIAN', 'DELTA MUSIC & ENTERTAINMENT', 'DELTA MUSIC AND ENTERTAINMENT', 'DELTA MUSIC ENTERTAINMENT', 'DELTA MUSIC', 'DELTAMAC CO. LTD.', 'DELTAMAC CO LTD', 'DELTAMAC CO', 'DELTAMAC', 'DEMAND MEDIA', 'DEMAND', 'DEP', 'DEUTSCHE GRAMMOPHON', 'DFW', 'DGM', 'DIAPHANA', 'DIGIDREAMS STUDIOS', 'DIGIDREAMS', 'DIGITAL ENVIRONMENTS', 'DIGITAL', 'DISCOTEK MEDIA', 'DISCOVERY CHANNEL', 'DISCOVERY', 'DISK KINO', 'DISNEY / BUENA VISTA', 'DISNEY', 'BUENA VISTA', 'DISNEY BUENA VISTA', 'DISTRIBUTION SELECT', 'DIVISA', 'DNC ENTERTAINMENT', 'DNC', 'DOGWOOF', 'DOLMEN HOME VIDEO', 'DOLMEN', 'DONAU FILM', 'DONAU', 'DORADO FILMS', 'DORADO', 'DRAFTHOUSE FILMS', 'DRAFTHOUSE', 'DRAGON FILM ENTERTAINMENT', 'DRAGON ENTERTAINMENT', 'DRAGON FILM', 'DRAGON', 'DREAMWORKS', 'DRIVE ON RECORDS', 'DRIVE ON', 'DRIVE-ON', 'DRIVEON', 'DS MEDIA', 'DTP ENTERTAINMENT AG', 'DTP ENTERTAINMENT', 'DTP AG', 'DTP', 'DTS ENTERTAINMENT', 'DTS', 'DUKE MARKETING', 'DUKE VIDEO DISTRIBUTION', 'DUKE', 'DUTCH FILMWORKS', 'DUTCH', 'DVD INTERNATIONAL', 'DVD', 'DYBEX', 'DYNAMIC', 'DYNIT', 'E1 ENTERTAINMENT', 'E1', 'EAGLE ENTERTAINMENT', 'EAGLE HOME ENTERTAINMENT PVT.LTD.', 'EAGLE HOME ENTERTAINMENT PVTLTD', 'EAGLE HOME ENTERTAINMENT PVT LTD', 'EAGLE HOME ENTERTAINMENT', 'EAGLE PICTURES', 'EAGLE ROCK ENTERTAINMENT', 'EAGLE ROCK', 'EAGLE VISION MEDIA', 'EAGLE VISION', 'EARMUSIC', 'EARTH ENTERTAINMENT', 'EARTH', 'ECHO BRIDGE ENTERTAINMENT', 'ECHO BRIDGE', 'EDEL GERMANY GMBH', 'EDEL GERMANY', 'EDEL RECORDS', 'EDITION TONFILM', 'EDITIONS MONTPARNASSE', 'EDKO FILMS LTD.', 'EDKO FILMS LTD', 'EDKO FILMS', - 'EDKO', "EIN'S M&M CO", 'EINS M&M CO', "EIN'S M&M", 'EINS M&M', 'ELEA-MEDIA', 'ELEA MEDIA', 'ELEA', 'ELECTRIC PICTURE', 'ELECTRIC', 'ELEPHANT FILMS', 'ELEPHANT', 'ELEVATION', 'EMI', 'EMON', 'EMS', 'EMYLIA', 'ENE MEDIA', 'ENE', 'ENTERTAINMENT IN VIDEO', 'ENTERTAINMENT IN', 'ENTERTAINMENT ONE', 'ENTERTAINMENT ONE FILMS CANADA INC.', 'ENTERTAINMENT ONE FILMS CANADA INC', 'ENTERTAINMENT ONE FILMS CANADA', 'ENTERTAINMENT ONE CANADA INC', 'ENTERTAINMENT ONE CANADA', 'ENTERTAINMENTONE', 'EONE', 'EOS', 'EPIC PICTURES', 'EPIC', 'EPIC RECORDS', 'ERATO', 'EROS', 'ESC EDITIONS', 'ESCAPI MEDIA BV', 'ESOTERIC RECORDINGS', 'ESPN FILMS', 'EUREKA ENTERTAINMENT', 'EUREKA', 'EURO PICTURES', 'EURO VIDEO', 'EUROARTS', 'EUROPA FILMES', 'EUROPA', 'EUROPACORP', 'EUROZOOM', 'EXCEL', 'EXPLOSIVE MEDIA', 'EXPLOSIVE', 'EXTRALUCID FILMS', 'EXTRALUCID', 'EYE SEE MOVIES', 'EYE SEE', 'EYK MEDIA', 'EYK', 'FABULOUS FILMS', 'FABULOUS', 'FACTORIS FILMS', 'FACTORIS', 'FARAO RECORDS', 'FARBFILM HOME ENTERTAINMENT', 'FARBFILM ENTERTAINMENT', 'FARBFILM HOME', 'FARBFILM', 'FEELGOOD ENTERTAINMENT', 'FEELGOOD', 'FERNSEHJUWELEN', 'FILM CHEST', 'FILM MEDIA', 'FILM MOVEMENT', 'FILM4', 'FILMART', 
'FILMAURO', 'FILMAX', 'FILMCONFECT HOME ENTERTAINMENT', 'FILMCONFECT ENTERTAINMENT', 'FILMCONFECT HOME', 'FILMCONFECT', 'FILMEDIA', 'FILMJUWELEN', 'FILMOTEKA NARODAWA', 'FILMRISE', 'FINAL CUT ENTERTAINMENT', 'FINAL CUT', 'FIREHOUSE 12 RECORDS', 'FIREHOUSE 12', 'FIRST INTERNATIONAL PRODUCTION', 'FIRST INTERNATIONAL', 'FIRST LOOK STUDIOS', 'FIRST LOOK', 'FLAGMAN TRADE', 'FLASHSTAR FILMES', 'FLASHSTAR', 'FLICKER ALLEY', 'FNC ADD CULTURE', 'FOCUS FILMES', 'FOCUS', 'FOKUS MEDIA', 'FOKUSA', 'FOX PATHE EUROPA', 'FOX PATHE', 'FOX EUROPA', 'FOX/MGM', 'FOX MGM', 'MGM', 'MGM/FOX', 'FOX', 'FPE', 'FRANCE TÉLÉVISIONS DISTRIBUTION', 'FRANCE TELEVISIONS DISTRIBUTION', 'FRANCE TELEVISIONS', 'FRANCE', 'FREE DOLPHIN ENTERTAINMENT', 'FREE DOLPHIN', 'FREESTYLE DIGITAL MEDIA', 'FREESTYLE DIGITAL', 'FREESTYLE', 'FREMANTLE HOME ENTERTAINMENT', 'FREMANTLE ENTERTAINMENT', 'FREMANTLE HOME', 'FREMANTL', 'FRENETIC FILMS', 'FRENETIC', 'FRONTIER WORKS', 'FRONTIER', 'FRONTIERS MUSIC', 'FRONTIERS RECORDS', 'FS FILM OY', 'FS FILM', 'FULL MOON FEATURES', 'FULL MOON', 'FUN CITY EDITIONS', 'FUN CITY', - 'FUNIMATION ENTERTAINMENT', 'FUNIMATION', 'FUSION', 'FUTUREFILM', 'G2 PICTURES', 'G2', 'GAGA COMMUNICATIONS', 'GAGA', 'GAIAM', 'GALAPAGOS', 'GAMMA HOME ENTERTAINMENT', 'GAMMA ENTERTAINMENT', 'GAMMA HOME', 'GAMMA', 'GARAGEHOUSE PICTURES', 'GARAGEHOUSE', 'GARAGEPLAY (車庫娛樂)', '車庫娛樂', 'GARAGEPLAY (Che Ku Yu Le )', 'GARAGEPLAY', 'Che Ku Yu Le', 'GAUMONT', 'GEFFEN', 'GENEON ENTERTAINMENT', 'GENEON', 'GENEON UNIVERSAL ENTERTAINMENT', 'GENERAL VIDEO RECORDING', 'GLASS DOLL FILMS', 'GLASS DOLL', 'GLOBE MUSIC MEDIA', 'GLOBE MUSIC', 'GLOBE MEDIA', 'GLOBE', 'GO ENTERTAIN', 'GO', 'GOLDEN HARVEST', 'GOOD!MOVIES', 'GOOD! MOVIES', 'GOOD MOVIES', 'GRAPEVINE VIDEO', 'GRAPEVINE', 'GRASSHOPPER FILM', 'GRASSHOPPER FILMS', 'GRASSHOPPER', 'GRAVITAS VENTURES', 'GRAVITAS', 'GREAT MOVIES', 'GREAT', 'GREEN APPLE ENTERTAINMENT', 'GREEN ENTERTAINMENT', 'GREEN APPLE', 'GREEN', 'GREENNARAE MEDIA', 'GREENNARAE', 'GRINDHOUSE RELEASING', 'GRINDHOUSE', 'GRIND HOUSE', 'GRYPHON ENTERTAINMENT', 'GRYPHON', 'GUNPOWDER & SKY', 'GUNPOWDER AND SKY', 'GUNPOWDER SKY', 'GUNPOWDER + SKY', 'GUNPOWDER', 'HANABEE ENTERTAINMENT', 'HANABEE', 'HANNOVER HOUSE', 'HANNOVER', 'HANSESOUND', 'HANSE SOUND', 'HANSE', 'HAPPINET', 'HARMONIA MUNDI', 'HARMONIA', 'HBO', 'HDC', 'HEC', 'HELL & BACK RECORDINGS', 'HELL AND BACK RECORDINGS', 'HELL & BACK', 'HELL AND BACK', "HEN'S TOOTH VIDEO", 'HENS TOOTH VIDEO', "HEN'S TOOTH", 'HENS TOOTH', 'HIGH FLIERS', 'HIGHLIGHT', 'HILLSONG', 'HISTORY CHANNEL', 'HISTORY', 'HK VIDÉO', 'HK VIDEO', 'HK', 'HMH HAMBURGER MEDIEN HAUS', 'HAMBURGER MEDIEN HAUS', 'HMH HAMBURGER MEDIEN', 'HMH HAMBURGER', 'HMH', 'HOLLYWOOD CLASSIC ENTERTAINMENT', 'HOLLYWOOD CLASSIC', 'HOLLYWOOD PICTURES', 'HOLLYWOOD', 'HOPSCOTCH ENTERTAINMENT', 'HOPSCOTCH', 'HPM', 'HÄNNSLER CLASSIC', 'HANNSLER CLASSIC', 'HANNSLER', 'I-CATCHER', 'I CATCHER', 'ICATCHER', 'I-ON NEW MEDIA', 'I ON NEW MEDIA', 'ION NEW MEDIA', 'ION MEDIA', 'I-ON', 'ION', 'IAN PRODUCTIONS', 'IAN', 'ICESTORM', 'ICON FILM DISTRIBUTION', 'ICON DISTRIBUTION', 'ICON FILM', 'ICON', 'IDEALE AUDIENCE', 'IDEALE', 'IFC FILMS', 'IFC', 'IFILM', 'ILLUSIONS UNLTD.', 'ILLUSIONS UNLTD', 'ILLUSIONS', 'IMAGE ENTERTAINMENT', 'IMAGE', 'IMAGEM FILMES', 'IMAGEM', 'IMOVISION', 'IMPERIAL CINEPIX', 'IMPRINT', 'IMPULS HOME ENTERTAINMENT', 'IMPULS ENTERTAINMENT', 'IMPULS HOME', 'IMPULS', 'IN-AKUSTIK', 'IN AKUSTIK', 'INAKUSTIK', 'INCEPTION MEDIA GROUP', 'INCEPTION MEDIA', 'INCEPTION GROUP', 'INCEPTION', 'INDEPENDENT', 'INDICAN', 'INDIE RIGHTS', 'INDIE', 
'INDIGO', 'INFO', 'INJOINGAN', 'INKED PICTURES', 'INKED', 'INSIDE OUT MUSIC', 'INSIDE MUSIC', 'INSIDE OUT', 'INSIDE', 'INTERCOM', 'INTERCONTINENTAL VIDEO', 'INTERCONTINENTAL', 'INTERGROOVE', 'INTERSCOPE', 'INVINCIBLE PICTURES', 'INVINCIBLE', 'ISLAND/MERCURY', 'ISLAND MERCURY', 'ISLANDMERCURY', 'ISLAND & MERCURY', 'ISLAND AND MERCURY', 'ISLAND', 'ITN', 'ITV DVD', 'ITV', 'IVC', 'IVE ENTERTAINMENT', 'IVE', 'J&R ADVENTURES', 'J&R', 'JR', 'JAKOB', 'JONU MEDIA', 'JONU', 'JRB PRODUCTIONS', 'JRB', 'JUST BRIDGE ENTERTAINMENT', 'JUST BRIDGE', 'JUST ENTERTAINMENT', 'JUST', 'KABOOM ENTERTAINMENT', 'KABOOM', 'KADOKAWA ENTERTAINMENT', 'KADOKAWA', 'KAIROS', 'KALEIDOSCOPE ENTERTAINMENT', 'KALEIDOSCOPE', 'KAM & RONSON ENTERPRISES', 'KAM & RONSON', 'KAM&RONSON ENTERPRISES', 'KAM&RONSON', 'KAM AND RONSON ENTERPRISES', 'KAM AND RONSON', 'KANA HOME VIDEO', 'KARMA FILMS', 'KARMA', 'KATZENBERGER', 'KAZE', - 'KBS MEDIA', 'KBS', 'KD MEDIA', 'KD', 'KING MEDIA', 'KING', 'KING RECORDS', 'KINO LORBER', 'KINO', 'KINO SWIAT', 'KINOKUNIYA', 'KINOWELT HOME ENTERTAINMENT/DVD', 'KINOWELT HOME ENTERTAINMENT', 'KINOWELT ENTERTAINMENT', 'KINOWELT HOME DVD', 'KINOWELT ENTERTAINMENT/DVD', 'KINOWELT DVD', 'KINOWELT', 'KIT PARKER FILMS', 'KIT PARKER', 'KITTY MEDIA', 'KNM HOME ENTERTAINMENT', 'KNM ENTERTAINMENT', 'KNM HOME', 'KNM', 'KOBA FILMS', 'KOBA', 'KOCH ENTERTAINMENT', 'KOCH MEDIA', 'KOCH', 'KRAKEN RELEASING', 'KRAKEN', 'KSCOPE', 'KSM', 'KULTUR', "L'ATELIER D'IMAGES", "LATELIER D'IMAGES", "L'ATELIER DIMAGES", 'LATELIER DIMAGES', "L ATELIER D'IMAGES", "L'ATELIER D IMAGES", - 'L ATELIER D IMAGES', "L'ATELIER", 'L ATELIER', 'LATELIER', 'LA AVENTURA AUDIOVISUAL', 'LA AVENTURA', 'LACE GROUP', 'LACE', 'LASER PARADISE', 'LAYONS', 'LCJ EDITIONS', 'LCJ', 'LE CHAT QUI FUME', 'LE PACTE', 'LEDICK FILMHANDEL', 'LEGEND', 'LEOMARK STUDIOS', 'LEOMARK', 'LEONINE FILMS', 'LEONINE', 'LICHTUNG MEDIA LTD', 'LICHTUNG LTD', 'LICHTUNG MEDIA LTD.', 'LICHTUNG LTD.', 'LICHTUNG MEDIA', 'LICHTUNG', 'LIGHTHOUSE HOME ENTERTAINMENT', 'LIGHTHOUSE ENTERTAINMENT', 'LIGHTHOUSE HOME', 'LIGHTHOUSE', 'LIGHTYEAR', 'LIONSGATE FILMS', 'LIONSGATE', 'LIZARD CINEMA TRADE', 'LLAMENTOL', 'LOBSTER FILMS', 'LOBSTER', 'LOGON', 'LORBER FILMS', 'LORBER', 'LOS BANDITOS FILMS', 'LOS BANDITOS', 'LOUD & PROUD RECORDS', 'LOUD AND PROUD RECORDS', 'LOUD & PROUD', 'LOUD AND PROUD', 'LSO LIVE', 'LUCASFILM', 'LUCKY RED', 'LUMIÈRE HOME ENTERTAINMENT', 'LUMIERE HOME ENTERTAINMENT', 'LUMIERE ENTERTAINMENT', 'LUMIERE HOME', 'LUMIERE', 'M6 VIDEO', 'M6', 'MAD DIMENSION', 'MADMAN ENTERTAINMENT', 'MADMAN', 'MAGIC BOX', 'MAGIC PLAY', 'MAGNA HOME ENTERTAINMENT', 'MAGNA ENTERTAINMENT', 'MAGNA HOME', 'MAGNA', 'MAGNOLIA PICTURES', 'MAGNOLIA', 'MAIDEN JAPAN', 'MAIDEN', 'MAJENG MEDIA', 'MAJENG', 'MAJESTIC HOME ENTERTAINMENT', 'MAJESTIC ENTERTAINMENT', 'MAJESTIC HOME', 'MAJESTIC', 'MANGA HOME ENTERTAINMENT', 'MANGA ENTERTAINMENT', 'MANGA HOME', 'MANGA', 'MANTA LAB', 'MAPLE STUDIOS', 'MAPLE', 'MARCO POLO PRODUCTION', 'MARCO POLO', 'MARIINSKY', 'MARVEL STUDIOS', 'MARVEL', 'MASCOT RECORDS', 'MASCOT', 'MASSACRE VIDEO', 'MASSACRE', 'MATCHBOX', 'MATRIX D', 'MAXAM', 'MAYA HOME ENTERTAINMENT', 'MAYA ENTERTAINMENT', 'MAYA HOME', 'MAYAT', 'MDG', 'MEDIA BLASTERS', 'MEDIA FACTORY', 'MEDIA TARGET DISTRIBUTION', 'MEDIA TARGET', 'MEDIAINVISION', 'MEDIATOON', 'MEDIATRES ESTUDIO', 'MEDIATRES STUDIO', 'MEDIATRES', 'MEDICI ARTS', 'MEDICI CLASSICS', 'MEDIUMRARE ENTERTAINMENT', 'MEDIUMRARE', 'MEDUSA', 'MEGASTAR', 'MEI AH', 'MELI MÉDIAS', 'MELI MEDIAS', 'MEMENTO FILMS', 'MEMENTO', 'MENEMSHA FILMS', 'MENEMSHA', 'MERCURY', 
'MERCURY STUDIOS', 'MERGE SOFT PRODUCTIONS', 'MERGE PRODUCTIONS', 'MERGE SOFT', 'MERGE', 'METAL BLADE RECORDS', 'METAL BLADE', 'METEOR', 'METRO-GOLDWYN-MAYER', 'METRO GOLDWYN MAYER', 'METROGOLDWYNMAYER', 'METRODOME VIDEO', 'METRODOME', 'METROPOLITAN', 'MFA+', 'MFA', 'MIG FILMGROUP', 'MIG', 'MILESTONE', 'MILL CREEK ENTERTAINMENT', 'MILL CREEK', 'MILLENNIUM MEDIA', 'MILLENNIUM', 'MIRAGE ENTERTAINMENT', 'MIRAGE', 'MIRAMAX', 'MISTERIYA ZVUKA', 'MK2', 'MODE RECORDS', 'MODE', 'MOMENTUM PICTURES', 'MONDO HOME ENTERTAINMENT', 'MONDO ENTERTAINMENT', 'MONDO HOME', 'MONDO MACABRO', 'MONGREL MEDIA', 'MONOLIT', 'MONOLITH VIDEO', 'MONOLITH', 'MONSTER PICTURES', 'MONSTER', 'MONTEREY VIDEO', 'MONTEREY', 'MONUMENT RELEASING', 'MONUMENT', 'MORNINGSTAR', 'MORNING STAR', 'MOSERBAER', 'MOVIEMAX', 'MOVINSIDE', 'MPI MEDIA GROUP', 'MPI MEDIA', 'MPI', 'MR. BONGO FILMS', 'MR BONGO FILMS', 'MR BONGO', 'MRG (MERIDIAN)', 'MRG MERIDIAN', 'MRG', 'MERIDIAN', 'MUBI', 'MUG SHOT PRODUCTIONS', 'MUG SHOT', 'MULTIMUSIC', 'MULTI-MUSIC', 'MULTI MUSIC', 'MUSE', 'MUSIC BOX FILMS', 'MUSIC BOX', 'MUSICBOX', 'MUSIC BROKERS', 'MUSIC THEORIES', 'MUSIC VIDEO DISTRIBUTORS', 'MUSIC VIDEO', 'MUSTANG ENTERTAINMENT', 'MUSTANG', 'MVD VISUAL', 'MVD', 'MVD/VSC', 'MVL', 'MVM ENTERTAINMENT', 'MVM', 'MYNDFORM', 'MYSTIC NIGHT PICTURES', 'MYSTIC NIGHT', 'NAMELESS MEDIA', 'NAMELESS', 'NAPALM RECORDS', 'NAPALM', 'NATIONAL ENTERTAINMENT MEDIA', 'NATIONAL ENTERTAINMENT', 'NATIONAL MEDIA', 'NATIONAL FILM ARCHIVE', 'NATIONAL ARCHIVE', 'NATIONAL FILM', 'NATIONAL GEOGRAPHIC', 'NAT GEO TV', 'NAT GEO', 'NGO', 'NAXOS', 'NBCUNIVERSAL ENTERTAINMENT JAPAN', 'NBC UNIVERSAL ENTERTAINMENT JAPAN', 'NBCUNIVERSAL JAPAN', 'NBC UNIVERSAL JAPAN', 'NBC JAPAN', 'NBO ENTERTAINMENT', 'NBO', 'NEOS', 'NETFLIX', 'NETWORK', 'NEW BLOOD', 'NEW DISC', 'NEW KSM', 'NEW LINE CINEMA', 'NEW LINE', 'NEW MOVIE TRADING CO. 
LTD', 'NEW MOVIE TRADING CO LTD', 'NEW MOVIE TRADING CO', 'NEW MOVIE TRADING', 'NEW WAVE FILMS', 'NEW WAVE', 'NFI', 'NHK', 'NIPPONART', 'NIS AMERICA', 'NJUTAFILMS', 'NOBLE ENTERTAINMENT', 'NOBLE', 'NORDISK FILM', 'NORDISK', 'NORSK FILM', 'NORSK', 'NORTH AMERICAN MOTION PICTURES', 'NOS AUDIOVISUAIS', 'NOTORIOUS PICTURES', 'NOTORIOUS', 'NOVA MEDIA', 'NOVA', 'NOVA SALES AND DISTRIBUTION', 'NOVA SALES & DISTRIBUTION', 'NSM', 'NSM RECORDS', 'NUCLEAR BLAST', 'NUCLEUS FILMS', 'NUCLEUS', 'OBERLIN MUSIC', 'OBERLIN', 'OBRAS-PRIMAS DO CINEMA', 'OBRAS PRIMAS DO CINEMA', 'OBRASPRIMAS DO CINEMA', 'OBRAS-PRIMAS CINEMA', 'OBRAS PRIMAS CINEMA', 'OBRASPRIMAS CINEMA', 'OBRAS-PRIMAS', 'OBRAS PRIMAS', 'OBRASPRIMAS', 'ODEON', 'OFDB FILMWORKS', 'OFDB', 'OLIVE FILMS', 'OLIVE', 'ONDINE', 'ONSCREEN FILMS', 'ONSCREEN', 'OPENING DISTRIBUTION', 'OPERA AUSTRALIA', 'OPTIMUM HOME ENTERTAINMENT', 'OPTIMUM ENTERTAINMENT', 'OPTIMUM HOME', 'OPTIMUM', 'OPUS ARTE', 'ORANGE STUDIO', 'ORANGE', 'ORLANDO EASTWOOD FILMS', 'ORLANDO FILMS', 'ORLANDO EASTWOOD', 'ORLANDO', 'ORUSTAK PICTURES', 'ORUSTAK', 'OSCILLOSCOPE PICTURES', 'OSCILLOSCOPE', 'OUTPLAY', 'PALISADES TARTAN', 'PAN VISION', 'PANVISION', 'PANAMINT CINEMA', 'PANAMINT', 'PANDASTORM ENTERTAINMENT', 'PANDA STORM ENTERTAINMENT', 'PANDASTORM', 'PANDA STORM', 'PANDORA FILM', 'PANDORA', 'PANEGYRIC', 'PANORAMA', 'PARADE DECK FILMS', 'PARADE DECK', 'PARADISE', 'PARADISO FILMS', 'PARADOX', 'PARAMOUNT PICTURES', 'PARAMOUNT', 'PARIS FILMES', 'PARIS FILMS', 'PARIS', 'PARK CIRCUS', 'PARLOPHONE', 'PASSION RIVER', 'PATHE DISTRIBUTION', 'PATHE', 'PBS', 'PEACE ARCH TRINITY', 'PECCADILLO PICTURES', 'PEPPERMINT', 'PHASE 4 FILMS', 'PHASE 4', 'PHILHARMONIA BAROQUE', 'PICTURE HOUSE ENTERTAINMENT', 'PICTURE ENTERTAINMENT', 'PICTURE HOUSE', 'PICTURE', 'PIDAX', - 'PINK FLOYD RECORDS', 'PINK FLOYD', 'PINNACLE FILMS', 'PINNACLE', 'PLAIN', 'PLATFORM ENTERTAINMENT LIMITED', 'PLATFORM ENTERTAINMENT LTD', 'PLATFORM ENTERTAINMENT LTD.', 'PLATFORM ENTERTAINMENT', 'PLATFORM', 'PLAYARTE', 'PLG UK CLASSICS', 'PLG UK', 'PLG', 'POLYBAND & TOPPIC VIDEO/WVG', 'POLYBAND AND TOPPIC VIDEO/WVG', 'POLYBAND & TOPPIC VIDEO WVG', 'POLYBAND & TOPPIC VIDEO AND WVG', 'POLYBAND & TOPPIC VIDEO & WVG', 'POLYBAND AND TOPPIC VIDEO WVG', 'POLYBAND AND TOPPIC VIDEO AND WVG', 'POLYBAND AND TOPPIC VIDEO & WVG', 'POLYBAND & TOPPIC VIDEO', 'POLYBAND AND TOPPIC VIDEO', 'POLYBAND & TOPPIC', 'POLYBAND AND TOPPIC', 'POLYBAND', 'WVG', 'POLYDOR', 'PONY', 'PONY CANYON', 'POTEMKINE', 'POWERHOUSE FILMS', 'POWERHOUSE', 'POWERSTATIOM', 'PRIDE & JOY', 'PRIDE AND JOY', 'PRINZ MEDIA', 'PRINZ', 'PRIS AUDIOVISUAIS', 'PRO VIDEO', 'PRO-VIDEO', 'PRO-MOTION', 'PRO MOTION', 'PROD. 
JRB', 'PROD JRB', 'PRODISC', 'PROKINO', 'PROVOGUE RECORDS', 'PROVOGUE', 'PROWARE', 'PULP VIDEO', 'PULP', 'PULSE VIDEO', 'PULSE', 'PURE AUDIO RECORDINGS', 'PURE AUDIO', 'PURE FLIX ENTERTAINMENT', 'PURE FLIX', 'PURE ENTERTAINMENT', 'PYRAMIDE VIDEO', 'PYRAMIDE', 'QUALITY FILMS', 'QUALITY', 'QUARTO VALLEY RECORDS', 'QUARTO VALLEY', 'QUESTAR', 'R SQUARED FILMS', 'R SQUARED', 'RAPID EYE MOVIES', 'RAPID EYE', 'RARO VIDEO', 'RARO', 'RAROVIDEO U.S.', 'RAROVIDEO US', 'RARO VIDEO US', 'RARO VIDEO U.S.', 'RARO U.S.', 'RARO US', 'RAVEN BANNER RELEASING', 'RAVEN BANNER', 'RAVEN', 'RAZOR DIGITAL ENTERTAINMENT', 'RAZOR DIGITAL', 'RCA', 'RCO LIVE', 'RCO', 'RCV', 'REAL GONE MUSIC', 'REAL GONE', 'REANIMEDIA', 'REANI MEDIA', 'REDEMPTION', 'REEL', 'RELIANCE HOME VIDEO & GAMES', 'RELIANCE HOME VIDEO AND GAMES', 'RELIANCE HOME VIDEO', 'RELIANCE VIDEO', 'RELIANCE HOME', 'RELIANCE', 'REM CULTURE', 'REMAIN IN LIGHT', 'REPRISE', 'RESEN', 'RETROMEDIA', 'REVELATION FILMS LTD.', 'REVELATION FILMS LTD', 'REVELATION FILMS', 'REVELATION LTD.', 'REVELATION LTD', 'REVELATION', 'REVOLVER ENTERTAINMENT', 'REVOLVER', 'RHINO MUSIC', 'RHINO', 'RHV', 'RIGHT STUF', 'RIMINI EDITIONS', 'RISING SUN MEDIA', 'RLJ ENTERTAINMENT', 'RLJ', 'ROADRUNNER RECORDS', 'ROADSHOW ENTERTAINMENT', 'ROADSHOW', 'RONE', 'RONIN FLIX', 'ROTANA HOME ENTERTAINMENT', 'ROTANA ENTERTAINMENT', 'ROTANA HOME', 'ROTANA', 'ROUGH TRADE', - 'ROUNDER', 'SAFFRON HILL FILMS', 'SAFFRON HILL', 'SAFFRON', 'SAMUEL GOLDWYN FILMS', 'SAMUEL GOLDWYN', 'SAN FRANCISCO SYMPHONY', 'SANDREW METRONOME', 'SAPHRANE', 'SAVOR', 'SCANBOX ENTERTAINMENT', 'SCANBOX', 'SCENIC LABS', 'SCHRÖDERMEDIA', 'SCHRODERMEDIA', 'SCHRODER MEDIA', 'SCORPION RELEASING', 'SCORPION', 'SCREAM TEAM RELEASING', 'SCREAM TEAM', 'SCREEN MEDIA', 'SCREEN', 'SCREENBOUND PICTURES', 'SCREENBOUND', 'SCREENWAVE MEDIA', 'SCREENWAVE', 'SECOND RUN', 'SECOND SIGHT', 'SEEDSMAN GROUP', 'SELECT VIDEO', 'SELECTA VISION', 'SENATOR', 'SENTAI FILMWORKS', 'SENTAI', 'SEVEN7', 'SEVERIN FILMS', 'SEVERIN', 'SEVILLE', 'SEYONS ENTERTAINMENT', 'SEYONS', 'SF STUDIOS', 'SGL ENTERTAINMENT', 'SGL', 'SHAMELESS', 'SHAMROCK MEDIA', 'SHAMROCK', 'SHANGHAI EPIC MUSIC ENTERTAINMENT', 'SHANGHAI EPIC ENTERTAINMENT', 'SHANGHAI EPIC MUSIC', 'SHANGHAI MUSIC ENTERTAINMENT', 'SHANGHAI ENTERTAINMENT', 'SHANGHAI MUSIC', 'SHANGHAI', 'SHEMAROO', 'SHOCHIKU', 'SHOCK', 'SHOGAKU KAN', 'SHOUT FACTORY', 'SHOUT! 
FACTORY', 'SHOUT', 'SHOUT!', 'SHOWBOX', 'SHOWTIME ENTERTAINMENT', 'SHOWTIME', 'SHRIEK SHOW', 'SHUDDER', 'SIDONIS', 'SIDONIS CALYSTA', 'SIGNAL ONE ENTERTAINMENT', 'SIGNAL ONE', 'SIGNATURE ENTERTAINMENT', 'SIGNATURE', 'SILVER VISION', 'SINISTER FILM', 'SINISTER', 'SIREN VISUAL ENTERTAINMENT', 'SIREN VISUAL', 'SIREN ENTERTAINMENT', 'SIREN', 'SKANI', 'SKY DIGI', - 'SLASHER // VIDEO', 'SLASHER / VIDEO', 'SLASHER VIDEO', 'SLASHER', 'SLOVAK FILM INSTITUTE', 'SLOVAK FILM', 'SFI', 'SM LIFE DESIGN GROUP', 'SMOOTH PICTURES', 'SMOOTH', 'SNAPPER MUSIC', 'SNAPPER', 'SODA PICTURES', 'SODA', 'SONO LUMINUS', 'SONY MUSIC', 'SONY PICTURES', 'SONY', 'SONY PICTURES CLASSICS', 'SONY CLASSICS', 'SOUL MEDIA', 'SOUL', 'SOULFOOD MUSIC DISTRIBUTION', 'SOULFOOD DISTRIBUTION', 'SOULFOOD MUSIC', 'SOULFOOD', 'SOYUZ', 'SPECTRUM', 'SPENTZOS FILM', 'SPENTZOS', 'SPIRIT ENTERTAINMENT', 'SPIRIT', 'SPIRIT MEDIA GMBH', 'SPIRIT MEDIA', 'SPLENDID ENTERTAINMENT', 'SPLENDID FILM', 'SPO', 'SQUARE ENIX', 'SRI BALAJI VIDEO', 'SRI BALAJI', 'SRI', 'SRI VIDEO', 'SRS CINEMA', 'SRS', 'SSO RECORDINGS', 'SSO', 'ST2 MUSIC', 'ST2', 'STAR MEDIA ENTERTAINMENT', 'STAR ENTERTAINMENT', 'STAR MEDIA', 'STAR', 'STARLIGHT', 'STARZ / ANCHOR BAY', 'STARZ ANCHOR BAY', 'STARZ', 'ANCHOR BAY', 'STER KINEKOR', 'STERLING ENTERTAINMENT', 'STERLING', 'STINGRAY', 'STOCKFISCH RECORDS', 'STOCKFISCH', 'STRAND RELEASING', 'STRAND', 'STUDIO 4K', 'STUDIO CANAL', 'STUDIO GHIBLI', 'GHIBLI', 'STUDIO HAMBURG ENTERPRISES', 'HAMBURG ENTERPRISES', 'STUDIO HAMBURG', 'HAMBURG', 'STUDIO S', 'SUBKULTUR ENTERTAINMENT', 'SUBKULTUR', 'SUEVIA FILMS', 'SUEVIA', 'SUMMIT ENTERTAINMENT', 'SUMMIT', 'SUNFILM ENTERTAINMENT', 'SUNFILM', 'SURROUND RECORDS', 'SURROUND', 'SVENSK FILMINDUSTRI', 'SVENSK', 'SWEN FILMES', 'SWEN FILMS', 'SWEN', 'SYNAPSE FILMS', 'SYNAPSE', 'SYNDICADO', 'SYNERGETIC', 'T- SERIES', 'T-SERIES', 'T SERIES', 'TSERIES', 'T.V.P.', 'TVP', 'TACET RECORDS', 'TACET', 'TAI SENG', 'TAI SHENG', 'TAKEONE', 'TAKESHOBO', 'TAMASA DIFFUSION', 'TC ENTERTAINMENT', 'TC', 'TDK', 'TEAM MARKETING', 'TEATRO REAL', 'TEMA DISTRIBUCIONES', 'TEMPE DIGITAL', 'TF1 VIDÉO', 'TF1 VIDEO', 'TF1', 'THE BLU', 'BLU', 'THE ECSTASY OF FILMS', 'THE FILM DETECTIVE', 'FILM DETECTIVE', 'THE JOKERS', 'JOKERS', 'THE ON', 'ON', 'THIMFILM', 'THIM FILM', 'THIM', 'THIRD WINDOW FILMS', 'THIRD WINDOW', '3RD WINDOW FILMS', '3RD WINDOW', 'THUNDERBEAN ANIMATION', 'THUNDERBEAN', 'THUNDERBIRD RELEASING', 'THUNDERBIRD', 'TIBERIUS FILM', 'TIME LIFE', 'TIMELESS MEDIA GROUP', 'TIMELESS MEDIA', 'TIMELESS GROUP', 'TIMELESS', 'TLA RELEASING', 'TLA', 'TOBIS FILM', 'TOBIS', 'TOEI', 'TOHO', 'TOKYO SHOCK', 'TOKYO', 'TONPOOL MEDIEN GMBH', 'TONPOOL MEDIEN', 'TOPICS ENTERTAINMENT', 'TOPICS', 'TOUCHSTONE PICTURES', 'TOUCHSTONE', 'TRANSMISSION FILMS', 'TRANSMISSION', 'TRAVEL VIDEO STORE', 'TRIART', 'TRIGON FILM', 'TRIGON', 'TRINITY HOME ENTERTAINMENT', 'TRINITY ENTERTAINMENT', 'TRINITY HOME', 'TRINITY', 'TRIPICTURES', 'TRI-PICTURES', 'TRI PICTURES', 'TROMA', 'TURBINE MEDIEN', 'TURTLE RECORDS', 'TURTLE', 'TVA FILMS', 'TVA', 'TWILIGHT TIME', 'TWILIGHT', 'TT', 'TWIN CO., LTD.', 'TWIN CO, LTD.', 'TWIN CO., LTD', 'TWIN CO, LTD', 'TWIN CO LTD', 'TWIN LTD', 'TWIN CO.', 'TWIN CO', 'TWIN', 'UCA', 'UDR', 'UEK', 'UFA/DVD', 'UFA DVD', 'UFADVD', 'UGC PH', 'ULTIMATE3DHEAVEN', 'ULTRA', 'UMBRELLA ENTERTAINMENT', 'UMBRELLA', 'UMC', "UNCORK'D ENTERTAINMENT", 'UNCORKD ENTERTAINMENT', 'UNCORK D ENTERTAINMENT', "UNCORK'D", 'UNCORK D', 'UNCORKD', 'UNEARTHED FILMS', 'UNEARTHED', 'UNI DISC', 'UNIMUNDOS', 'UNITEL', 'UNIVERSAL MUSIC', 'UNIVERSAL SONY PICTURES HOME 
ENTERTAINMENT', 'UNIVERSAL SONY PICTURES ENTERTAINMENT', 'UNIVERSAL SONY PICTURES HOME', 'UNIVERSAL SONY PICTURES', 'UNIVERSAL HOME ENTERTAINMENT', 'UNIVERSAL ENTERTAINMENT', - 'UNIVERSAL HOME', 'UNIVERSAL STUDIOS', 'UNIVERSAL', 'UNIVERSE LASER & VIDEO CO.', 'UNIVERSE LASER AND VIDEO CO.', 'UNIVERSE LASER & VIDEO CO', 'UNIVERSE LASER AND VIDEO CO', 'UNIVERSE LASER CO.', 'UNIVERSE LASER CO', 'UNIVERSE LASER', 'UNIVERSUM FILM', 'UNIVERSUM', 'UTV', 'VAP', 'VCI', 'VENDETTA FILMS', 'VENDETTA', 'VERSÁTIL HOME VIDEO', 'VERSÁTIL VIDEO', 'VERSÁTIL HOME', 'VERSÁTIL', 'VERSATIL HOME VIDEO', 'VERSATIL VIDEO', 'VERSATIL HOME', 'VERSATIL', 'VERTICAL ENTERTAINMENT', 'VERTICAL', 'VÉRTICE 360º', 'VÉRTICE 360', 'VERTICE 360o', 'VERTICE 360', 'VERTIGO BERLIN', 'VÉRTIGO FILMS', 'VÉRTIGO', 'VERTIGO FILMS', 'VERTIGO', 'VERVE PICTURES', 'VIA VISION ENTERTAINMENT', 'VIA VISION', 'VICOL ENTERTAINMENT', 'VICOL', 'VICOM', 'VICTOR ENTERTAINMENT', 'VICTOR', 'VIDEA CDE', 'VIDEO FILM EXPRESS', 'VIDEO FILM', 'VIDEO EXPRESS', 'VIDEO MUSIC, INC.', 'VIDEO MUSIC, INC', 'VIDEO MUSIC INC.', 'VIDEO MUSIC INC', 'VIDEO MUSIC', 'VIDEO SERVICE CORP.', 'VIDEO SERVICE CORP', 'VIDEO SERVICE', 'VIDEO TRAVEL', 'VIDEOMAX', 'VIDEO MAX', 'VII PILLARS ENTERTAINMENT', 'VII PILLARS', 'VILLAGE FILMS', 'VINEGAR SYNDROME', 'VINEGAR', 'VS', 'VINNY MOVIES', 'VINNY', 'VIRGIL FILMS & ENTERTAINMENT', 'VIRGIL FILMS AND ENTERTAINMENT', 'VIRGIL ENTERTAINMENT', 'VIRGIL FILMS', 'VIRGIL', 'VIRGIN RECORDS', 'VIRGIN', 'VISION FILMS', 'VISION', 'VISUAL ENTERTAINMENT GROUP', - 'VISUAL GROUP', 'VISUAL ENTERTAINMENT', 'VISUAL', 'VIVENDI VISUAL ENTERTAINMENT', 'VIVENDI VISUAL', 'VIVENDI', 'VIZ PICTURES', 'VIZ', 'VLMEDIA', 'VL MEDIA', 'VL', 'VOLGA', 'VVS FILMS', 'VVS', 'VZ HANDELS GMBH', 'VZ HANDELS', 'WARD RECORDS', 'WARD', 'WARNER BROS.', 'WARNER BROS', 'WARNER ARCHIVE', 'WARNER ARCHIVE COLLECTION', 'WAC', 'WARNER', 'WARNER MUSIC', 'WEA', 'WEINSTEIN COMPANY', 'WEINSTEIN', 'WELL GO USA', 'WELL GO', 'WELTKINO FILMVERLEIH', 'WEST VIDEO', 'WEST', 'WHITE PEARL MOVIES', 'WHITE PEARL', 'WICKED-VISION MEDIA', 'WICKED VISION MEDIA', 'WICKEDVISION MEDIA', 'WICKED-VISION', 'WICKED VISION', 'WICKEDVISION', 'WIENERWORLD', 'WILD BUNCH', 'WILD EYE RELEASING', 'WILD EYE', 'WILD SIDE VIDEO', 'WILD SIDE', 'WME', 'WOLFE VIDEO', 'WOLFE', 'WORD ON FIRE', 'WORKS FILM GROUP', 'WORLD WRESTLING', 'WVG MEDIEN', 'WWE STUDIOS', 'WWE', 'X RATED KULT', 'X-RATED KULT', 'X RATED CULT', 'X-RATED CULT', 'X RATED', 'X-RATED', 'XCESS', 'XLRATOR', 'XT VIDEO', 'XT', 'YAMATO VIDEO', 'YAMATO', 'YASH RAJ FILMS', 'YASH RAJS', 'ZEITGEIST FILMS', 'ZEITGEIST', 'ZENITH PICTURES', 'ZENITH', 'ZIMA', 'ZYLO', 'ZYX MUSIC', 'ZYX', - 'MASTERS OF CINEMA', 'MOC' - ] - distributor_out = "" - if distributor_in not in [None, "None", ""]: - for each in distributor_list: - if distributor_in.upper() == each: - distributor_out = each - return distributor_out - - def get_video_codec(self, bdinfo): + async def get_video_codec(self, bdinfo): codecs = { "MPEG-2 Video": "MPEG-2", "MPEG-4 AVC Video": "AVC", @@ -3013,7 +1061,7 @@ def get_video_codec(self, bdinfo): codec = codecs.get(bdinfo['video'][0]['codec'], "") return codec - def get_video_encode(self, mi, type, bdinfo): + async def get_video_encode(self, mi, type, bdinfo): video_encode = "" codec = "" bit_depth = '0' @@ -3058,7 +1106,7 @@ def get_video_encode(self, mi, type, bdinfo): video_codec = f"MPEG-{mi['media']['track'][1].get('Format_Version')}" return video_encode, video_codec, has_encode_settings, bit_depth - def get_edition(self, video, bdinfo, filelist, 
manual_edition): + async def get_edition(self, video, bdinfo, filelist, manual_edition): if video.lower().startswith('dc'): video = video.replace('dc', '', 1) @@ -3123,584 +1171,6 @@ def get_edition(self, video, bdinfo, filelist, manual_edition): return edition, repack - """ - Create Torrent - """ - class CustomTorrent(torf.Torrent): - # Default piece size limits - torf.Torrent.piece_size_min = 16384 # 16 KiB - torf.Torrent.piece_size_max = 268435456 # 256 MiB - - def __init__(self, meta, *args, **kwargs): - super().__init__(*args, **kwargs) - - # Override piece_size_max if meta['max_piece_size'] is specified - if 'max_piece_size' in meta and meta['max_piece_size']: - try: - max_piece_size_mib = int(meta['max_piece_size']) * 1024 * 1024 # Convert MiB to bytes - self.piece_size_max = min(max_piece_size_mib, torf.Torrent.piece_size_max) - except ValueError: - self.piece_size_max = torf.Torrent.piece_size_max # Fallback to default if conversion fails - else: - self.piece_size_max = torf.Torrent.piece_size_max - - # Calculate and set the piece size - # total_size = self._calculate_total_size() - # piece_size = self.calculate_piece_size(total_size, self.piece_size_min, self.piece_size_max, self.files) - self.metainfo['info']['piece length'] = self._piece_size - - @property - def piece_size(self): - return self._piece_size - - @piece_size.setter - def piece_size(self, value): - if value is None: - total_size = self._calculate_total_size() - value = self.calculate_piece_size(total_size, self.piece_size_min, self.piece_size_max, self.files) - self._piece_size = value - self.metainfo['info']['piece length'] = value # Ensure 'piece length' is set - - @classmethod - def calculate_piece_size(cls, total_size, min_size, max_size, files): - file_count = len(files) - # console.print(f"[red]Calculating piece size for {file_count} files") - - our_min_size = 16384 - our_max_size = max_size if max_size else 268435456 # Default to 256 MiB if max_size is None - piece_size = 4194304 # Start with 4 MiB - - num_pieces = math.ceil(total_size / piece_size) - - # Initial torrent_file_size calculation based on file_count - # More paths = greater error in pathname_bytes, roughly recalibrate - if file_count > 1000: - torrent_file_size = 20 + (num_pieces * 20) + int(cls._calculate_pathname_bytes(files) * 71 / 100) - elif file_count > 500: - torrent_file_size = 20 + (num_pieces * 20) + int(cls._calculate_pathname_bytes(files) * 4 / 5) - else: - torrent_file_size = 20 + (num_pieces * 20) + cls._calculate_pathname_bytes(files) - - # iteration = 0 # Track the number of iterations - # print(f"Initial piece size: {piece_size} bytes") - # print(f"Initial num_pieces: {num_pieces}, Initial torrent_file_size: {torrent_file_size} bytes") - - # Adjust the piece size to fit within the constraints - while not ((750 <= num_pieces <= 2200 or num_pieces < 750 and 40960 <= torrent_file_size <= 250000) and torrent_file_size <= 250000): - # iteration += 1 - # print(f"\nIteration {iteration}:") - # print(f"Current piece_size: {piece_size} bytes") - # print(f"Current num_pieces: {num_pieces}, Current torrent_file_size: {torrent_file_size} bytes") - if num_pieces > 1000 and num_pieces < 2000 and torrent_file_size < 250000: - break - elif num_pieces < 1500 and torrent_file_size >= 250000: - piece_size *= 2 - # print(f"Doubled piece_size to {piece_size} bytes (num_pieces < 1500 and torrent_file_size >= 250 KiB)") - if piece_size > our_max_size: - piece_size = our_max_size - # print(f"piece_size exceeded max_size, set to our_max_size: 
{our_max_size} bytes") - break - elif num_pieces < 750: - piece_size //= 2 - # print(f"Halved piece_size to {piece_size} bytes (num_pieces < 750)") - if piece_size < our_min_size: - piece_size = our_min_size - # print(f"piece_size went below min_size, set to our_min_size: {our_min_size} bytes") - break - elif 40960 < torrent_file_size < 250000: - # print(f"torrent_file_size is between 40 KiB and 250 KiB, exiting loop.") - break - elif num_pieces > 2200: - piece_size *= 2 - # print(f"Doubled piece_size to {piece_size} bytes (num_pieces > 2500)") - if piece_size > our_max_size: - piece_size = our_max_size - # print(f"piece_size exceeded max_size, set to our_max_size: {our_max_size} bytes") - break - elif torrent_file_size < 2048: - # print(f"torrent_file_size is less than 2 KiB, exiting loop.") - break - elif torrent_file_size > 250000: - piece_size *= 2 - # print(f"Doubled piece_size to {piece_size} bytes (torrent_file_size > 250 KiB)") - if piece_size > our_max_size: - piece_size = our_max_size - # print(f"piece_size exceeded max_size, set to our_max_size: {our_max_size} bytes") - cli_ui.warning('WARNING: .torrent size will exceed 250 KiB!') - break - - # Update num_pieces - num_pieces = math.ceil(total_size / piece_size) - - # Recalculate torrent_file_size based on file_count in each iteration - if file_count > 1000: - torrent_file_size = 20 + (num_pieces * 20) + int(cls._calculate_pathname_bytes(files) * 71 / 100) - elif file_count > 500: - torrent_file_size = 20 + (num_pieces * 20) + int(cls._calculate_pathname_bytes(files) * 4 / 5) - else: - torrent_file_size = 20 + (num_pieces * 20) + cls._calculate_pathname_bytes(files) - - # print(f"\nFinal piece_size: {piece_size} bytes after {iteration} iterations.") - # print(f"Final num_pieces: {num_pieces}, Final torrent_file_size: {torrent_file_size} bytes") - return piece_size - - def _calculate_total_size(self): - total_size = sum(file.size for file in self.files) - return total_size - - @classmethod - def _calculate_pathname_bytes(cls, files): - total_pathname_bytes = sum(len(str(file).encode('utf-8')) for file in files) - return total_pathname_bytes - - def validate_piece_size(self): - if not hasattr(self, '_piece_size') or self._piece_size is None: - self.piece_size = self.calculate_piece_size(self._calculate_total_size(), self.piece_size_min, self.piece_size_max, self.files) - self.metainfo['info']['piece length'] = self.piece_size # Ensure 'piece length' is set - - def create_torrent(self, meta, path, output_filename): - # Handle directories and file inclusion logic - if meta['isdir']: - if meta['keep_folder']: - cli_ui.info('--keep-folder was specified. 
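# Hedged sketch of the .torrent size estimate used by CustomTorrent.calculate_piece_size()
# above: roughly a 20-byte fixed overhead, 20 bytes per piece hash, plus the encoded
# pathname bytes, with the loop aiming for 750-2200 pieces and a file under 250,000 bytes.
# The numbers below are illustrative only.
import math

def estimate_torrent_size(total_size, piece_size, pathname_bytes):
    num_pieces = math.ceil(total_size / piece_size)
    return 20 + (num_pieces * 20) + pathname_bytes

# e.g. a single 30 GiB file with 8 MiB pieces and a 60-byte path:
# ceil(32212254720 / 8388608) = 3840 pieces -> 20 + 76800 + 60 = 76880 bytes (~75 KiB),
# comfortably under the 250,000-byte ceiling targeted above.
print(estimate_torrent_size(30 * 1024**3, 8 * 1024**2, 60))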
Using complete folder for torrent creation.') - path = path - else: - os.chdir(path) - globs = glob.glob1(path, "*.mkv") + glob.glob1(path, "*.mp4") + glob.glob1(path, "*.ts") - no_sample_globs = [] - for file in globs: - if not file.lower().endswith('sample.mkv') or "!sample" in file.lower(): - no_sample_globs.append(os.path.abspath(f"{path}{os.sep}{file}")) - if len(no_sample_globs) == 1: - path = meta['filelist'][0] - if meta['is_disc']: - include, exclude = "", "" - else: - exclude = ["*.*", "*sample.mkv", "!sample*.*"] - include = ["*.mkv", "*.mp4", "*.ts"] - - # Create and write the new torrent using the CustomTorrent class - torrent = self.CustomTorrent( - meta=meta, - path=path, - trackers=["https://fake.tracker"], - source="L4G", - private=True, - exclude_globs=exclude or [], - include_globs=include or [], - creation_date=datetime.now(), - comment="Created by L4G's Upload Assistant", - created_by="L4G's Upload Assistant" - ) - - # Ensure piece size is validated before writing - torrent.validate_piece_size() - - # Generate and write the new torrent - torrent.generate(callback=self.torf_cb, interval=5) - torrent.write(f"{meta['base_dir']}/tmp/{meta['uuid']}/{output_filename}.torrent", overwrite=True) - torrent.verify_filesize(path) - - console.print("[bold green].torrent created", end="\r") - return torrent - - def torf_cb(self, torrent, filepath, pieces_done, pieces_total): - # print(f'{pieces_done/pieces_total*100:3.0f} % done') - cli_ui.info_progress("Hashing...", pieces_done, pieces_total) - - def create_random_torrents(self, base_dir, uuid, num, path): - manual_name = re.sub(r"[^0-9a-zA-Z\[\]\'\-]+", ".", os.path.basename(path)) - base_torrent = Torrent.read(f"{base_dir}/tmp/{uuid}/BASE.torrent") - for i in range(1, int(num) + 1): - new_torrent = base_torrent - new_torrent.metainfo['info']['entropy'] = random.randint(1, 999999) - Torrent.copy(new_torrent).write(f"{base_dir}/tmp/{uuid}/[RAND-{i}]{manual_name}.torrent", overwrite=True) - - def create_base_from_existing_torrent(self, torrentpath, base_dir, uuid): - if os.path.exists(torrentpath): - base_torrent = Torrent.read(torrentpath) - base_torrent.trackers = ['https://fake.tracker'] - base_torrent.comment = "Created by L4G's Upload Assistant" - base_torrent.created_by = "Created by L4G's Upload Assistant" - # Remove Un-whitelisted info from torrent - for each in list(base_torrent.metainfo['info']): - if each not in ('files', 'length', 'name', 'piece length', 'pieces', 'private', 'source'): - base_torrent.metainfo['info'].pop(each, None) - for each in list(base_torrent.metainfo): - if each not in ('announce', 'comment', 'creation date', 'created by', 'encoding', 'info'): - base_torrent.metainfo.pop(each, None) - base_torrent.source = 'L4G' - base_torrent.private = True - Torrent.copy(base_torrent).write(f"{base_dir}/tmp/{uuid}/BASE.torrent", overwrite=True) - - """ - Upload Screenshots - """ - def upload_image_task(self, args): - image, img_host, config, meta = args - try: - timeout = 60 # Default timeout - img_url, raw_url, web_url = None, None, None - - if img_host == "imgbox": - try: - # Call the asynchronous imgbox_upload function - loop = asyncio.get_event_loop() - image_list = loop.run_until_complete( - self.imgbox_upload(os.getcwd(), [image], meta, return_dict={}) - ) - if image_list and all( - 'img_url' in img and 'raw_url' in img and 'web_url' in img for img in image_list - ): - img_url = image_list[0]['img_url'] - raw_url = image_list[0]['raw_url'] - web_url = image_list[0]['web_url'] - else: - return { - 'status': 
'failed', - 'reason': "Imgbox upload failed. No valid URLs returned." - } - except Exception as e: - return { - 'status': 'failed', - 'reason': f"Error during Imgbox upload: {str(e)}" - } - - elif img_host == "ptpimg": - payload = { - 'format': 'json', - 'api_key': config['DEFAULT']['ptpimg_api'] - } - files = [('file-upload[0]', open(image, 'rb'))] - headers = {'referer': 'https://ptpimg.me/index.php'} - response = requests.post( - "https://ptpimg.me/upload.php", headers=headers, data=payload, files=files, timeout=timeout - ) - response_data = response.json() - if response_data: - code = response_data[0]['code'] - ext = response_data[0]['ext'] - img_url = f"https://ptpimg.me/{code}.{ext}" - raw_url = img_url - web_url = img_url - - elif img_host == "imgbb": - url = "https://api.imgbb.com/1/upload" - try: - with open(image, "rb") as img_file: - encoded_image = base64.b64encode(img_file.read()).decode('utf8') - - data = { - 'key': config['DEFAULT']['imgbb_api'], - 'image': encoded_image, - } - - response = requests.post(url, data=data, timeout=timeout) - - if meta['debug']: - console.print(f"[yellow]Response status code: {response.status_code}") - console.print(f"[yellow]Response content: {response.content.decode('utf-8')}") - - response_data = response.json() - if response.status_code != 200 or not response_data.get('success'): - console.print("[yellow]imgbb failed, trying next image host") - return {'status': 'failed', 'reason': 'imgbb upload failed'} - - img_url = response_data['data'].get('medium', {}).get('url') or response_data['data']['thumb']['url'] - raw_url = response_data['data']['image']['url'] - web_url = response_data['data']['url_viewer'] - - if meta['debug']: - console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}") - - return {'status': 'success', 'img_url': img_url, 'raw_url': raw_url, 'web_url': web_url} - - except requests.exceptions.Timeout: - console.print("[red]Request timed out. The server took too long to respond.") - return {'status': 'failed', 'reason': 'Request timed out'} - - except ValueError as e: # JSON decoding error - console.print(f"[red]Invalid JSON response: {e}") - return {'status': 'failed', 'reason': 'Invalid JSON response'} - - except requests.exceptions.RequestException as e: - console.print(f"[red]Request failed with error: {e}") - return {'status': 'failed', 'reason': str(e)} - - elif img_host == "ptscreens": - url = "https://ptscreens.com/api/1/upload" - try: - files = { - 'source': ('file-upload[0]', open(image, 'rb')), - } - headers = { - 'X-API-Key': config['DEFAULT']['ptscreens_api'] - } - response = requests.post(url, headers=headers, files=files, timeout=timeout) - if meta['debug']: - console.print(f"[yellow]Response status code: {response.status_code}") - console.print(f"[yellow]Response content: {response.content.decode('utf-8')}") - - response_data = response.json() - if response_data.get('status_code') != 200: - console.print("[yellow]ptscreens failed, trying next image host") - return {'status': 'failed', 'reason': 'ptscreens upload failed'} - - img_url = response_data['image']['medium']['url'] - raw_url = response_data['image']['url'] - web_url = response_data['image']['url_viewer'] - if meta['debug']: - console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}") - - except requests.exceptions.Timeout: - console.print("[red]Request timed out. 
The server took too long to respond.") - return {'status': 'failed', 'reason': 'Request timed out'} - except requests.exceptions.RequestException as e: - console.print(f"[red]Request failed with error: {e}") - return {'status': 'failed', 'reason': str(e)} - - elif img_host == "oeimg": - url = "https://imgoe.download/api/1/upload" - try: - data = { - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - headers = { - 'X-API-Key': self.config['DEFAULT']['oeimg_api'], - } - response = requests.post(url, data=data, headers=headers, timeout=timeout) - if meta['debug']: - console.print(f"[yellow]Response status code: {response.status_code}") - console.print(f"[yellow]Response content: {response.content.decode('utf-8')}") - - response_data = response.json() - if response.status_code != 200 or not response_data.get('success'): - console.print("[yellow]OEimg failed, trying next image host") - return {'status': 'failed', 'reason': 'OEimg upload failed'} - - img_url = response_data['data']['image']['url'] - raw_url = response_data['data']['image']['url'] - web_url = response_data['data']['url_viewer'] - if meta['debug']: - console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}") - - except requests.exceptions.Timeout: - console.print("[red]Request timed out. The server took too long to respond.") - return {'status': 'failed', 'reason': 'Request timed out'} - except requests.exceptions.RequestException as e: - console.print(f"[red]Request failed with error: {e}") - return {'status': 'failed', 'reason': str(e)} - - elif img_host == "pixhost": - url = "https://api.pixhost.to/images" - data = { - 'content_type': '0', - 'max_th_size': 350 - } - files = { - 'img': ('file-upload[0]', open(image, 'rb')) - } - response = requests.post(url, data=data, files=files, timeout=timeout) - response_data = response.json() - if response.status_code == 200: - raw_url = response_data['th_url'].replace('https://t', 'https://img').replace('/thumbs/', '/images/') - img_url = response_data['th_url'] - web_url = response_data['show_url'] - - elif img_host == "lensdump": - url = "https://lensdump.com/api/1/upload" - data = { - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - headers = { - 'X-API-Key': config['DEFAULT']['lensdump_api'] - } - response = requests.post(url, data=data, headers=headers, timeout=timeout) - response_data = response.json() - if response_data.get('status_code') == 200: - img_url = response_data['data']['image']['url'] - raw_url = response_data['data']['image']['url'] - web_url = response_data['data']['url_viewer'] - - if img_url and raw_url and web_url: - return { - 'status': 'success', - 'img_url': img_url, - 'raw_url': raw_url, - 'web_url': web_url, - 'local_file_path': image - } - else: - return { - 'status': 'failed', - 'reason': f"Failed to upload image to {img_host}. No URLs received." 
- } - - except Exception as e: - return { - 'status': 'failed', - 'reason': str(e) - } - - def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=False, max_retries=3): - def use_tqdm(): - """Check if the environment supports TTY (interactive progress bar).""" - return sys.stdout.isatty() - - if meta['debug']: - upload_start_time = time.time() - - import nest_asyncio - nest_asyncio.apply() - os.chdir(f"{meta['base_dir']}/tmp/{meta['uuid']}") - initial_img_host = self.config['DEFAULT'][f'img_host_{img_host_num}'] - img_host = meta['imghost'] - using_custom_img_list = isinstance(custom_img_list, list) and bool(custom_img_list) - - if 'image_sizes' not in meta: - meta['image_sizes'] = {} - - if using_custom_img_list: - image_glob = custom_img_list - existing_images = [] - existing_count = 0 - else: - image_glob = glob.glob("*.png") - if 'POSTER.png' in image_glob: - image_glob.remove('POSTER.png') - image_glob = list(set(image_glob)) - if meta['debug']: - console.print("image globs:", image_glob) - - existing_images = [img for img in meta['image_list'] if img.get('img_url') and img.get('web_url')] - existing_count = len(existing_images) - - if not retry_mode: - images_needed = max(0, total_screens - existing_count) - else: - images_needed = total_screens - - if existing_count >= total_screens and not retry_mode and img_host == initial_img_host and not using_custom_img_list: - console.print(f"[yellow]Skipping upload because enough images are already uploaded to {img_host}. Existing images: {existing_count}, Required: {total_screens}") - return meta['image_list'], total_screens - - upload_tasks = [(image, img_host, self.config, meta) for image in image_glob[:images_needed]] - - host_limits = { - "oeimg": 6, - "ptscreens": 1, - "lensdump": 1, - } - default_pool_size = int(meta.get('task_limit', os.cpu_count())) - pool_size = host_limits.get(img_host, default_pool_size) - - try: - with get_context("spawn").Pool(processes=max(1, min(len(upload_tasks), pool_size))) as pool: - if use_tqdm(): - try: - results = list( - tqdm( - pool.imap_unordered(self.upload_image_task, upload_tasks), - total=len(upload_tasks), - desc=f"Uploading Images to {img_host}", - ascii=True, - dynamic_ncols=False - ) - ) - finally: - pool.close() - pool.join() - else: - console.print(f"[blue]Non-TTY environment detected. Progress bar disabled. Uploading images to {img_host}.") - results = [] - for i, result in enumerate(pool.imap_unordered(self.upload_image_task, upload_tasks), 1): - results.append(result) - console.print(f"Uploaded {i}/{len(upload_tasks)} images to {img_host}") - except KeyboardInterrupt: - console.print("[red]Upload process interrupted by user. 
Exiting...") - pool.terminate() - pool.join() - return meta['image_list'], len(meta['image_list']) - - successfully_uploaded = [] - for result in results: - if result['status'] == 'success': - successfully_uploaded.append(result) - else: - console.print(f"[yellow]Failed to upload: {result.get('reason', 'Unknown error')}") - - if len(successfully_uploaded) < meta.get('cutoff') and not retry_mode and img_host == initial_img_host and not using_custom_img_list: - img_host_num += 1 - if f'img_host_{img_host_num}' in self.config['DEFAULT']: - meta['imghost'] = self.config['DEFAULT'][f'img_host_{img_host_num}'] - console.print(f"[cyan]Switching to the next image host: {meta['imghost']}") - return self.upload_screens(meta, screens, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=True) - else: - console.print("[red]No more image hosts available. Aborting upload process.") - return meta['image_list'], len(meta['image_list']) - - new_images = [] - for upload in successfully_uploaded: - raw_url = upload['raw_url'] - new_image = { - 'img_url': upload['img_url'], - 'raw_url': raw_url, - 'web_url': upload['web_url'] - } - new_images.append(new_image) - if not using_custom_img_list and raw_url not in {img['raw_url'] for img in meta['image_list']}: - if meta['debug']: - console.print(f"[blue]Adding {raw_url} to image_list") - meta['image_list'].append(new_image) - local_file_path = upload.get('local_file_path') - if local_file_path: - image_size = os.path.getsize(local_file_path) - meta['image_sizes'][raw_url] = image_size - - console.print(f"[green]Successfully uploaded {len(new_images)} images.") - if meta['debug']: - upload_finish_time = time.time() - print(f"Screenshot uploads processed in {upload_finish_time - upload_start_time:.4f} seconds") - - if using_custom_img_list: - return new_images, len(new_images) - - return meta['image_list'], len(successfully_uploaded) - - async def imgbox_upload(self, chdir, image_glob, meta, return_dict): - try: - os.chdir(chdir) - image_list = [] - - async with pyimgbox.Gallery(thumb_width=350, square_thumbs=False) as gallery: - for image in image_glob: - try: - async for submission in gallery.add([image]): - if not submission['success']: - console.print(f"[red]Error uploading to imgbox: [yellow]{submission['error']}[/yellow][/red]") - else: - web_url = submission.get('web_url') - img_url = submission.get('thumbnail_url') - raw_url = submission.get('image_url') - if web_url and img_url and raw_url: - image_dict = { - 'web_url': web_url, - 'img_url': img_url, - 'raw_url': raw_url - } - image_list.append(image_dict) - else: - console.print(f"[red]Incomplete URLs received for image: {image}") - except Exception as e: - console.print(f"[red]Error during upload for {image}: {str(e)}") - - return_dict['image_list'] = image_list - return image_list - - except Exception as e: - console.print(f"[red]An error occurred while uploading images to imgbox: {str(e)}") - return [] - async def get_name(self, meta): type = meta.get('type', "").upper() title = meta.get('title', "") @@ -3837,294 +1307,10 @@ async def get_name(self, meta): exit() name_notag = name name = name_notag + tag - clean_name = self.clean_filename(name) + clean_name = await self.clean_filename(name) return name_notag, name, clean_name, potential_missing - async def get_season_episode(self, video, meta): - if meta['category'] == 'TV': - filelist = meta['filelist'] - meta['tv_pack'] = 0 - is_daily = False - if meta['anime'] is False: - try: - daily_match = 
re.search(r"\d{4}[-\.]\d{2}[-\.]\d{2}", video) - if meta.get('manual_date') or daily_match: - # Handle daily episodes - # The user either provided the --daily argument or a date was found in the filename - - if meta.get('manual_date') is None and daily_match is not None: - meta['manual_date'] = daily_match.group().replace('.', '-') - is_daily = True - guess_date = meta.get('manual_date', guessit(video).get('date')) if meta.get('manual_date') else guessit(video).get('date') - season_int, episode_int = self.daily_to_tmdb_season_episode(meta.get('tmdb'), guess_date) - - season = f"S{str(season_int).zfill(2)}" - episode = f"E{str(episode_int).zfill(2)}" - # For daily shows, pass the supplied date as the episode title - # Season and episode will be stripped later to conform with standard daily episode naming format - meta['episode_title'] = meta.get('manual_date') - - else: - try: - guess_year = guessit(video)['year'] - except Exception: - guess_year = "" - if guessit(video)["season"] == guess_year: - if f"s{guessit(video)['season']}" in video.lower(): - season_int = str(guessit(video)["season"]) - season = "S" + season_int.zfill(2) - else: - season_int = "1" - season = "S01" - else: - season_int = str(guessit(video)["season"]) - season = "S" + season_int.zfill(2) - - except Exception: - console.print_exception() - season_int = "1" - season = "S01" - - try: - if is_daily is not True: - episodes = "" - if len(filelist) == 1: - episodes = guessit(video)['episode'] - if isinstance(episodes, list): - episode = "" - for item in guessit(video)["episode"]: - ep = (str(item).zfill(2)) - episode += f"E{ep}" - episode_int = episodes[0] - else: - episode_int = str(episodes) - episode = "E" + str(episodes).zfill(2) - else: - episode = "" - episode_int = "0" - meta['tv_pack'] = 1 - except Exception: - episode = "" - episode_int = "0" - meta['tv_pack'] = 1 - - else: - # If Anime - parsed = anitopy.parse(Path(video).name) - romaji, mal_id, eng_title, seasonYear, anilist_episodes = self.get_romaji(parsed['anime_title'], meta.get('mal', None)) - if mal_id: - meta['mal_id'] = mal_id - if meta.get('mal') is not None: - mal_id = meta.get('mal') - if meta.get('tmdb_manual', None) is None: - year = parsed.get('anime_year', str(seasonYear)) - meta = await self.get_tmdb_id(guessit(parsed['anime_title'], {"excludes": ["country", "language"]})['title'], year, meta, meta['category']) - meta = await self.tmdb_other_meta(meta) - if meta['category'] != "TV": - return meta - - tag = parsed.get('release_group', "") - if tag != "": - meta['tag'] = f"-{tag}" - if len(filelist) == 1: - try: - episodes = parsed.get('episode_number', guessit(video).get('episode', '1')) - if not isinstance(episodes, list) and not episodes.isnumeric(): - episodes = guessit(video)['episode'] - if isinstance(episodes, list): - episode_int = int(episodes[0]) # Always convert to integer - episode = "".join([f"E{str(int(item)).zfill(2)}" for item in episodes]) - else: - episode_int = int(episodes) # Convert to integer - episode = f"E{str(episode_int).zfill(2)}" - except Exception: - episode = "E01" - episode_int = 1 # Ensure it's an integer - console.print('[bold yellow]There was an error guessing the episode number. Guessing E01. 
Use [bold green]--episode #[/bold green] to correct if needed') - await asyncio.sleep(1.5) - else: - episode = "" - episode_int = 0 # Ensure it's an integer - meta['tv_pack'] = 1 - - try: - if meta.get('season_int'): - season_int = int(meta.get('season_int')) # Convert to integer - else: - season = parsed.get('anime_season', guessit(video).get('season', '1')) - season_int = int(season) # Convert to integer - season = f"S{str(season_int).zfill(2)}" - except Exception: - try: - if episode_int >= anilist_episodes: - params = { - 'id': str(meta['tvdb_id']), - 'origin': 'tvdb', - 'absolute': str(episode_int), - } - url = "https://thexem.info/map/single" - response = requests.post(url, params=params).json() - if response['result'] == "failure": - raise XEMNotFound # noqa: F405 - if meta['debug']: - console.log(f"[cyan]TheXEM Absolute -> Standard[/cyan]\n{response}") - season_int = int(response['data']['scene']['season']) # Convert to integer - season = f"S{str(season_int).zfill(2)}" - if len(filelist) == 1: - episode_int = int(response['data']['scene']['episode']) # Convert to integer - episode = f"E{str(episode_int).zfill(2)}" - else: - season_int = 1 # Default to 1 if error occurs - season = "S01" - names_url = f"https://thexem.info/map/names?origin=tvdb&id={str(meta['tvdb_id'])}" - names_response = requests.get(names_url).json() - if meta['debug']: - console.log(f'[cyan]Matching Season Number from TheXEM\n{names_response}') - difference = 0 - if names_response['result'] == "success": - for season_num, values in names_response['data'].items(): - for lang, names in values.items(): - if lang == "jp": - for name in names: - romaji_check = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", romaji.lower().replace(' ', '')) - name_check = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", name.lower().replace(' ', '')) - diff = SequenceMatcher(None, romaji_check, name_check).ratio() - if romaji_check in name_check and diff >= difference: - season_int = int(season_num) if season_num != "all" else 1 # Convert to integer - season = f"S{str(season_int).zfill(2)}" - difference = diff - if lang == "us": - for name in names: - eng_check = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", eng_title.lower().replace(' ', '')) - name_check = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", name.lower().replace(' ', '')) - diff = SequenceMatcher(None, eng_check, name_check).ratio() - if eng_check in name_check and diff >= difference: - season_int = int(season_num) if season_num != "all" else 1 # Convert to integer - season = f"S{str(season_int).zfill(2)}" - difference = diff - else: - raise XEMNotFound # noqa: F405 - except Exception: - if meta['debug']: - console.print_exception() - try: - season = guessit(video).get('season', '1') - season_int = int(season) # Convert to integer - except Exception: - season_int = 1 # Default to 1 if error occurs - season = "S01" - console.print(f"[bold yellow]{meta['title']} does not exist on thexem, guessing {season}") - console.print(f"[bold yellow]If [green]{season}[/green] is incorrect, use --season to correct") - await asyncio.sleep(3) - - if meta.get('manual_season', None) is None: - meta['season'] = season - else: - season_int = meta['manual_season'].lower().replace('s', '') - meta['season'] = f"S{meta['manual_season'].lower().replace('s', '').zfill(2)}" - if meta.get('manual_episode', None) is None: - meta['episode'] = episode - else: - episode_int = meta['manual_episode'].lower().replace('e', '') - meta['episode'] = f"E{meta['manual_episode'].lower().replace('e', '').zfill(2)}" - meta['tv_pack'] = 0 - - # if " COMPLETE " in 
Path(video).name.replace('.', ' '): - # meta['season'] = "COMPLETE" - meta['season_int'] = season_int - meta['episode_int'] = episode_int - - # Manual episode title - if 'manual_episode_title' in meta and meta['manual_episode_title'] == "": - meta['episode_title_storage'] = meta.get('manual_episode_title') - else: - meta['episode_title_storage'] = guessit(video, {"excludes": "part"}).get('episode_title', '') - - if meta['season'] == "S00" or meta['episode'] == "E00": - meta['episode_title'] = meta['episode_title_storage'] - - # Guess the part of the episode (if available) - meta['part'] = "" - if meta['tv_pack'] == 1: - part = guessit(os.path.dirname(video)).get('part') - meta['part'] = f"Part {part}" if part else "" - - return meta - - def get_service(self, video=None, tag=None, audio=None, guess_title=None, get_services_only=False): - services = { - '9NOW': '9NOW', '9Now': '9NOW', 'AE': 'AE', 'A&E': 'AE', 'AJAZ': 'AJAZ', 'Al Jazeera English': 'AJAZ', - 'ALL4': 'ALL4', 'Channel 4': 'ALL4', 'AMBC': 'AMBC', 'ABC': 'AMBC', 'AMC': 'AMC', 'AMZN': 'AMZN', - 'Amazon Prime': 'AMZN', 'ANLB': 'ANLB', 'AnimeLab': 'ANLB', 'ANPL': 'ANPL', 'Animal Planet': 'ANPL', - 'AOL': 'AOL', 'ARD': 'ARD', 'AS': 'AS', 'Adult Swim': 'AS', 'ATK': 'ATK', "America's Test Kitchen": 'ATK', - 'ATVP': 'ATVP', 'AppleTV': 'ATVP', 'AUBC': 'AUBC', 'ABC Australia': 'AUBC', 'BCORE': 'BCORE', 'BKPL': 'BKPL', - 'Blackpills': 'BKPL', 'BluTV': 'BLU', 'Binge': 'BNGE', 'BOOM': 'BOOM', 'Boomerang': 'BOOM', 'BRAV': 'BRAV', - 'BravoTV': 'BRAV', 'CBC': 'CBC', 'CBS': 'CBS', 'CC': 'CC', 'Comedy Central': 'CC', 'CCGC': 'CCGC', - 'Comedians in Cars Getting Coffee': 'CCGC', 'CHGD': 'CHGD', 'CHRGD': 'CHGD', 'CMAX': 'CMAX', 'Cinemax': 'CMAX', - 'CMOR': 'CMOR', 'CMT': 'CMT', 'Country Music Television': 'CMT', 'CN': 'CN', 'Cartoon Network': 'CN', 'CNBC': 'CNBC', - 'CNLP': 'CNLP', 'Canal+': 'CNLP', 'CNGO': 'CNGO', 'Cinego': 'CNGO', 'COOK': 'COOK', 'CORE': 'CORE', 'CR': 'CR', - 'Crunchy Roll': 'CR', 'Crave': 'CRAV', 'CRIT': 'CRIT', 'Criterion': 'CRIT', 'CRKL': 'CRKL', 'Crackle': 'CRKL', - 'CSPN': 'CSPN', 'CSpan': 'CSPN', 'CTV': 'CTV', 'CUR': 'CUR', 'CuriosityStream': 'CUR', 'CW': 'CW', 'The CW': 'CW', - 'CWS': 'CWS', 'CWSeed': 'CWS', 'DAZN': 'DAZN', 'DCU': 'DCU', 'DC Universe': 'DCU', 'DDY': 'DDY', - 'Digiturk Diledigin Yerde': 'DDY', 'DEST': 'DEST', 'DramaFever': 'DF', 'DHF': 'DHF', 'Deadhouse Films': 'DHF', - 'DISC': 'DISC', 'Discovery': 'DISC', 'DIY': 'DIY', 'DIY Network': 'DIY', 'DOCC': 'DOCC', 'Doc Club': 'DOCC', - 'DPLY': 'DPLY', 'DPlay': 'DPLY', 'DRPO': 'DRPO', 'Discovery Plus': 'DSCP', 'DSKI': 'DSKI', 'Daisuki': 'DSKI', - 'DSNP': 'DSNP', 'Disney+': 'DSNP', 'DSNY': 'DSNY', 'Disney': 'DSNY', 'DTV': 'DTV', 'EPIX': 'EPIX', 'ePix': 'EPIX', - 'ESPN': 'ESPN', 'ESQ': 'ESQ', 'Esquire': 'ESQ', 'ETTV': 'ETTV', 'El Trece': 'ETTV', 'ETV': 'ETV', 'E!': 'ETV', - 'FAM': 'FAM', 'Fandor': 'FANDOR', 'Facebook Watch': 'FBWatch', 'FJR': 'FJR', 'Family Jr': 'FJR', 'FMIO': 'FMIO', - 'Filmio': 'FMIO', 'FOOD': 'FOOD', 'Food Network': 'FOOD', 'FOX': 'FOX', 'Fox': 'FOX', 'Fox Premium': 'FOXP', - 'UFC Fight Pass': 'FP', 'FPT': 'FPT', 'FREE': 'FREE', 'Freeform': 'FREE', 'FTV': 'FTV', 'FUNI': 'FUNI', 'FUNi': 'FUNI', - 'Foxtel': 'FXTL', 'FYI': 'FYI', 'FYI Network': 'FYI', 'GC': 'GC', 'NHL GameCenter': 'GC', 'GLBL': 'GLBL', - 'Global': 'GLBL', 'GLOB': 'GLOB', 'GloboSat Play': 'GLOB', 'GO90': 'GO90', 'GagaOOLala': 'Gaga', 'HBO': 'HBO', - 'HBO Go': 'HBO', 'HGTV': 'HGTV', 'HIDI': 'HIDI', 'HIST': 'HIST', 'History': 'HIST', 'HLMK': 'HLMK', 'Hallmark': 'HLMK', - 'HMAX': 'HMAX', 
'HBO Max': 'HMAX', 'HS': 'HTSR', 'HTSR': 'HTSR', 'HSTR': 'Hotstar', 'HULU': 'HULU', 'Hulu': 'HULU', - 'hoichoi': 'HoiChoi', 'ID': 'ID', 'Investigation Discovery': 'ID', 'IFC': 'IFC', 'iflix': 'IFX', - 'National Audiovisual Institute': 'INA', 'ITV': 'ITV', 'JOYN': 'JOYN', 'KAYO': 'KAYO', 'KNOW': 'KNOW', 'Knowledge Network': 'KNOW', - 'KNPY': 'KNPY', 'Kanopy': 'KNPY', 'LIFE': 'LIFE', 'Lifetime': 'LIFE', 'LN': 'LN', 'MA': 'MA', 'Movies Anywhere': 'MA', - 'MAX': 'MAX', 'MBC': 'MBC', 'MNBC': 'MNBC', 'MSNBC': 'MNBC', 'MTOD': 'MTOD', 'Motor Trend OnDemand': 'MTOD', 'MTV': 'MTV', - 'MUBI': 'MUBI', 'NATG': 'NATG', 'National Geographic': 'NATG', 'NBA': 'NBA', 'NBA TV': 'NBA', 'NBC': 'NBC', 'NF': 'NF', - 'Netflix': 'NF', 'National Film Board': 'NFB', 'NFL': 'NFL', 'NFLN': 'NFLN', 'NFL Now': 'NFLN', 'NICK': 'NICK', - 'Nickelodeon': 'NICK', 'NOW': 'NOW', 'NRK': 'NRK', 'Norsk Rikskringkasting': 'NRK', 'OnDemandKorea': 'ODK', 'Opto': 'OPTO', - 'ORF': 'ORF', 'ORF ON': 'ORF', 'Oprah Winfrey Network': 'OWN', 'PA': 'PA', 'PBS': 'PBS', 'PBSK': 'PBSK', 'PBS Kids': 'PBSK', - 'PCOK': 'PCOK', 'Peacock': 'PCOK', 'PLAY': 'PLAY', 'PLUZ': 'PLUZ', 'Pluzz': 'PLUZ', 'PMNP': 'PMNP', 'PMNT': 'PMNT', - 'PMTP': 'PMTP', 'POGO': 'POGO', 'PokerGO': 'POGO', 'PSN': 'PSN', 'Playstation Network': 'PSN', 'PUHU': 'PUHU', 'QIBI': 'QIBI', - 'RED': 'RED', 'YouTube Red': 'RED', 'RKTN': 'RKTN', 'Rakuten TV': 'RKTN', 'The Roku Channel': 'ROKU', 'RNET': 'RNET', - 'OBB Railnet': 'RNET', 'RSTR': 'RSTR', 'RTE': 'RTE', 'RTE One': 'RTE', 'RTLP': 'RTLP', 'RTL+': 'RTLP', 'RUUTU': 'RUUTU', - 'SBS': 'SBS', 'Science Channel': 'SCI', 'SESO': 'SESO', 'SeeSo': 'SESO', 'SHMI': 'SHMI', 'Shomi': 'SHMI', 'SKST': 'SKST', - 'SkyShowtime': 'SKST', 'SHO': 'SHO', 'Showtime': 'SHO', 'SNET': 'SNET', 'Sportsnet': 'SNET', 'Sony': 'SONY', 'SPIK': 'SPIK', - 'Spike': 'SPIK', 'Spike TV': 'SPKE', 'SPRT': 'SPRT', 'Sprout': 'SPRT', 'STAN': 'STAN', 'Stan': 'STAN', 'STARZ': 'STARZ', - 'STRP': 'STRP', 'Star+': 'STRP', 'STZ': 'STZ', 'Starz': 'STZ', 'SVT': 'SVT', 'Sveriges Television': 'SVT', 'SWER': 'SWER', - 'SwearNet': 'SWER', 'SYFY': 'SYFY', 'Syfy': 'SYFY', 'TBS': 'TBS', 'TEN': 'TEN', 'TIMV': 'TIMV', 'TIMvision': 'TIMV', - 'TFOU': 'TFOU', 'TFou': 'TFOU', 'TIMV': 'TIMV', 'TLC': 'TLC', 'TOU': 'TOU', 'TRVL': 'TRVL', 'TUBI': 'TUBI', 'TubiTV': 'TUBI', - 'TV3': 'TV3', 'TV3 Ireland': 'TV3', 'TV4': 'TV4', 'TV4 Sweeden': 'TV4', 'TVING': 'TVING', 'TVL': 'TVL', 'TV Land': 'TVL', - 'TVNZ': 'TVNZ', 'UFC': 'UFC', 'UKTV': 'UKTV', 'UNIV': 'UNIV', 'Univision': 'UNIV', 'USAN': 'USAN', 'USA Network': 'USAN', - 'VH1': 'VH1', 'VIAP': 'VIAP', 'VICE': 'VICE', 'Viceland': 'VICE', 'Viki': 'VIKI', 'VIMEO': 'VIMEO', 'VLCT': 'VLCT', - 'Velocity': 'VLCT', 'VMEO': 'VMEO', 'Vimeo': 'VMEO', 'VRV': 'VRV', 'VUDU': 'VUDU', 'WME': 'WME', 'WatchMe': 'WME', 'WNET': 'WNET', - 'W Network': 'WNET', 'WWEN': 'WWEN', 'WWE Network': 'WWEN', 'XBOX': 'XBOX', 'Xbox Video': 'XBOX', 'YHOO': 'YHOO', 'Yahoo': 'YHOO', - 'YT': 'YT', 'ZDF': 'ZDF', 'iP': 'iP', 'BBC iPlayer': 'iP', 'iQIYI': 'iQIYI', 'iT': 'iT', 'iTunes': 'iT' - } - - if get_services_only: - return services - service = guessit(video).get('streaming_service', "") - - video_name = re.sub(r"[.()]", " ", video.replace(tag, '').replace(guess_title, '')) - if "DTS-HD MA" in audio: - video_name = video_name.replace("DTS-HD.MA.", "").replace("DTS-HD MA ", "") - for key, value in services.items(): - if (' ' + key + ' ') in video_name and key not in guessit(video, {"excludes": ["country", "language"]}).get('title', ''): - service = value - elif key == service: - service = 
value - service_longname = service - for key, value in services.items(): - if value == service and len(key) > len(service_longname): - service_longname = key - if service_longname == "Amazon Prime": - service_longname = "Amazon" - return service, service_longname - - def stream_optimized(self, stream_opt): + async def stream_optimized(self, stream_opt): if stream_opt is True: stream = 1 else: @@ -4160,7 +1346,7 @@ async def upload_image(self, session, url, data, headers, files): response = await resp.json() return response - def clean_filename(self, name): + async def clean_filename(self, name): invalid = '<>:"/\\|?*' for char in invalid: name = name.replace(char, '-') @@ -4296,158 +1482,6 @@ async def tag_override(self, meta): meta[key] = value.get(key) return meta - async def package(self, meta): - if meta['tag'] == "": - tag = "" - else: - tag = f" / {meta['tag'][1:]}" - if meta['is_disc'] == "DVD": - res = meta['source'] - else: - res = meta['resolution'] - - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/GENERIC_INFO.txt", 'w', encoding="utf-8") as generic: - generic.write(f"Name: {meta['name']}\n\n") - generic.write(f"Overview: {meta['overview']}\n\n") - generic.write(f"{res} / {meta['type']}{tag}\n\n") - generic.write(f"Category: {meta['category']}\n") - generic.write(f"TMDB: https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}\n") - if meta['imdb_id'] != "0": - generic.write(f"IMDb: https://www.imdb.com/title/tt{meta['imdb_id']}\n") - if meta['tvdb_id'] != "0": - generic.write(f"TVDB: https://www.thetvdb.com/?id={meta['tvdb_id']}&tab=series\n") - if "tvmaze_id" in meta and meta['tvmaze_id'] != "0": - generic.write(f"TVMaze: https://www.tvmaze.com/shows/{meta['tvmaze_id']}\n") - poster_img = f"{meta['base_dir']}/tmp/{meta['uuid']}/POSTER.png" - if meta.get('poster', None) not in ['', None] and not os.path.exists(poster_img): - if meta.get('rehosted_poster', None) is None: - r = requests.get(meta['poster'], stream=True) - if r.status_code == 200: - console.print("[bold yellow]Rehosting Poster") - r.raw.decode_content = True - with open(poster_img, 'wb') as f: - shutil.copyfileobj(r.raw, f) - poster, dummy = self.upload_screens(meta, 1, 1, 0, 1, [poster_img], {}) - poster = poster[0] - generic.write(f"TMDB Poster: {poster.get('raw_url', poster.get('img_url'))}\n") - meta['rehosted_poster'] = poster.get('raw_url', poster.get('img_url')) - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as metafile: - json.dump(meta, metafile, indent=4) - metafile.close() - else: - console.print("[bold yellow]Poster could not be retrieved") - elif os.path.exists(poster_img) and meta.get('rehosted_poster') is not None: - generic.write(f"TMDB Poster: {meta.get('rehosted_poster')}\n") - if len(meta['image_list']) > 0: - generic.write("\nImage Webpage:\n") - for each in meta['image_list']: - generic.write(f"{each['web_url']}\n") - generic.write("\nThumbnail Image:\n") - for each in meta['image_list']: - generic.write(f"{each['img_url']}\n") - title = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", meta['title']) - archive = f"{meta['base_dir']}/tmp/{meta['uuid']}/{title}" - torrent_files = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", "*.torrent") - if isinstance(torrent_files, list) and len(torrent_files) > 1: - for each in torrent_files: - if not each.startswith(('BASE', '[RAND')): - os.remove(os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/{each}")) - try: - if os.path.exists(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent"): - base_torrent = 
Torrent.read(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") - manual_name = re.sub(r"[^0-9a-zA-Z\[\]\'\-]+", ".", os.path.basename(meta['path'])) - Torrent.copy(base_torrent).write(f"{meta['base_dir']}/tmp/{meta['uuid']}/{manual_name}.torrent", overwrite=True) - # shutil.copy(os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent"), os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['name'].replace(' ', '.')}.torrent").replace(' ', '.')) - filebrowser = self.config['TRACKERS'].get('MANUAL', {}).get('filebrowser', None) - shutil.make_archive(archive, 'tar', f"{meta['base_dir']}/tmp/{meta['uuid']}") - if filebrowser is not None: - url = '/'.join(s.strip('/') for s in (filebrowser, f"/tmp/{meta['uuid']}")) - url = urllib.parse.quote(url, safe="https://") - else: - files = { - "files[]": (f"{meta['title']}.tar", open(f"{archive}.tar", 'rb')) - } - response = requests.post("https://uguu.se/upload.php", files=files).json() - if meta['debug']: - console.print(f"[cyan]{response}") - url = response['files'][0]['url'] - return url - except Exception: - return False - return - - async def get_imdb_aka_api(self, imdb_id, meta): - if imdb_id == "0": - return "", None - if not imdb_id.startswith("tt"): - imdb_id = f"tt{imdb_id}" - url = "https://api.graphql.imdb.com/" - query = { - "query": f""" - query {{ - title(id: "{imdb_id}") {{ - id - titleText {{ - text - isOriginalTitle - }} - originalTitleText {{ - text - }} - countriesOfOrigin {{ - countries {{ - id - }} - }} - }} - }} - """ - } - - headers = { - "Content-Type": "application/json", - } - - response = requests.post(url, headers=headers, json=query) - data = response.json() - - # Check if `data` and `title` exist - title_data = data.get("data", {}).get("title") - if title_data is None: - console.print("Title data is missing from response") - return "", None - - # Extract relevant fields from the response - aka = title_data.get("originalTitleText", {}).get("text", "") - is_original = title_data.get("titleText", {}).get("isOriginalTitle", False) - if meta.get('manual_language'): - original_language = meta.get('manual_language') - else: - original_language = None - - if not is_original and aka: - aka = f" AKA {aka}" - - return aka, original_language - - async def get_imdb_aka(self, imdb_id): - if imdb_id == "0": - return "", None - if not imdb_id.startswith("tt"): - imdb_id = f"tt{imdb_id}" - ia = Cinemagoer() - result = ia.get_movie(imdb_id.replace('tt', '')) - original_language = result.get('language codes') - if isinstance(original_language, list): - if len(original_language) > 1: - original_language = None - elif len(original_language) == 1: - original_language = original_language[0] - aka = result.get('original title', result.get('localized title', "")).replace(' - IMDb', '').replace('\u00ae', '') - if aka != "": - aka = f" AKA {aka}" - return aka, original_language - async def get_dvd_size(self, discs, manual_dvds): sizes = [] dvd_sizes = [] @@ -4466,355 +1500,3 @@ async def get_dvd_size(self, discs, manual_dvds): compact = str(manual_dvds) return compact - - def get_tmdb_imdb_from_mediainfo(self, mediainfo, category, is_disc, tmdbid, imdbid): - if not is_disc: - if mediainfo['media']['track'][0].get('extra'): - extra = mediainfo['media']['track'][0]['extra'] - for each in extra: - if each.lower().startswith('tmdb'): - parser = Args(config=self.config) - category, tmdbid = parser.parse_tmdb_id(id=extra[each], category=category) - if each.lower().startswith('imdb'): - try: - imdbid = 
str(int(extra[each].replace('tt', ''))).zfill(7) - except Exception: - pass - return category, tmdbid, imdbid - - def daily_to_tmdb_season_episode(self, tmdbid, date): - show = tmdb.TV(tmdbid) - seasons = show.info().get('seasons') - season = 1 - episode = 1 - date = datetime.fromisoformat(str(date)) - for each in seasons: - air_date = datetime.fromisoformat(each['air_date']) - if air_date <= date: - season = int(each['season_number']) - season_info = tmdb.TV_Seasons(tmdbid, season).info().get('episodes') - for each in season_info: - if str(each['air_date']) == str(date.date()): - episode = int(each['episode_number']) - break - else: - console.print(f"[yellow]Unable to map the date ([bold yellow]{str(date)}[/bold yellow]) to a Season/Episode number") - return season, episode - - def safe_get(self, data, path, default=None): - for key in path: - if isinstance(data, dict): - data = data.get(key, default) - else: - return default - return data - - async def get_imdb_info_api(self, imdbID, meta): - imdb_info = { - 'title': meta['title'], - 'year': meta['year'], - 'aka': '', - 'type': None, - 'runtime': meta.get('runtime', '60'), - 'cover': meta.get('poster'), - } - if len(meta.get('tmdb_directors', [])) >= 1: - imdb_info['directors'] = meta['tmdb_directors'] - - if imdbID == "0": - return imdb_info - else: - try: - if not imdbID.startswith("tt"): - imdbIDtt = f"tt{imdbID}" - else: - imdbIDtt = imdbID - except Exception: - return imdb_info - query = { - "query": f""" - query GetTitleInfo {{ - title(id: "{imdbIDtt}") {{ - id - titleText {{ - text - isOriginalTitle - }} - originalTitleText {{ - text - }} - releaseYear {{ - year - }} - titleType {{ - id - }} - plot {{ - plotText {{ - plainText - }} - }} - ratingsSummary {{ - aggregateRating - voteCount - }} - primaryImage {{ - url - }} - runtime {{ - displayableProperty {{ - value {{ - plainText - }} - }} - seconds - }} - titleGenres {{ - genres {{ - genre {{ - text - }} - }} - }} - principalCredits {{ - category {{ - text - id - }} - credits {{ - name {{ - id - nameText {{ - text - }} - }} - }} - }} - }} - }} - """ - } - - url = "https://api.graphql.imdb.com/" - headers = {"Content-Type": "application/json"} - - response = requests.post(url, json=query, headers=headers) - data = response.json() - - if response.status_code != 200: - return imdb_info - - title_data = self.safe_get(data, ["data", "title"], {}) - if not data or "data" not in data or "title" not in data["data"]: - return imdb_info - - imdb_info['imdbID'] = imdbID - imdb_info['title'] = self.safe_get(title_data, ['titleText', 'text'], meta['title']) - imdb_info['year'] = self.safe_get(title_data, ['releaseYear', 'year'], meta['year']) - original_title = self.safe_get(title_data, ['originalTitleText', 'text'], '') - imdb_info['aka'] = original_title if original_title and original_title != imdb_info['title'] else imdb_info['title'] - imdb_info['type'] = self.safe_get(title_data, ['titleType', 'id'], None) - runtime_seconds = self.safe_get(title_data, ['runtime', 'seconds'], 0) - imdb_info['runtime'] = str(runtime_seconds // 60 if runtime_seconds else 60) - imdb_info['cover'] = self.safe_get(title_data, ['primaryImage', 'url'], meta.get('poster', '')) - imdb_info['plot'] = self.safe_get(title_data, ['plot', 'plotText', 'plainText'], 'No plot available') - genres = self.safe_get(title_data, ['titleGenres', 'genres'], []) - genre_list = [self.safe_get(g, ['genre', 'text'], '') for g in genres] - imdb_info['genres'] = ', '.join(filter(None, genre_list)) - imdb_info['rating'] = 
self.safe_get(title_data, ['ratingsSummary', 'aggregateRating'], 'N/A') - imdb_info['directors'] = [] - principal_credits = self.safe_get(title_data, ['principalCredits'], []) - if isinstance(principal_credits, list): - for pc in principal_credits: - category_text = self.safe_get(pc, ['category', 'text'], '') - if 'Direct' in category_text: - credits = self.safe_get(pc, ['credits'], []) - for c in credits: - name_id = self.safe_get(c, ['name', 'id'], '') - if name_id.startswith('nm'): - imdb_info['directors'].append(name_id) - break - if meta.get('manual_language'): - imdb_info['original_langauge'] = meta.get('manual_language') - - return imdb_info - - async def get_imdb_info(self, imdbID, meta): - imdb_info = {} - if int(str(imdbID).replace('tt', '')) != 0: - ia = Cinemagoer() - info = ia.get_movie(imdbID) - imdb_info['title'] = info.get('title') - imdb_info['year'] = info.get('year') - imdb_info['aka'] = info.get('original title', info.get('localized title', imdb_info['title'])).replace(' - IMDb', '') - imdb_info['type'] = info.get('kind') - imdb_info['imdbID'] = info.get('imdbID') - imdb_info['runtime'] = info.get('runtimes', ['0'])[0] - imdb_info['cover'] = info.get('full-size cover url', '').replace(".jpg", "._V1_FMjpg_UX750_.jpg") - imdb_info['plot'] = info.get('plot', [''])[0] - imdb_info['genres'] = ', '.join(info.get('genres', '')) - imdb_info['rating'] = info.get('rating', 'N/A') - imdb_info['original_language'] = info.get('language codes') - if isinstance(imdb_info['original_language'], list): - if len(imdb_info['original_language']) > 1: - imdb_info['original_language'] = None - elif len(imdb_info['original_language']) == 1: - imdb_info['original_language'] = imdb_info['original_language'][0] - if imdb_info['cover'] == '': - imdb_info['cover'] = meta.get('poster', '') - if len(info.get('directors', [])) >= 1: - imdb_info['directors'] = [] - for director in info.get('directors'): - imdb_info['directors'].append(f"nm{director.getID()}") - else: - imdb_info = { - 'title': meta['title'], - 'year': meta['year'], - 'aka': '', - 'type': None, - 'runtime': meta.get('runtime', '60'), - 'cover': meta.get('poster'), - } - if len(meta.get('tmdb_directors', [])) >= 1: - imdb_info['directors'] = meta['tmdb_directors'] - return imdb_info - - async def search_imdb(self, filename, search_year): - imdbID = '0' - ia = Cinemagoer() - search = ia.search_movie(filename) - for movie in search: - if filename in movie.get('title', ''): - if movie.get('year') == search_year: - imdbID = str(movie.movieID).replace('tt', '') - return imdbID - - async def imdb_other_meta(self, meta): - imdb_info = meta['imdb_info'] = await self.get_imdb_info_api(meta['imdb_id'], meta) - meta['title'] = imdb_info['title'] - meta['year'] = imdb_info['year'] - meta['aka'] = imdb_info['aka'] - meta['poster'] = imdb_info['cover'] - meta['original_language'] = imdb_info['original_language'] - meta['overview'] = imdb_info['plot'] - meta['imdb_rating'] = imdb_info['rating'] - - difference = SequenceMatcher(None, meta['title'].lower(), meta['aka'][5:].lower()).ratio() - if difference >= 0.9 or meta['aka'][5:].strip() == "" or meta['aka'][5:].strip().lower() in meta['title'].lower(): - meta['aka'] = "" - if f"({meta['year']})" in meta['aka']: - meta['aka'] = meta['aka'].replace(f"({meta['year']})", "").strip() - return meta - - async def search_tvmaze(self, filename, year, imdbID, tvdbID, meta): - try: - tvdbID = int(tvdbID) if tvdbID is not None else 0 - except ValueError: - print(f"Error: tvdbID is not a valid integer. 
Received: {tvdbID}") - tvdbID = 0 - - if meta.get('tvmaze_manual'): - tvmazeID = int(meta['tvmaze_manual']) - return tvmazeID, imdbID, tvdbID - else: - tvmazeID = 0 - results = [] - - if imdbID is None: - imdbID = '0' - - if meta['manual_date'] is None: - if int(tvdbID) != 0: - tvdb_resp = self._make_tvmaze_request("https://api.tvmaze.com/lookup/shows", {"thetvdb": tvdbID}, meta) - if tvdb_resp: - results.append(tvdb_resp) - else: - if int(imdbID) != 0: - imdb_resp = self._make_tvmaze_request("https://api.tvmaze.com/lookup/shows", {"imdb": f"tt{imdbID}"}, meta) - if imdb_resp: - results.append(imdb_resp) - else: - search_resp = self._make_tvmaze_request("https://api.tvmaze.com/search/shows", {"q": filename}, meta) - if search_resp: - if isinstance(search_resp, list): - results.extend([each['show'] for each in search_resp if 'show' in each]) - else: - results.append(search_resp) - else: - if int(tvdbID) != 0: - tvdb_resp = self._make_tvmaze_request("https://api.tvmaze.com/lookup/shows", {"thetvdb": tvdbID}, meta) - if tvdb_resp: - results.append(tvdb_resp) - if int(imdbID) != 0: - imdb_resp = self._make_tvmaze_request("https://api.tvmaze.com/lookup/shows", {"imdb": f"tt{imdbID}"}, meta) - if imdb_resp: - results.append(imdb_resp) - search_resp = self._make_tvmaze_request("https://api.tvmaze.com/search/shows", {"q": filename}, meta) - if search_resp: - if isinstance(search_resp, list): - results.extend([each['show'] for each in search_resp if 'show' in each]) - else: - results.append(search_resp) - - if year not in (None, ''): - results = [show for show in results if str(show.get('premiered', '')).startswith(str(year))] - - seen = set() - unique_results = [] - for show in results: - if show['id'] not in seen: - seen.add(show['id']) - unique_results.append(show) - results = unique_results - - if not results: - if meta['debug']: - print("No results found.") - return tvmazeID, imdbID, tvdbID - - if meta['manual_date'] is not None: - print("Search results:") - for idx, show in enumerate(results): - console.print(f"[bold red]{idx + 1}[/bold red]. [green]{show.get('name', 'Unknown')} (TVmaze ID:[/green] [bold red]{show['id']}[/bold red])") - console.print(f"[yellow] Premiered: {show.get('premiered', 'Unknown')}[/yellow]") - console.print(f" Externals: {json.dumps(show.get('externals', {}), indent=2)}") - - while True: - try: - choice = int(input(f"Enter the number of the correct show (1-{len(results)}) or 0 to skip: ")) - if choice == 0: - print("Skipping selection.") - break - if 1 <= choice <= len(results): - selected_show = results[choice - 1] - tvmazeID = selected_show['id'] - print(f"Selected show: {selected_show.get('name')} (TVmaze ID: {tvmazeID})") - break - else: - print(f"Invalid choice. Please choose a number between 1 and {len(results)}, or 0 to skip.") - except ValueError: - print("Invalid input. 
Please enter a number.") - else: - selected_show = results[0] - tvmazeID = selected_show['id'] - if meta['debug']: - print(f"Automatically selected show: {selected_show.get('name')} (TVmaze ID: {tvmazeID})") - - if meta['debug']: - print(f"Returning results - TVmaze ID: {tvmazeID}, IMDb ID: {imdbID}, TVDB ID: {tvdbID}") - return tvmazeID, imdbID, tvdbID - - def _make_tvmaze_request(self, url, params, meta): - if meta['debug']: - print(f"Requesting TVmaze API: {url} with params: {params}") - try: - resp = requests.get(url, params=params) - if resp.ok: - return resp.json() - else: - if meta['debug']: - print(f"HTTP Request failed with status code: {resp.status_code}, response: {resp.text}") - return None - except Exception as e: - print(f"Error making TVmaze request: {e}") - return None diff --git a/src/queuemanage.py b/src/queuemanage.py new file mode 100644 index 00000000..b36ebd5b --- /dev/null +++ b/src/queuemanage.py @@ -0,0 +1,292 @@ +import os +import json +import glob +import click +import re + +from src.console import console +from rich.markdown import Markdown +from rich.style import Style + + +async def get_log_file(base_dir, queue_name): + """ + Returns the path to the log file for the given base directory and queue name. + """ + safe_queue_name = queue_name.replace(" ", "_") + return os.path.join(base_dir, "tmp", f"{safe_queue_name}_processed_files.log") + + +async def load_processed_files(log_file): + """ + Loads the list of processed files from the log file. + """ + if os.path.exists(log_file): + with open(log_file, "r") as f: + return set(json.load(f)) + return set() + + +async def gather_files_recursive(path, allowed_extensions=None): + """ + Gather files and first-level subfolders. + Each subfolder is treated as a single unit, without exploring deeper. + """ + queue = [] + if os.path.isdir(path): + for entry in os.scandir(path): + if entry.is_dir(): + queue.append(entry.path) + elif entry.is_file() and (allowed_extensions is None or entry.name.lower().endswith(tuple(allowed_extensions))): + queue.append(entry.path) + elif os.path.isfile(path): + if allowed_extensions is None or path.lower().endswith(tuple(allowed_extensions)): + queue.append(path) + else: + console.print(f"[red]Invalid path: {path}") + return queue + + +async def resolve_queue_with_glob_or_split(path, paths, allowed_extensions=None): + """ + Handle glob patterns and split path resolution. + Treat subfolders as single units and filter files by allowed_extensions. + """ + queue = [] + if os.path.exists(os.path.dirname(path)) and len(paths) <= 1: + escaped_path = path.replace('[', '[[]') + queue = [ + file for file in glob.glob(escaped_path) + if os.path.isdir(file) or (os.path.isfile(file) and (allowed_extensions is None or file.lower().endswith(tuple(allowed_extensions)))) + ] + if queue: + await display_queue(queue) + elif os.path.exists(os.path.dirname(path)) and len(paths) > 1: + queue = [ + file for file in paths + if os.path.isdir(file) or (os.path.isfile(file) and (allowed_extensions is None or file.lower().endswith(tuple(allowed_extensions)))) + ] + await display_queue(queue) + elif not os.path.exists(os.path.dirname(path)): + queue = [ + file for file in resolve_split_path(path) # noqa F821 + if os.path.isdir(file) or (os.path.isfile(file) and (allowed_extensions is None or file.lower().endswith(tuple(allowed_extensions)))) + ] + await display_queue(queue) + return queue + + +async def extract_safe_file_locations(log_file): + """ + Parse the log file to extract file locations under the 'safe' header. 
+ + :param log_file: Path to the log file to parse. + :return: List of file paths from the 'safe' section. + """ + safe_section = False + safe_file_locations = [] + + with open(log_file, 'r') as f: + for line in f: + line = line.strip() + + # Detect the start and end of 'safe' sections + if line.lower() == "safe": + safe_section = True + continue + elif line.lower() in {"danger", "risky"}: + safe_section = False + + # Extract 'File Location' if in a 'safe' section + if safe_section and line.startswith("File Location:"): + match = re.search(r"File Location:\s*(.+)", line) + if match: + safe_file_locations.append(match.group(1).strip()) + + return safe_file_locations + + +async def display_queue(queue, base_dir, queue_name, save_to_log=True): + """Displays the queued files in markdown format and optionally saves them to a log file in the tmp directory.""" + md_text = "\n - ".join(queue) + console.print("\n[bold green]Queuing these files:[/bold green]", end='') + console.print(Markdown(f"- {md_text.rstrip()}\n\n", style=Style(color='cyan'))) + console.print("\n\n") + + if save_to_log: + tmp_dir = os.path.join(base_dir, "tmp") + os.makedirs(tmp_dir, exist_ok=True) + log_file = os.path.join(tmp_dir, f"{queue_name}_queue.log") + + try: + with open(log_file, 'w') as f: + json.dump(queue, f, indent=4) + console.print(f"[bold green]Queue successfully saved to log file: {log_file}") + except Exception as e: + console.print(f"[bold red]Failed to save queue to log file: {e}") + + +async def handle_queue(path, meta, paths, base_dir): + allowed_extensions = ['.mkv', '.mp4', '.ts'] + queue = [] + + log_file = os.path.join(base_dir, "tmp", f"{meta['queue']}_queue.log") + allowed_extensions = ['.mkv', '.mp4', '.ts'] + + if path.endswith('.txt') and meta.get('unit3d'): + console.print(f"[bold yellow]Detected a text file for queue input: {path}[/bold yellow]") + if os.path.exists(path): + safe_file_locations = await extract_safe_file_locations(path) + if safe_file_locations: + console.print(f"[cyan]Extracted {len(safe_file_locations)} safe file locations from the text file.[/cyan]") + queue = safe_file_locations + meta['queue'] = "unit3d" + + # Save the queue to the log file + try: + with open(log_file, 'w') as f: + json.dump(queue, f, indent=4) + console.print(f"[bold green]Queue log file saved successfully: {log_file}[/bold green]") + except IOError as e: + console.print(f"[bold red]Failed to save the queue log file: {e}[/bold red]") + exit(1) + else: + console.print("[bold red]No safe file locations found in the text file. Exiting.[/bold red]") + exit(1) + else: + console.print(f"[bold red]Text file not found: {path}. Exiting.[/bold red]") + exit(1) + + elif path.endswith('.log') and meta['debug']: + console.print(f"[bold yellow]Processing debugging queue:[/bold yellow] [bold green]{path}[/bold green]") + if os.path.exists(path): + log_file = path + with open(path, 'r') as f: + queue = json.load(f) + meta['queue'] = "debugging" + + else: + console.print(f"[bold red]Log file not found: {path}. 
Exiting.[/bold red]") + exit(1) + + elif meta.get('queue'): + if os.path.exists(log_file): + with open(log_file, 'r') as f: + existing_queue = json.load(f) + console.print(f"[bold yellow]Found an existing queue log file:[/bold yellow] [green]{log_file}[/green]") + console.print(f"[cyan]The queue log contains {len(existing_queue)} items.[/cyan]") + console.print("[cyan]Do you want to edit, discard, or keep the existing queue?[/cyan]") + edit_choice = input("Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: ").strip().lower() + + if edit_choice == 'e': + edited_content = click.edit(json.dumps(existing_queue, indent=4)) + if edited_content: + try: + queue = json.loads(edited_content.strip()) + console.print("[bold green]Successfully updated the queue from the editor.") + with open(log_file, 'w') as f: + json.dump(queue, f, indent=4) + except json.JSONDecodeError as e: + console.print(f"[bold red]Failed to parse the edited content: {e}. Using the original queue.") + queue = existing_queue + else: + console.print("[bold red]No changes were made. Using the original queue.") + queue = existing_queue + elif edit_choice == 'd': + console.print("[bold yellow]Discarding the existing queue log. Creating a new queue.") + queue = [] + else: + console.print("[bold green]Keeping the existing queue as is.") + queue = existing_queue + else: + if os.path.exists(path): + queue = await gather_files_recursive(path, allowed_extensions=allowed_extensions) + else: + queue = await resolve_queue_with_glob_or_split(path, paths, allowed_extensions=allowed_extensions) + + console.print(f"[cyan]A new queue log file will be created:[/cyan] [green]{log_file}[/green]") + console.print(f"[cyan]The new queue will contain {len(queue)} items.[/cyan]") + console.print("[cyan]Do you want to edit the initial queue before saving?[/cyan]") + edit_choice = input("Enter 'e' to edit, or press Enter to save as is: ").strip().lower() + + if edit_choice == 'e': + edited_content = click.edit(json.dumps(queue, indent=4)) + if edited_content: + try: + queue = json.loads(edited_content.strip()) + console.print("[bold green]Successfully updated the queue from the editor.") + except json.JSONDecodeError as e: + console.print(f"[bold red]Failed to parse the edited content: {e}. Using the original queue.") + else: + console.print("[bold red]No changes were made. 
Using the original queue.") + + # Save the queue to the log file + with open(log_file, 'w') as f: + json.dump(queue, f, indent=4) + console.print(f"[bold green]Queue log file created: {log_file}[/bold green]") + + elif os.path.exists(path): + queue = [path] + + else: + # Search glob if dirname exists + if os.path.exists(os.path.dirname(path)) and len(paths) <= 1: + escaped_path = path.replace('[', '[[]') + globs = glob.glob(escaped_path) + queue = globs + if len(queue) != 0: + md_text = "\n - ".join(queue) + console.print("\n[bold green]Queuing these files:[/bold green]", end='') + console.print(Markdown(f"- {md_text.rstrip()}\n\n", style=Style(color='cyan'))) + console.print("\n\n") + else: + console.print(f"[red]Path: [bold red]{path}[/bold red] does not exist") + + elif os.path.exists(os.path.dirname(path)) and len(paths) != 1: + queue = paths + md_text = "\n - ".join(queue) + console.print("\n[bold green]Queuing these files:[/bold green]", end='') + console.print(Markdown(f"- {md_text.rstrip()}\n\n", style=Style(color='cyan'))) + console.print("\n\n") + elif not os.path.exists(os.path.dirname(path)): + split_path = path.split() + p1 = split_path[0] + for i, each in enumerate(split_path): + try: + if os.path.exists(p1) and not os.path.exists(f"{p1} {split_path[i + 1]}"): + queue.append(p1) + p1 = split_path[i + 1] + else: + p1 += f" {split_path[i + 1]}" + except IndexError: + if os.path.exists(p1): + queue.append(p1) + else: + console.print(f"[red]Path: [bold red]{p1}[/bold red] does not exist") + if len(queue) >= 1: + md_text = "\n - ".join(queue) + console.print("\n[bold green]Queuing these files:[/bold green]", end='') + console.print(Markdown(f"- {md_text.rstrip()}\n\n", style=Style(color='cyan'))) + console.print("\n\n") + + else: + # Add Search Here + console.print("[red]There was an issue with your input. 
If you think this was not an issue, please make a report that includes the full command used.") + exit() + + if not queue: + console.print(f"[red]No valid files or directories found for path: {path}") + exit(1) + + if meta.get('queue'): + queue_name = meta['queue'] + log_file = await get_log_file(base_dir, meta['queue']) + processed_files = await load_processed_files(log_file) + queue = [file for file in queue if file not in processed_files] + if not queue: + console.print(f"[bold yellow]All files in the {meta['queue']} queue have already been processed.") + exit(0) + if meta['debug']: + await display_queue(queue, base_dir, queue_name, save_to_log=False) + + return queue, log_file diff --git a/src/region.py b/src/region.py new file mode 100644 index 00000000..4f78a305 --- /dev/null +++ b/src/region.py @@ -0,0 +1,152 @@ +import re +from guessit import guessit + + +async def get_region(bdinfo, region=None): + label = bdinfo.get('label', bdinfo.get('title', bdinfo.get('path', ''))).replace('.', ' ') + if region is not None: + region = region.upper() + else: + regions = { + 'AFG': 'AFG', 'AIA': 'AIA', 'ALA': 'ALA', 'ALG': 'ALG', 'AND': 'AND', 'ANG': 'ANG', 'ARG': 'ARG', + 'ARM': 'ARM', 'ARU': 'ARU', 'ASA': 'ASA', 'ATA': 'ATA', 'ATF': 'ATF', 'ATG': 'ATG', 'AUS': 'AUS', + 'AUT': 'AUT', 'AZE': 'AZE', 'BAH': 'BAH', 'BAN': 'BAN', 'BDI': 'BDI', 'BEL': 'BEL', 'BEN': 'BEN', + 'BER': 'BER', 'BES': 'BES', 'BFA': 'BFA', 'BHR': 'BHR', 'BHU': 'BHU', 'BIH': 'BIH', 'BLM': 'BLM', + 'BLR': 'BLR', 'BLZ': 'BLZ', 'BOL': 'BOL', 'BOT': 'BOT', 'BRA': 'BRA', 'BRB': 'BRB', 'BRU': 'BRU', + 'BVT': 'BVT', 'CAM': 'CAM', 'CAN': 'CAN', 'CAY': 'CAY', 'CCK': 'CCK', 'CEE': 'CEE', 'CGO': 'CGO', + 'CHA': 'CHA', 'CHI': 'CHI', 'CHN': 'CHN', 'CIV': 'CIV', 'CMR': 'CMR', 'COD': 'COD', 'COK': 'COK', + 'COL': 'COL', 'COM': 'COM', 'CPV': 'CPV', 'CRC': 'CRC', 'CRO': 'CRO', 'CTA': 'CTA', 'CUB': 'CUB', + 'CUW': 'CUW', 'CXR': 'CXR', 'CYP': 'CYP', 'DJI': 'DJI', 'DMA': 'DMA', 'DOM': 'DOM', 'ECU': 'ECU', + 'EGY': 'EGY', 'ENG': 'ENG', 'EQG': 'EQG', 'ERI': 'ERI', 'ESH': 'ESH', 'ESP': 'ESP', 'ETH': 'ETH', + 'FIJ': 'FIJ', 'FLK': 'FLK', 'FRA': 'FRA', 'FRO': 'FRO', 'FSM': 'FSM', 'GAB': 'GAB', 'GAM': 'GAM', + 'GBR': 'GBR', 'GEO': 'GEO', 'GER': 'GER', 'GGY': 'GGY', 'GHA': 'GHA', 'GIB': 'GIB', 'GLP': 'GLP', + 'GNB': 'GNB', 'GRE': 'GRE', 'GRL': 'GRL', 'GRN': 'GRN', 'GUA': 'GUA', 'GUF': 'GUF', 'GUI': 'GUI', + 'GUM': 'GUM', 'GUY': 'GUY', 'HAI': 'HAI', 'HKG': 'HKG', 'HMD': 'HMD', 'HON': 'HON', 'HUN': 'HUN', + 'IDN': 'IDN', 'IMN': 'IMN', 'IND': 'IND', 'IOT': 'IOT', 'IRL': 'IRL', 'IRN': 'IRN', 'IRQ': 'IRQ', + 'ISL': 'ISL', 'ISR': 'ISR', 'ITA': 'ITA', 'JAM': 'JAM', 'JEY': 'JEY', 'JOR': 'JOR', 'JPN': 'JPN', + 'KAZ': 'KAZ', 'KEN': 'KEN', 'KGZ': 'KGZ', 'KIR': 'KIR', 'KNA': 'KNA', 'KOR': 'KOR', 'KSA': 'KSA', + 'KUW': 'KUW', 'KVX': 'KVX', 'LAO': 'LAO', 'LBN': 'LBN', 'LBR': 'LBR', 'LBY': 'LBY', 'LCA': 'LCA', + 'LES': 'LES', 'LIE': 'LIE', 'LKA': 'LKA', 'LUX': 'LUX', 'MAC': 'MAC', 'MAD': 'MAD', 'MAF': 'MAF', + 'MAR': 'MAR', 'MAS': 'MAS', 'MDA': 'MDA', 'MDV': 'MDV', 'MEX': 'MEX', 'MHL': 'MHL', 'MKD': 'MKD', + 'MLI': 'MLI', 'MLT': 'MLT', 'MNG': 'MNG', 'MNP': 'MNP', 'MON': 'MON', 'MOZ': 'MOZ', 'MRI': 'MRI', + 'MSR': 'MSR', 'MTN': 'MTN', 'MTQ': 'MTQ', 'MWI': 'MWI', 'MYA': 'MYA', 'MYT': 'MYT', 'NAM': 'NAM', + 'NCA': 'NCA', 'NCL': 'NCL', 'NEP': 'NEP', 'NFK': 'NFK', 'NIG': 'NIG', 'NIR': 'NIR', 'NIU': 'NIU', + 'NLD': 'NLD', 'NOR': 'NOR', 'NRU': 'NRU', 'NZL': 'NZL', 'OMA': 'OMA', 'PAK': 'PAK', 'PAN': 'PAN', + 'PAR': 'PAR', 'PCN': 'PCN', 'PER': 'PER', 'PHI': 'PHI', 'PLE': 
'PLE', 'PLW': 'PLW', 'PNG': 'PNG', + 'POL': 'POL', 'POR': 'POR', 'PRK': 'PRK', 'PUR': 'PUR', 'QAT': 'QAT', 'REU': 'REU', 'ROU': 'ROU', + 'RSA': 'RSA', 'RUS': 'RUS', 'RWA': 'RWA', 'SAM': 'SAM', 'SCO': 'SCO', 'SDN': 'SDN', 'SEN': 'SEN', + 'SEY': 'SEY', 'SGS': 'SGS', 'SHN': 'SHN', 'SIN': 'SIN', 'SJM': 'SJM', 'SLE': 'SLE', 'SLV': 'SLV', + 'SMR': 'SMR', 'SOL': 'SOL', 'SOM': 'SOM', 'SPM': 'SPM', 'SRB': 'SRB', 'SSD': 'SSD', 'STP': 'STP', + 'SUI': 'SUI', 'SUR': 'SUR', 'SWZ': 'SWZ', 'SXM': 'SXM', 'SYR': 'SYR', 'TAH': 'TAH', 'TAN': 'TAN', + 'TCA': 'TCA', 'TGA': 'TGA', 'THA': 'THA', 'TJK': 'TJK', 'TKL': 'TKL', 'TKM': 'TKM', 'TLS': 'TLS', + 'TOG': 'TOG', 'TRI': 'TRI', 'TUN': 'TUN', 'TUR': 'TUR', 'TUV': 'TUV', 'TWN': 'TWN', 'UAE': 'UAE', + 'UGA': 'UGA', 'UKR': 'UKR', 'UMI': 'UMI', 'URU': 'URU', 'USA': 'USA', 'UZB': 'UZB', 'VAN': 'VAN', + 'VAT': 'VAT', 'VEN': 'VEN', 'VGB': 'VGB', 'VIE': 'VIE', 'VIN': 'VIN', 'VIR': 'VIR', 'WAL': 'WAL', + 'WLF': 'WLF', 'YEM': 'YEM', 'ZAM': 'ZAM', 'ZIM': 'ZIM', "EUR": "EUR" + } + for key, value in regions.items(): + if f" {key} " in label: + region = value + + if region is None: + region = "" + return region + + +async def get_distributor(distributor_in): + distributor_list = [ + '01 DISTRIBUTION', '100 DESTINATIONS TRAVEL FILM', '101 FILMS', '1FILMS', '2 ENTERTAIN VIDEO', '20TH CENTURY FOX', '2L', '3D CONTENT HUB', '3D MEDIA', '3L FILM', '4DIGITAL', '4DVD', '4K ULTRA HD MOVIES', '4K UHD', '8-FILMS', '84 ENTERTAINMENT', '88 FILMS', '@ANIME', 'ANIME', 'A CONTRACORRIENTE', 'A CONTRACORRIENTE FILMS', 'A&E HOME VIDEO', 'A&E', 'A&M RECORDS', 'A+E NETWORKS', 'A+R', 'A-FILM', 'AAA', 'AB VIDÉO', 'AB VIDEO', 'ABC - (AUSTRALIAN BROADCASTING CORPORATION)', 'ABC', 'ABKCO', 'ABSOLUT MEDIEN', 'ABSOLUTE', 'ACCENT FILM ENTERTAINMENT', 'ACCENTUS', 'ACORN MEDIA', 'AD VITAM', 'ADA', 'ADITYA VIDEOS', 'ADSO FILMS', 'AFM RECORDS', 'AGFA', 'AIX RECORDS', + 'ALAMODE FILM', 'ALBA RECORDS', 'ALBANY RECORDS', 'ALBATROS', 'ALCHEMY', 'ALIVE', 'ALL ANIME', 'ALL INTERACTIVE ENTERTAINMENT', 'ALLEGRO', 'ALLIANCE', 'ALPHA MUSIC', 'ALTERDYSTRYBUCJA', 'ALTERED INNOCENCE', 'ALTITUDE FILM DISTRIBUTION', 'ALUCARD RECORDS', 'AMAZING D.C.', 'AMAZING DC', 'AMMO CONTENT', 'AMUSE SOFT ENTERTAINMENT', 'ANCONNECT', 'ANEC', 'ANIMATSU', 'ANIME HOUSE', 'ANIME LTD', 'ANIME WORKS', 'ANIMEIGO', 'ANIPLEX', 'ANOLIS ENTERTAINMENT', 'ANOTHER WORLD ENTERTAINMENT', 'AP INTERNATIONAL', 'APPLE', 'ARA MEDIA', 'ARBELOS', 'ARC ENTERTAINMENT', 'ARP SÉLECTION', 'ARP SELECTION', 'ARROW', 'ART SERVICE', 'ART VISION', 'ARTE ÉDITIONS', 'ARTE EDITIONS', 'ARTE VIDÉO', + 'ARTE VIDEO', 'ARTHAUS MUSIK', 'ARTIFICIAL EYE', 'ARTSPLOITATION FILMS', 'ARTUS FILMS', 'ASCOT ELITE HOME ENTERTAINMENT', 'ASIA VIDEO', 'ASMIK ACE', 'ASTRO RECORDS & FILMWORKS', 'ASYLUM', 'ATLANTIC FILM', 'ATLANTIC RECORDS', 'ATLAS FILM', 'AUDIO VISUAL ENTERTAINMENT', 'AURO-3D CREATIVE LABEL', 'AURUM', 'AV VISIONEN', 'AV-JET', 'AVALON', 'AVENTI', 'AVEX TRAX', 'AXIOM', 'AXIS RECORDS', 'AYNGARAN', 'BAC FILMS', 'BACH FILMS', 'BANDAI VISUAL', 'BARCLAY', 'BBC', 'BRITISH BROADCASTING CORPORATION', 'BBI FILMS', 'BBI', 'BCI HOME ENTERTAINMENT', 'BEGGARS BANQUET', 'BEL AIR CLASSIQUES', 'BELGA FILMS', 'BELVEDERE', 'BENELUX FILM DISTRIBUTORS', 'BENNETT-WATT MEDIA', 'BERLIN CLASSICS', 'BERLINER PHILHARMONIKER RECORDINGS', 'BEST ENTERTAINMENT', 'BEYOND HOME ENTERTAINMENT', 'BFI VIDEO', 'BFI', 'BRITISH FILM INSTITUTE', 'BFS ENTERTAINMENT', 'BFS', 'BHAVANI', 'BIBER RECORDS', 'BIG HOME VIDEO', 'BILDSTÖRUNG', + 'BILDSTORUNG', 'BILL ZEBUB', 'BIRNENBLATT', 'BIT WEL', 'BLACK BOX', 'BLACK HILL 
PICTURES', 'BLACK HILL', 'BLACK HOLE RECORDINGS', 'BLACK HOLE', 'BLAQOUT', 'BLAUFIELD MUSIC', 'BLAUFIELD', 'BLOCKBUSTER ENTERTAINMENT', 'BLOCKBUSTER', 'BLU PHASE MEDIA', 'BLU-RAY ONLY', 'BLU-RAY', 'BLURAY ONLY', 'BLURAY', 'BLUE GENTIAN RECORDS', 'BLUE KINO', 'BLUE UNDERGROUND', 'BMG/ARISTA', 'BMG', 'BMGARISTA', 'BMG ARISTA', 'ARISTA', 'ARISTA/BMG', 'ARISTABMG', 'ARISTA BMG', 'BONTON FILM', 'BONTON', 'BOOMERANG PICTURES', 'BOOMERANG', 'BQHL ÉDITIONS', 'BQHL EDITIONS', 'BQHL', 'BREAKING GLASS', 'BRIDGESTONE', 'BRINK', 'BROAD GREEN PICTURES', 'BROAD GREEN', 'BUSCH MEDIA GROUP', 'BUSCH', 'C MAJOR', 'C.B.S.', 'CAICHANG', 'CALIFÓRNIA FILMES', 'CALIFORNIA FILMES', 'CALIFORNIA', 'CAMEO', 'CAMERA OBSCURA', 'CAMERATA', 'CAMP MOTION PICTURES', 'CAMP MOTION', 'CAPELIGHT PICTURES', 'CAPELIGHT', 'CAPITOL', 'CAPITOL RECORDS', 'CAPRICCI', 'CARGO RECORDS', 'CARLOTTA FILMS', 'CARLOTTA', 'CARLOTA', 'CARMEN FILM', 'CASCADE', 'CATCHPLAY', 'CAULDRON FILMS', 'CAULDRON', 'CBS TELEVISION STUDIOS', 'CBS', 'CCTV', 'CCV ENTERTAINMENT', 'CCV', 'CD BABY', 'CD LAND', 'CECCHI GORI', 'CENTURY MEDIA', 'CHUAN XUN SHI DAI MULTIMEDIA', 'CINE-ASIA', 'CINÉART', 'CINEART', 'CINEDIGM', 'CINEFIL IMAGICA', 'CINEMA EPOCH', 'CINEMA GUILD', 'CINEMA LIBRE STUDIOS', 'CINEMA MONDO', 'CINEMATIC VISION', 'CINEPLOIT RECORDS', 'CINESTRANGE EXTREME', 'CITEL VIDEO', 'CITEL', 'CJ ENTERTAINMENT', 'CJ', 'CLASSIC MEDIA', 'CLASSICFLIX', 'CLASSICLINE', 'CLAUDIO RECORDS', 'CLEAR VISION', 'CLEOPATRA', 'CLOSE UP', 'CMS MEDIA LIMITED', 'CMV LASERVISION', 'CN ENTERTAINMENT', 'CODE RED', 'COHEN MEDIA GROUP', 'COHEN', 'COIN DE MIRE CINÉMA', 'COIN DE MIRE CINEMA', 'COLOSSEO FILM', 'COLUMBIA', 'COLUMBIA PICTURES', 'COLUMBIA/TRI-STAR', 'TRI-STAR', 'COMMERCIAL MARKETING', 'CONCORD MUSIC GROUP', 'CONCORDE VIDEO', 'CONDOR', 'CONSTANTIN FILM', 'CONSTANTIN', 'CONSTANTINO FILMES', 'CONSTANTINO', 'CONSTRUCTIVE MEDIA SERVICE', 'CONSTRUCTIVE', 'CONTENT ZONE', 'CONTENTS GATE', 'COQUEIRO VERDE', 'CORNERSTONE MEDIA', 'CORNERSTONE', 'CP DIGITAL', 'CREST MOVIES', 'CRITERION', 'CRITERION COLLECTION', 'CC', 'CRYSTAL CLASSICS', 'CULT EPICS', 'CULT FILMS', 'CULT VIDEO', 'CURZON FILM WORLD', 'D FILMS', "D'AILLY COMPANY", 'DAILLY COMPANY', 'D AILLY COMPANY', "D'AILLY", 'DAILLY', 'D AILLY', 'DA CAPO', 'DA MUSIC', "DALL'ANGELO PICTURES", 'DALLANGELO PICTURES', "DALL'ANGELO", 'DALL ANGELO PICTURES', 'DALL ANGELO', 'DAREDO', 'DARK FORCE ENTERTAINMENT', 'DARK FORCE', 'DARK SIDE RELEASING', 'DARK SIDE', 'DAZZLER MEDIA', 'DAZZLER', 'DCM PICTURES', 'DCM', 'DEAPLANETA', 'DECCA', 'DEEPJOY', 'DEFIANT SCREEN ENTERTAINMENT', 'DEFIANT SCREEN', 'DEFIANT', 'DELOS', 'DELPHIAN RECORDS', 'DELPHIAN', 'DELTA MUSIC & ENTERTAINMENT', 'DELTA MUSIC AND ENTERTAINMENT', 'DELTA MUSIC ENTERTAINMENT', 'DELTA MUSIC', 'DELTAMAC CO. 
LTD.', 'DELTAMAC CO LTD', 'DELTAMAC CO', 'DELTAMAC', 'DEMAND MEDIA', 'DEMAND', 'DEP', 'DEUTSCHE GRAMMOPHON', 'DFW', 'DGM', 'DIAPHANA', 'DIGIDREAMS STUDIOS', 'DIGIDREAMS', 'DIGITAL ENVIRONMENTS', 'DIGITAL', 'DISCOTEK MEDIA', 'DISCOVERY CHANNEL', 'DISCOVERY', 'DISK KINO', 'DISNEY / BUENA VISTA', 'DISNEY', 'BUENA VISTA', 'DISNEY BUENA VISTA', 'DISTRIBUTION SELECT', 'DIVISA', 'DNC ENTERTAINMENT', 'DNC', 'DOGWOOF', 'DOLMEN HOME VIDEO', 'DOLMEN', 'DONAU FILM', 'DONAU', 'DORADO FILMS', 'DORADO', 'DRAFTHOUSE FILMS', 'DRAFTHOUSE', 'DRAGON FILM ENTERTAINMENT', 'DRAGON ENTERTAINMENT', 'DRAGON FILM', 'DRAGON', 'DREAMWORKS', 'DRIVE ON RECORDS', 'DRIVE ON', 'DRIVE-ON', 'DRIVEON', 'DS MEDIA', 'DTP ENTERTAINMENT AG', 'DTP ENTERTAINMENT', 'DTP AG', 'DTP', 'DTS ENTERTAINMENT', 'DTS', 'DUKE MARKETING', 'DUKE VIDEO DISTRIBUTION', 'DUKE', 'DUTCH FILMWORKS', 'DUTCH', 'DVD INTERNATIONAL', 'DVD', 'DYBEX', 'DYNAMIC', 'DYNIT', 'E1 ENTERTAINMENT', 'E1', 'EAGLE ENTERTAINMENT', 'EAGLE HOME ENTERTAINMENT PVT.LTD.', 'EAGLE HOME ENTERTAINMENT PVTLTD', 'EAGLE HOME ENTERTAINMENT PVT LTD', 'EAGLE HOME ENTERTAINMENT', 'EAGLE PICTURES', 'EAGLE ROCK ENTERTAINMENT', 'EAGLE ROCK', 'EAGLE VISION MEDIA', 'EAGLE VISION', 'EARMUSIC', 'EARTH ENTERTAINMENT', 'EARTH', 'ECHO BRIDGE ENTERTAINMENT', 'ECHO BRIDGE', 'EDEL GERMANY GMBH', 'EDEL GERMANY', 'EDEL RECORDS', 'EDITION TONFILM', 'EDITIONS MONTPARNASSE', 'EDKO FILMS LTD.', 'EDKO FILMS LTD', 'EDKO FILMS', + 'EDKO', "EIN'S M&M CO", 'EINS M&M CO', "EIN'S M&M", 'EINS M&M', 'ELEA-MEDIA', 'ELEA MEDIA', 'ELEA', 'ELECTRIC PICTURE', 'ELECTRIC', 'ELEPHANT FILMS', 'ELEPHANT', 'ELEVATION', 'EMI', 'EMON', 'EMS', 'EMYLIA', 'ENE MEDIA', 'ENE', 'ENTERTAINMENT IN VIDEO', 'ENTERTAINMENT IN', 'ENTERTAINMENT ONE', 'ENTERTAINMENT ONE FILMS CANADA INC.', 'ENTERTAINMENT ONE FILMS CANADA INC', 'ENTERTAINMENT ONE FILMS CANADA', 'ENTERTAINMENT ONE CANADA INC', 'ENTERTAINMENT ONE CANADA', 'ENTERTAINMENTONE', 'EONE', 'EOS', 'EPIC PICTURES', 'EPIC', 'EPIC RECORDS', 'ERATO', 'EROS', 'ESC EDITIONS', 'ESCAPI MEDIA BV', 'ESOTERIC RECORDINGS', 'ESPN FILMS', 'EUREKA ENTERTAINMENT', 'EUREKA', 'EURO PICTURES', 'EURO VIDEO', 'EUROARTS', 'EUROPA FILMES', 'EUROPA', 'EUROPACORP', 'EUROZOOM', 'EXCEL', 'EXPLOSIVE MEDIA', 'EXPLOSIVE', 'EXTRALUCID FILMS', 'EXTRALUCID', 'EYE SEE MOVIES', 'EYE SEE', 'EYK MEDIA', 'EYK', 'FABULOUS FILMS', 'FABULOUS', 'FACTORIS FILMS', 'FACTORIS', 'FARAO RECORDS', 'FARBFILM HOME ENTERTAINMENT', 'FARBFILM ENTERTAINMENT', 'FARBFILM HOME', 'FARBFILM', 'FEELGOOD ENTERTAINMENT', 'FEELGOOD', 'FERNSEHJUWELEN', 'FILM CHEST', 'FILM MEDIA', 'FILM MOVEMENT', 'FILM4', 'FILMART', 'FILMAURO', 'FILMAX', 'FILMCONFECT HOME ENTERTAINMENT', 'FILMCONFECT ENTERTAINMENT', 'FILMCONFECT HOME', 'FILMCONFECT', 'FILMEDIA', 'FILMJUWELEN', 'FILMOTEKA NARODAWA', 'FILMRISE', 'FINAL CUT ENTERTAINMENT', 'FINAL CUT', 'FIREHOUSE 12 RECORDS', 'FIREHOUSE 12', 'FIRST INTERNATIONAL PRODUCTION', 'FIRST INTERNATIONAL', 'FIRST LOOK STUDIOS', 'FIRST LOOK', 'FLAGMAN TRADE', 'FLASHSTAR FILMES', 'FLASHSTAR', 'FLICKER ALLEY', 'FNC ADD CULTURE', 'FOCUS FILMES', 'FOCUS', 'FOKUS MEDIA', 'FOKUSA', 'FOX PATHE EUROPA', 'FOX PATHE', 'FOX EUROPA', 'FOX/MGM', 'FOX MGM', 'MGM', 'MGM/FOX', 'FOX', 'FPE', 'FRANCE TÉLÉVISIONS DISTRIBUTION', 'FRANCE TELEVISIONS DISTRIBUTION', 'FRANCE TELEVISIONS', 'FRANCE', 'FREE DOLPHIN ENTERTAINMENT', 'FREE DOLPHIN', 'FREESTYLE DIGITAL MEDIA', 'FREESTYLE DIGITAL', 'FREESTYLE', 'FREMANTLE HOME ENTERTAINMENT', 'FREMANTLE ENTERTAINMENT', 'FREMANTLE HOME', 'FREMANTL', 'FRENETIC FILMS', 'FRENETIC', 'FRONTIER WORKS', 'FRONTIER', 
'FRONTIERS MUSIC', 'FRONTIERS RECORDS', 'FS FILM OY', 'FS FILM', 'FULL MOON FEATURES', 'FULL MOON', 'FUN CITY EDITIONS', 'FUN CITY', + 'FUNIMATION ENTERTAINMENT', 'FUNIMATION', 'FUSION', 'FUTUREFILM', 'G2 PICTURES', 'G2', 'GAGA COMMUNICATIONS', 'GAGA', 'GAIAM', 'GALAPAGOS', 'GAMMA HOME ENTERTAINMENT', 'GAMMA ENTERTAINMENT', 'GAMMA HOME', 'GAMMA', 'GARAGEHOUSE PICTURES', 'GARAGEHOUSE', 'GARAGEPLAY (車庫娛樂)', '車庫娛樂', 'GARAGEPLAY (Che Ku Yu Le )', 'GARAGEPLAY', 'Che Ku Yu Le', 'GAUMONT', 'GEFFEN', 'GENEON ENTERTAINMENT', 'GENEON', 'GENEON UNIVERSAL ENTERTAINMENT', 'GENERAL VIDEO RECORDING', 'GLASS DOLL FILMS', 'GLASS DOLL', 'GLOBE MUSIC MEDIA', 'GLOBE MUSIC', 'GLOBE MEDIA', 'GLOBE', 'GO ENTERTAIN', 'GO', 'GOLDEN HARVEST', 'GOOD!MOVIES', 'GOOD! MOVIES', 'GOOD MOVIES', 'GRAPEVINE VIDEO', 'GRAPEVINE', 'GRASSHOPPER FILM', 'GRASSHOPPER FILMS', 'GRASSHOPPER', 'GRAVITAS VENTURES', 'GRAVITAS', 'GREAT MOVIES', 'GREAT', 'GREEN APPLE ENTERTAINMENT', 'GREEN ENTERTAINMENT', 'GREEN APPLE', 'GREEN', 'GREENNARAE MEDIA', 'GREENNARAE', 'GRINDHOUSE RELEASING', 'GRINDHOUSE', 'GRIND HOUSE', 'GRYPHON ENTERTAINMENT', 'GRYPHON', 'GUNPOWDER & SKY', 'GUNPOWDER AND SKY', 'GUNPOWDER SKY', 'GUNPOWDER + SKY', 'GUNPOWDER', 'HANABEE ENTERTAINMENT', 'HANABEE', 'HANNOVER HOUSE', 'HANNOVER', 'HANSESOUND', 'HANSE SOUND', 'HANSE', 'HAPPINET', 'HARMONIA MUNDI', 'HARMONIA', 'HBO', 'HDC', 'HEC', 'HELL & BACK RECORDINGS', 'HELL AND BACK RECORDINGS', 'HELL & BACK', 'HELL AND BACK', "HEN'S TOOTH VIDEO", 'HENS TOOTH VIDEO', "HEN'S TOOTH", 'HENS TOOTH', 'HIGH FLIERS', 'HIGHLIGHT', 'HILLSONG', 'HISTORY CHANNEL', 'HISTORY', 'HK VIDÉO', 'HK VIDEO', 'HK', 'HMH HAMBURGER MEDIEN HAUS', 'HAMBURGER MEDIEN HAUS', 'HMH HAMBURGER MEDIEN', 'HMH HAMBURGER', 'HMH', 'HOLLYWOOD CLASSIC ENTERTAINMENT', 'HOLLYWOOD CLASSIC', 'HOLLYWOOD PICTURES', 'HOLLYWOOD', 'HOPSCOTCH ENTERTAINMENT', 'HOPSCOTCH', 'HPM', 'HÄNNSLER CLASSIC', 'HANNSLER CLASSIC', 'HANNSLER', 'I-CATCHER', 'I CATCHER', 'ICATCHER', 'I-ON NEW MEDIA', 'I ON NEW MEDIA', 'ION NEW MEDIA', 'ION MEDIA', 'I-ON', 'ION', 'IAN PRODUCTIONS', 'IAN', 'ICESTORM', 'ICON FILM DISTRIBUTION', 'ICON DISTRIBUTION', 'ICON FILM', 'ICON', 'IDEALE AUDIENCE', 'IDEALE', 'IFC FILMS', 'IFC', 'IFILM', 'ILLUSIONS UNLTD.', 'ILLUSIONS UNLTD', 'ILLUSIONS', 'IMAGE ENTERTAINMENT', 'IMAGE', 'IMAGEM FILMES', 'IMAGEM', 'IMOVISION', 'IMPERIAL CINEPIX', 'IMPRINT', 'IMPULS HOME ENTERTAINMENT', 'IMPULS ENTERTAINMENT', 'IMPULS HOME', 'IMPULS', 'IN-AKUSTIK', 'IN AKUSTIK', 'INAKUSTIK', 'INCEPTION MEDIA GROUP', 'INCEPTION MEDIA', 'INCEPTION GROUP', 'INCEPTION', 'INDEPENDENT', 'INDICAN', 'INDIE RIGHTS', 'INDIE', 'INDIGO', 'INFO', 'INJOINGAN', 'INKED PICTURES', 'INKED', 'INSIDE OUT MUSIC', 'INSIDE MUSIC', 'INSIDE OUT', 'INSIDE', 'INTERCOM', 'INTERCONTINENTAL VIDEO', 'INTERCONTINENTAL', 'INTERGROOVE', 'INTERSCOPE', 'INVINCIBLE PICTURES', 'INVINCIBLE', 'ISLAND/MERCURY', 'ISLAND MERCURY', 'ISLANDMERCURY', 'ISLAND & MERCURY', 'ISLAND AND MERCURY', 'ISLAND', 'ITN', 'ITV DVD', 'ITV', 'IVC', 'IVE ENTERTAINMENT', 'IVE', 'J&R ADVENTURES', 'J&R', 'JR', 'JAKOB', 'JONU MEDIA', 'JONU', 'JRB PRODUCTIONS', 'JRB', 'JUST BRIDGE ENTERTAINMENT', 'JUST BRIDGE', 'JUST ENTERTAINMENT', 'JUST', 'KABOOM ENTERTAINMENT', 'KABOOM', 'KADOKAWA ENTERTAINMENT', 'KADOKAWA', 'KAIROS', 'KALEIDOSCOPE ENTERTAINMENT', 'KALEIDOSCOPE', 'KAM & RONSON ENTERPRISES', 'KAM & RONSON', 'KAM&RONSON ENTERPRISES', 'KAM&RONSON', 'KAM AND RONSON ENTERPRISES', 'KAM AND RONSON', 'KANA HOME VIDEO', 'KARMA FILMS', 'KARMA', 'KATZENBERGER', 'KAZE', + 'KBS MEDIA', 'KBS', 'KD MEDIA', 'KD', 'KING 
MEDIA', 'KING', 'KING RECORDS', 'KINO LORBER', 'KINO', 'KINO SWIAT', 'KINOKUNIYA', 'KINOWELT HOME ENTERTAINMENT/DVD', 'KINOWELT HOME ENTERTAINMENT', 'KINOWELT ENTERTAINMENT', 'KINOWELT HOME DVD', 'KINOWELT ENTERTAINMENT/DVD', 'KINOWELT DVD', 'KINOWELT', 'KIT PARKER FILMS', 'KIT PARKER', 'KITTY MEDIA', 'KNM HOME ENTERTAINMENT', 'KNM ENTERTAINMENT', 'KNM HOME', 'KNM', 'KOBA FILMS', 'KOBA', 'KOCH ENTERTAINMENT', 'KOCH MEDIA', 'KOCH', 'KRAKEN RELEASING', 'KRAKEN', 'KSCOPE', 'KSM', 'KULTUR', "L'ATELIER D'IMAGES", "LATELIER D'IMAGES", "L'ATELIER DIMAGES", 'LATELIER DIMAGES', "L ATELIER D'IMAGES", "L'ATELIER D IMAGES", + 'L ATELIER D IMAGES', "L'ATELIER", 'L ATELIER', 'LATELIER', 'LA AVENTURA AUDIOVISUAL', 'LA AVENTURA', 'LACE GROUP', 'LACE', 'LASER PARADISE', 'LAYONS', 'LCJ EDITIONS', 'LCJ', 'LE CHAT QUI FUME', 'LE PACTE', 'LEDICK FILMHANDEL', 'LEGEND', 'LEOMARK STUDIOS', 'LEOMARK', 'LEONINE FILMS', 'LEONINE', 'LICHTUNG MEDIA LTD', 'LICHTUNG LTD', 'LICHTUNG MEDIA LTD.', 'LICHTUNG LTD.', 'LICHTUNG MEDIA', 'LICHTUNG', 'LIGHTHOUSE HOME ENTERTAINMENT', 'LIGHTHOUSE ENTERTAINMENT', 'LIGHTHOUSE HOME', 'LIGHTHOUSE', 'LIGHTYEAR', 'LIONSGATE FILMS', 'LIONSGATE', 'LIZARD CINEMA TRADE', 'LLAMENTOL', 'LOBSTER FILMS', 'LOBSTER', 'LOGON', 'LORBER FILMS', 'LORBER', 'LOS BANDITOS FILMS', 'LOS BANDITOS', 'LOUD & PROUD RECORDS', 'LOUD AND PROUD RECORDS', 'LOUD & PROUD', 'LOUD AND PROUD', 'LSO LIVE', 'LUCASFILM', 'LUCKY RED', 'LUMIÈRE HOME ENTERTAINMENT', 'LUMIERE HOME ENTERTAINMENT', 'LUMIERE ENTERTAINMENT', 'LUMIERE HOME', 'LUMIERE', 'M6 VIDEO', 'M6', 'MAD DIMENSION', 'MADMAN ENTERTAINMENT', 'MADMAN', 'MAGIC BOX', 'MAGIC PLAY', 'MAGNA HOME ENTERTAINMENT', 'MAGNA ENTERTAINMENT', 'MAGNA HOME', 'MAGNA', 'MAGNOLIA PICTURES', 'MAGNOLIA', 'MAIDEN JAPAN', 'MAIDEN', 'MAJENG MEDIA', 'MAJENG', 'MAJESTIC HOME ENTERTAINMENT', 'MAJESTIC ENTERTAINMENT', 'MAJESTIC HOME', 'MAJESTIC', 'MANGA HOME ENTERTAINMENT', 'MANGA ENTERTAINMENT', 'MANGA HOME', 'MANGA', 'MANTA LAB', 'MAPLE STUDIOS', 'MAPLE', 'MARCO POLO PRODUCTION', 'MARCO POLO', 'MARIINSKY', 'MARVEL STUDIOS', 'MARVEL', 'MASCOT RECORDS', 'MASCOT', 'MASSACRE VIDEO', 'MASSACRE', 'MATCHBOX', 'MATRIX D', 'MAXAM', 'MAYA HOME ENTERTAINMENT', 'MAYA ENTERTAINMENT', 'MAYA HOME', 'MAYAT', 'MDG', 'MEDIA BLASTERS', 'MEDIA FACTORY', 'MEDIA TARGET DISTRIBUTION', 'MEDIA TARGET', 'MEDIAINVISION', 'MEDIATOON', 'MEDIATRES ESTUDIO', 'MEDIATRES STUDIO', 'MEDIATRES', 'MEDICI ARTS', 'MEDICI CLASSICS', 'MEDIUMRARE ENTERTAINMENT', 'MEDIUMRARE', 'MEDUSA', 'MEGASTAR', 'MEI AH', 'MELI MÉDIAS', 'MELI MEDIAS', 'MEMENTO FILMS', 'MEMENTO', 'MENEMSHA FILMS', 'MENEMSHA', 'MERCURY', 'MERCURY STUDIOS', 'MERGE SOFT PRODUCTIONS', 'MERGE PRODUCTIONS', 'MERGE SOFT', 'MERGE', 'METAL BLADE RECORDS', 'METAL BLADE', 'METEOR', 'METRO-GOLDWYN-MAYER', 'METRO GOLDWYN MAYER', 'METROGOLDWYNMAYER', 'METRODOME VIDEO', 'METRODOME', 'METROPOLITAN', 'MFA+', 'MFA', 'MIG FILMGROUP', 'MIG', 'MILESTONE', 'MILL CREEK ENTERTAINMENT', 'MILL CREEK', 'MILLENNIUM MEDIA', 'MILLENNIUM', 'MIRAGE ENTERTAINMENT', 'MIRAGE', 'MIRAMAX', 'MISTERIYA ZVUKA', 'MK2', 'MODE RECORDS', 'MODE', 'MOMENTUM PICTURES', 'MONDO HOME ENTERTAINMENT', 'MONDO ENTERTAINMENT', 'MONDO HOME', 'MONDO MACABRO', 'MONGREL MEDIA', 'MONOLIT', 'MONOLITH VIDEO', 'MONOLITH', 'MONSTER PICTURES', 'MONSTER', 'MONTEREY VIDEO', 'MONTEREY', 'MONUMENT RELEASING', 'MONUMENT', 'MORNINGSTAR', 'MORNING STAR', 'MOSERBAER', 'MOVIEMAX', 'MOVINSIDE', 'MPI MEDIA GROUP', 'MPI MEDIA', 'MPI', 'MR. 
BONGO FILMS', 'MR BONGO FILMS', 'MR BONGO', 'MRG (MERIDIAN)', 'MRG MERIDIAN', 'MRG', 'MERIDIAN', 'MUBI', 'MUG SHOT PRODUCTIONS', 'MUG SHOT', 'MULTIMUSIC', 'MULTI-MUSIC', 'MULTI MUSIC', 'MUSE', 'MUSIC BOX FILMS', 'MUSIC BOX', 'MUSICBOX', 'MUSIC BROKERS', 'MUSIC THEORIES', 'MUSIC VIDEO DISTRIBUTORS', 'MUSIC VIDEO', 'MUSTANG ENTERTAINMENT', 'MUSTANG', 'MVD VISUAL', 'MVD', 'MVD/VSC', 'MVL', 'MVM ENTERTAINMENT', 'MVM', 'MYNDFORM', 'MYSTIC NIGHT PICTURES', 'MYSTIC NIGHT', 'NAMELESS MEDIA', 'NAMELESS', 'NAPALM RECORDS', 'NAPALM', 'NATIONAL ENTERTAINMENT MEDIA', 'NATIONAL ENTERTAINMENT', 'NATIONAL MEDIA', 'NATIONAL FILM ARCHIVE', 'NATIONAL ARCHIVE', 'NATIONAL FILM', 'NATIONAL GEOGRAPHIC', 'NAT GEO TV', 'NAT GEO', 'NGO', 'NAXOS', 'NBCUNIVERSAL ENTERTAINMENT JAPAN', 'NBC UNIVERSAL ENTERTAINMENT JAPAN', 'NBCUNIVERSAL JAPAN', 'NBC UNIVERSAL JAPAN', 'NBC JAPAN', 'NBO ENTERTAINMENT', 'NBO', 'NEOS', 'NETFLIX', 'NETWORK', 'NEW BLOOD', 'NEW DISC', 'NEW KSM', 'NEW LINE CINEMA', 'NEW LINE', 'NEW MOVIE TRADING CO. LTD', 'NEW MOVIE TRADING CO LTD', 'NEW MOVIE TRADING CO', 'NEW MOVIE TRADING', 'NEW WAVE FILMS', 'NEW WAVE', 'NFI', 'NHK', 'NIPPONART', 'NIS AMERICA', 'NJUTAFILMS', 'NOBLE ENTERTAINMENT', 'NOBLE', 'NORDISK FILM', 'NORDISK', 'NORSK FILM', 'NORSK', 'NORTH AMERICAN MOTION PICTURES', 'NOS AUDIOVISUAIS', 'NOTORIOUS PICTURES', 'NOTORIOUS', 'NOVA MEDIA', 'NOVA', 'NOVA SALES AND DISTRIBUTION', 'NOVA SALES & DISTRIBUTION', 'NSM', 'NSM RECORDS', 'NUCLEAR BLAST', 'NUCLEUS FILMS', 'NUCLEUS', 'OBERLIN MUSIC', 'OBERLIN', 'OBRAS-PRIMAS DO CINEMA', 'OBRAS PRIMAS DO CINEMA', 'OBRASPRIMAS DO CINEMA', 'OBRAS-PRIMAS CINEMA', 'OBRAS PRIMAS CINEMA', 'OBRASPRIMAS CINEMA', 'OBRAS-PRIMAS', 'OBRAS PRIMAS', 'OBRASPRIMAS', 'ODEON', 'OFDB FILMWORKS', 'OFDB', 'OLIVE FILMS', 'OLIVE', 'ONDINE', 'ONSCREEN FILMS', 'ONSCREEN', 'OPENING DISTRIBUTION', 'OPERA AUSTRALIA', 'OPTIMUM HOME ENTERTAINMENT', 'OPTIMUM ENTERTAINMENT', 'OPTIMUM HOME', 'OPTIMUM', 'OPUS ARTE', 'ORANGE STUDIO', 'ORANGE', 'ORLANDO EASTWOOD FILMS', 'ORLANDO FILMS', 'ORLANDO EASTWOOD', 'ORLANDO', 'ORUSTAK PICTURES', 'ORUSTAK', 'OSCILLOSCOPE PICTURES', 'OSCILLOSCOPE', 'OUTPLAY', 'PALISADES TARTAN', 'PAN VISION', 'PANVISION', 'PANAMINT CINEMA', 'PANAMINT', 'PANDASTORM ENTERTAINMENT', 'PANDA STORM ENTERTAINMENT', 'PANDASTORM', 'PANDA STORM', 'PANDORA FILM', 'PANDORA', 'PANEGYRIC', 'PANORAMA', 'PARADE DECK FILMS', 'PARADE DECK', 'PARADISE', 'PARADISO FILMS', 'PARADOX', 'PARAMOUNT PICTURES', 'PARAMOUNT', 'PARIS FILMES', 'PARIS FILMS', 'PARIS', 'PARK CIRCUS', 'PARLOPHONE', 'PASSION RIVER', 'PATHE DISTRIBUTION', 'PATHE', 'PBS', 'PEACE ARCH TRINITY', 'PECCADILLO PICTURES', 'PEPPERMINT', 'PHASE 4 FILMS', 'PHASE 4', 'PHILHARMONIA BAROQUE', 'PICTURE HOUSE ENTERTAINMENT', 'PICTURE ENTERTAINMENT', 'PICTURE HOUSE', 'PICTURE', 'PIDAX', + 'PINK FLOYD RECORDS', 'PINK FLOYD', 'PINNACLE FILMS', 'PINNACLE', 'PLAIN', 'PLATFORM ENTERTAINMENT LIMITED', 'PLATFORM ENTERTAINMENT LTD', 'PLATFORM ENTERTAINMENT LTD.', 'PLATFORM ENTERTAINMENT', 'PLATFORM', 'PLAYARTE', 'PLG UK CLASSICS', 'PLG UK', 'PLG', 'POLYBAND & TOPPIC VIDEO/WVG', 'POLYBAND AND TOPPIC VIDEO/WVG', 'POLYBAND & TOPPIC VIDEO WVG', 'POLYBAND & TOPPIC VIDEO AND WVG', 'POLYBAND & TOPPIC VIDEO & WVG', 'POLYBAND AND TOPPIC VIDEO WVG', 'POLYBAND AND TOPPIC VIDEO AND WVG', 'POLYBAND AND TOPPIC VIDEO & WVG', 'POLYBAND & TOPPIC VIDEO', 'POLYBAND AND TOPPIC VIDEO', 'POLYBAND & TOPPIC', 'POLYBAND AND TOPPIC', 'POLYBAND', 'WVG', 'POLYDOR', 'PONY', 'PONY CANYON', 'POTEMKINE', 'POWERHOUSE FILMS', 'POWERHOUSE', 'POWERSTATIOM', 'PRIDE & JOY', 
'PRIDE AND JOY', 'PRINZ MEDIA', 'PRINZ', 'PRIS AUDIOVISUAIS', 'PRO VIDEO', 'PRO-VIDEO', 'PRO-MOTION', 'PRO MOTION', 'PROD. JRB', 'PROD JRB', 'PRODISC', 'PROKINO', 'PROVOGUE RECORDS', 'PROVOGUE', 'PROWARE', 'PULP VIDEO', 'PULP', 'PULSE VIDEO', 'PULSE', 'PURE AUDIO RECORDINGS', 'PURE AUDIO', 'PURE FLIX ENTERTAINMENT', 'PURE FLIX', 'PURE ENTERTAINMENT', 'PYRAMIDE VIDEO', 'PYRAMIDE', 'QUALITY FILMS', 'QUALITY', 'QUARTO VALLEY RECORDS', 'QUARTO VALLEY', 'QUESTAR', 'R SQUARED FILMS', 'R SQUARED', 'RAPID EYE MOVIES', 'RAPID EYE', 'RARO VIDEO', 'RARO', 'RAROVIDEO U.S.', 'RAROVIDEO US', 'RARO VIDEO US', 'RARO VIDEO U.S.', 'RARO U.S.', 'RARO US', 'RAVEN BANNER RELEASING', 'RAVEN BANNER', 'RAVEN', 'RAZOR DIGITAL ENTERTAINMENT', 'RAZOR DIGITAL', 'RCA', 'RCO LIVE', 'RCO', 'RCV', 'REAL GONE MUSIC', 'REAL GONE', 'REANIMEDIA', 'REANI MEDIA', 'REDEMPTION', 'REEL', 'RELIANCE HOME VIDEO & GAMES', 'RELIANCE HOME VIDEO AND GAMES', 'RELIANCE HOME VIDEO', 'RELIANCE VIDEO', 'RELIANCE HOME', 'RELIANCE', 'REM CULTURE', 'REMAIN IN LIGHT', 'REPRISE', 'RESEN', 'RETROMEDIA', 'REVELATION FILMS LTD.', 'REVELATION FILMS LTD', 'REVELATION FILMS', 'REVELATION LTD.', 'REVELATION LTD', 'REVELATION', 'REVOLVER ENTERTAINMENT', 'REVOLVER', 'RHINO MUSIC', 'RHINO', 'RHV', 'RIGHT STUF', 'RIMINI EDITIONS', 'RISING SUN MEDIA', 'RLJ ENTERTAINMENT', 'RLJ', 'ROADRUNNER RECORDS', 'ROADSHOW ENTERTAINMENT', 'ROADSHOW', 'RONE', 'RONIN FLIX', 'ROTANA HOME ENTERTAINMENT', 'ROTANA ENTERTAINMENT', 'ROTANA HOME', 'ROTANA', 'ROUGH TRADE', + 'ROUNDER', 'SAFFRON HILL FILMS', 'SAFFRON HILL', 'SAFFRON', 'SAMUEL GOLDWYN FILMS', 'SAMUEL GOLDWYN', 'SAN FRANCISCO SYMPHONY', 'SANDREW METRONOME', 'SAPHRANE', 'SAVOR', 'SCANBOX ENTERTAINMENT', 'SCANBOX', 'SCENIC LABS', 'SCHRÖDERMEDIA', 'SCHRODERMEDIA', 'SCHRODER MEDIA', 'SCORPION RELEASING', 'SCORPION', 'SCREAM TEAM RELEASING', 'SCREAM TEAM', 'SCREEN MEDIA', 'SCREEN', 'SCREENBOUND PICTURES', 'SCREENBOUND', 'SCREENWAVE MEDIA', 'SCREENWAVE', 'SECOND RUN', 'SECOND SIGHT', 'SEEDSMAN GROUP', 'SELECT VIDEO', 'SELECTA VISION', 'SENATOR', 'SENTAI FILMWORKS', 'SENTAI', 'SEVEN7', 'SEVERIN FILMS', 'SEVERIN', 'SEVILLE', 'SEYONS ENTERTAINMENT', 'SEYONS', 'SF STUDIOS', 'SGL ENTERTAINMENT', 'SGL', 'SHAMELESS', 'SHAMROCK MEDIA', 'SHAMROCK', 'SHANGHAI EPIC MUSIC ENTERTAINMENT', 'SHANGHAI EPIC ENTERTAINMENT', 'SHANGHAI EPIC MUSIC', 'SHANGHAI MUSIC ENTERTAINMENT', 'SHANGHAI ENTERTAINMENT', 'SHANGHAI MUSIC', 'SHANGHAI', 'SHEMAROO', 'SHOCHIKU', 'SHOCK', 'SHOGAKU KAN', 'SHOUT FACTORY', 'SHOUT! 
FACTORY', 'SHOUT', 'SHOUT!', 'SHOWBOX', 'SHOWTIME ENTERTAINMENT', 'SHOWTIME', 'SHRIEK SHOW', 'SHUDDER', 'SIDONIS', 'SIDONIS CALYSTA', 'SIGNAL ONE ENTERTAINMENT', 'SIGNAL ONE', 'SIGNATURE ENTERTAINMENT', 'SIGNATURE', 'SILVER VISION', 'SINISTER FILM', 'SINISTER', 'SIREN VISUAL ENTERTAINMENT', 'SIREN VISUAL', 'SIREN ENTERTAINMENT', 'SIREN', 'SKANI', 'SKY DIGI', + 'SLASHER // VIDEO', 'SLASHER / VIDEO', 'SLASHER VIDEO', 'SLASHER', 'SLOVAK FILM INSTITUTE', 'SLOVAK FILM', 'SFI', 'SM LIFE DESIGN GROUP', 'SMOOTH PICTURES', 'SMOOTH', 'SNAPPER MUSIC', 'SNAPPER', 'SODA PICTURES', 'SODA', 'SONO LUMINUS', 'SONY MUSIC', 'SONY PICTURES', 'SONY', 'SONY PICTURES CLASSICS', 'SONY CLASSICS', 'SOUL MEDIA', 'SOUL', 'SOULFOOD MUSIC DISTRIBUTION', 'SOULFOOD DISTRIBUTION', 'SOULFOOD MUSIC', 'SOULFOOD', 'SOYUZ', 'SPECTRUM', 'SPENTZOS FILM', 'SPENTZOS', 'SPIRIT ENTERTAINMENT', 'SPIRIT', 'SPIRIT MEDIA GMBH', 'SPIRIT MEDIA', 'SPLENDID ENTERTAINMENT', 'SPLENDID FILM', 'SPO', 'SQUARE ENIX', 'SRI BALAJI VIDEO', 'SRI BALAJI', 'SRI', 'SRI VIDEO', 'SRS CINEMA', 'SRS', 'SSO RECORDINGS', 'SSO', 'ST2 MUSIC', 'ST2', 'STAR MEDIA ENTERTAINMENT', 'STAR ENTERTAINMENT', 'STAR MEDIA', 'STAR', 'STARLIGHT', 'STARZ / ANCHOR BAY', 'STARZ ANCHOR BAY', 'STARZ', 'ANCHOR BAY', 'STER KINEKOR', 'STERLING ENTERTAINMENT', 'STERLING', 'STINGRAY', 'STOCKFISCH RECORDS', 'STOCKFISCH', 'STRAND RELEASING', 'STRAND', 'STUDIO 4K', 'STUDIO CANAL', 'STUDIO GHIBLI', 'GHIBLI', 'STUDIO HAMBURG ENTERPRISES', 'HAMBURG ENTERPRISES', 'STUDIO HAMBURG', 'HAMBURG', 'STUDIO S', 'SUBKULTUR ENTERTAINMENT', 'SUBKULTUR', 'SUEVIA FILMS', 'SUEVIA', 'SUMMIT ENTERTAINMENT', 'SUMMIT', 'SUNFILM ENTERTAINMENT', 'SUNFILM', 'SURROUND RECORDS', 'SURROUND', 'SVENSK FILMINDUSTRI', 'SVENSK', 'SWEN FILMES', 'SWEN FILMS', 'SWEN', 'SYNAPSE FILMS', 'SYNAPSE', 'SYNDICADO', 'SYNERGETIC', 'T- SERIES', 'T-SERIES', 'T SERIES', 'TSERIES', 'T.V.P.', 'TVP', 'TACET RECORDS', 'TACET', 'TAI SENG', 'TAI SHENG', 'TAKEONE', 'TAKESHOBO', 'TAMASA DIFFUSION', 'TC ENTERTAINMENT', 'TC', 'TDK', 'TEAM MARKETING', 'TEATRO REAL', 'TEMA DISTRIBUCIONES', 'TEMPE DIGITAL', 'TF1 VIDÉO', 'TF1 VIDEO', 'TF1', 'THE BLU', 'BLU', 'THE ECSTASY OF FILMS', 'THE FILM DETECTIVE', 'FILM DETECTIVE', 'THE JOKERS', 'JOKERS', 'THE ON', 'ON', 'THIMFILM', 'THIM FILM', 'THIM', 'THIRD WINDOW FILMS', 'THIRD WINDOW', '3RD WINDOW FILMS', '3RD WINDOW', 'THUNDERBEAN ANIMATION', 'THUNDERBEAN', 'THUNDERBIRD RELEASING', 'THUNDERBIRD', 'TIBERIUS FILM', 'TIME LIFE', 'TIMELESS MEDIA GROUP', 'TIMELESS MEDIA', 'TIMELESS GROUP', 'TIMELESS', 'TLA RELEASING', 'TLA', 'TOBIS FILM', 'TOBIS', 'TOEI', 'TOHO', 'TOKYO SHOCK', 'TOKYO', 'TONPOOL MEDIEN GMBH', 'TONPOOL MEDIEN', 'TOPICS ENTERTAINMENT', 'TOPICS', 'TOUCHSTONE PICTURES', 'TOUCHSTONE', 'TRANSMISSION FILMS', 'TRANSMISSION', 'TRAVEL VIDEO STORE', 'TRIART', 'TRIGON FILM', 'TRIGON', 'TRINITY HOME ENTERTAINMENT', 'TRINITY ENTERTAINMENT', 'TRINITY HOME', 'TRINITY', 'TRIPICTURES', 'TRI-PICTURES', 'TRI PICTURES', 'TROMA', 'TURBINE MEDIEN', 'TURTLE RECORDS', 'TURTLE', 'TVA FILMS', 'TVA', 'TWILIGHT TIME', 'TWILIGHT', 'TT', 'TWIN CO., LTD.', 'TWIN CO, LTD.', 'TWIN CO., LTD', 'TWIN CO, LTD', 'TWIN CO LTD', 'TWIN LTD', 'TWIN CO.', 'TWIN CO', 'TWIN', 'UCA', 'UDR', 'UEK', 'UFA/DVD', 'UFA DVD', 'UFADVD', 'UGC PH', 'ULTIMATE3DHEAVEN', 'ULTRA', 'UMBRELLA ENTERTAINMENT', 'UMBRELLA', 'UMC', "UNCORK'D ENTERTAINMENT", 'UNCORKD ENTERTAINMENT', 'UNCORK D ENTERTAINMENT', "UNCORK'D", 'UNCORK D', 'UNCORKD', 'UNEARTHED FILMS', 'UNEARTHED', 'UNI DISC', 'UNIMUNDOS', 'UNITEL', 'UNIVERSAL MUSIC', 'UNIVERSAL SONY PICTURES HOME 
ENTERTAINMENT', 'UNIVERSAL SONY PICTURES ENTERTAINMENT', 'UNIVERSAL SONY PICTURES HOME', 'UNIVERSAL SONY PICTURES', 'UNIVERSAL HOME ENTERTAINMENT', 'UNIVERSAL ENTERTAINMENT', + 'UNIVERSAL HOME', 'UNIVERSAL STUDIOS', 'UNIVERSAL', 'UNIVERSE LASER & VIDEO CO.', 'UNIVERSE LASER AND VIDEO CO.', 'UNIVERSE LASER & VIDEO CO', 'UNIVERSE LASER AND VIDEO CO', 'UNIVERSE LASER CO.', 'UNIVERSE LASER CO', 'UNIVERSE LASER', 'UNIVERSUM FILM', 'UNIVERSUM', 'UTV', 'VAP', 'VCI', 'VENDETTA FILMS', 'VENDETTA', 'VERSÁTIL HOME VIDEO', 'VERSÁTIL VIDEO', 'VERSÁTIL HOME', 'VERSÁTIL', 'VERSATIL HOME VIDEO', 'VERSATIL VIDEO', 'VERSATIL HOME', 'VERSATIL', 'VERTICAL ENTERTAINMENT', 'VERTICAL', 'VÉRTICE 360º', 'VÉRTICE 360', 'VERTICE 360o', 'VERTICE 360', 'VERTIGO BERLIN', 'VÉRTIGO FILMS', 'VÉRTIGO', 'VERTIGO FILMS', 'VERTIGO', 'VERVE PICTURES', 'VIA VISION ENTERTAINMENT', 'VIA VISION', 'VICOL ENTERTAINMENT', 'VICOL', 'VICOM', 'VICTOR ENTERTAINMENT', 'VICTOR', 'VIDEA CDE', 'VIDEO FILM EXPRESS', 'VIDEO FILM', 'VIDEO EXPRESS', 'VIDEO MUSIC, INC.', 'VIDEO MUSIC, INC', 'VIDEO MUSIC INC.', 'VIDEO MUSIC INC', 'VIDEO MUSIC', 'VIDEO SERVICE CORP.', 'VIDEO SERVICE CORP', 'VIDEO SERVICE', 'VIDEO TRAVEL', 'VIDEOMAX', 'VIDEO MAX', 'VII PILLARS ENTERTAINMENT', 'VII PILLARS', 'VILLAGE FILMS', 'VINEGAR SYNDROME', 'VINEGAR', 'VS', 'VINNY MOVIES', 'VINNY', 'VIRGIL FILMS & ENTERTAINMENT', 'VIRGIL FILMS AND ENTERTAINMENT', 'VIRGIL ENTERTAINMENT', 'VIRGIL FILMS', 'VIRGIL', 'VIRGIN RECORDS', 'VIRGIN', 'VISION FILMS', 'VISION', 'VISUAL ENTERTAINMENT GROUP', + 'VISUAL GROUP', 'VISUAL ENTERTAINMENT', 'VISUAL', 'VIVENDI VISUAL ENTERTAINMENT', 'VIVENDI VISUAL', 'VIVENDI', 'VIZ PICTURES', 'VIZ', 'VLMEDIA', 'VL MEDIA', 'VL', 'VOLGA', 'VVS FILMS', 'VVS', 'VZ HANDELS GMBH', 'VZ HANDELS', 'WARD RECORDS', 'WARD', 'WARNER BROS.', 'WARNER BROS', 'WARNER ARCHIVE', 'WARNER ARCHIVE COLLECTION', 'WAC', 'WARNER', 'WARNER MUSIC', 'WEA', 'WEINSTEIN COMPANY', 'WEINSTEIN', 'WELL GO USA', 'WELL GO', 'WELTKINO FILMVERLEIH', 'WEST VIDEO', 'WEST', 'WHITE PEARL MOVIES', 'WHITE PEARL', 'WICKED-VISION MEDIA', 'WICKED VISION MEDIA', 'WICKEDVISION MEDIA', 'WICKED-VISION', 'WICKED VISION', 'WICKEDVISION', 'WIENERWORLD', 'WILD BUNCH', 'WILD EYE RELEASING', 'WILD EYE', 'WILD SIDE VIDEO', 'WILD SIDE', 'WME', 'WOLFE VIDEO', 'WOLFE', 'WORD ON FIRE', 'WORKS FILM GROUP', 'WORLD WRESTLING', 'WVG MEDIEN', 'WWE STUDIOS', 'WWE', 'X RATED KULT', 'X-RATED KULT', 'X RATED CULT', 'X-RATED CULT', 'X RATED', 'X-RATED', 'XCESS', 'XLRATOR', 'XT VIDEO', 'XT', 'YAMATO VIDEO', 'YAMATO', 'YASH RAJ FILMS', 'YASH RAJS', 'ZEITGEIST FILMS', 'ZEITGEIST', 'ZENITH PICTURES', 'ZENITH', 'ZIMA', 'ZYLO', 'ZYX MUSIC', 'ZYX', + 'MASTERS OF CINEMA', 'MOC' + ] + distributor_out = "" + if distributor_in not in [None, "None", ""]: + for each in distributor_list: + if distributor_in.upper() == each: + distributor_out = each + return distributor_out + + +async def get_service(video=None, tag=None, audio=None, guess_title=None, get_services_only=False): + services = { + '9NOW': '9NOW', '9Now': '9NOW', 'AE': 'AE', 'A&E': 'AE', 'AJAZ': 'AJAZ', 'Al Jazeera English': 'AJAZ', + 'ALL4': 'ALL4', 'Channel 4': 'ALL4', 'AMBC': 'AMBC', 'ABC': 'AMBC', 'AMC': 'AMC', 'AMZN': 'AMZN', + 'Amazon Prime': 'AMZN', 'ANLB': 'ANLB', 'AnimeLab': 'ANLB', 'ANPL': 'ANPL', 'Animal Planet': 'ANPL', + 'AOL': 'AOL', 'ARD': 'ARD', 'AS': 'AS', 'Adult Swim': 'AS', 'ATK': 'ATK', "America's Test Kitchen": 'ATK', + 'ATVP': 'ATVP', 'AppleTV': 'ATVP', 'AUBC': 'AUBC', 'ABC Australia': 'AUBC', 'BCORE': 'BCORE', 'BKPL': 'BKPL', + 'Blackpills': 'BKPL', 
'BluTV': 'BLU', 'Binge': 'BNGE', 'BOOM': 'BOOM', 'Boomerang': 'BOOM', 'BRAV': 'BRAV', + 'BravoTV': 'BRAV', 'CBC': 'CBC', 'CBS': 'CBS', 'CC': 'CC', 'Comedy Central': 'CC', 'CCGC': 'CCGC', + 'Comedians in Cars Getting Coffee': 'CCGC', 'CHGD': 'CHGD', 'CHRGD': 'CHGD', 'CMAX': 'CMAX', 'Cinemax': 'CMAX', + 'CMOR': 'CMOR', 'CMT': 'CMT', 'Country Music Television': 'CMT', 'CN': 'CN', 'Cartoon Network': 'CN', 'CNBC': 'CNBC', + 'CNLP': 'CNLP', 'Canal+': 'CNLP', 'CNGO': 'CNGO', 'Cinego': 'CNGO', 'COOK': 'COOK', 'CORE': 'CORE', 'CR': 'CR', + 'Crunchy Roll': 'CR', 'Crave': 'CRAV', 'CRIT': 'CRIT', 'Criterion': 'CRIT', 'CRKL': 'CRKL', 'Crackle': 'CRKL', + 'CSPN': 'CSPN', 'CSpan': 'CSPN', 'CTV': 'CTV', 'CUR': 'CUR', 'CuriosityStream': 'CUR', 'CW': 'CW', 'The CW': 'CW', + 'CWS': 'CWS', 'CWSeed': 'CWS', 'DAZN': 'DAZN', 'DCU': 'DCU', 'DC Universe': 'DCU', 'DDY': 'DDY', + 'Digiturk Diledigin Yerde': 'DDY', 'DEST': 'DEST', 'DramaFever': 'DF', 'DHF': 'DHF', 'Deadhouse Films': 'DHF', + 'DISC': 'DISC', 'Discovery': 'DISC', 'DIY': 'DIY', 'DIY Network': 'DIY', 'DOCC': 'DOCC', 'Doc Club': 'DOCC', + 'DPLY': 'DPLY', 'DPlay': 'DPLY', 'DRPO': 'DRPO', 'Discovery Plus': 'DSCP', 'DSKI': 'DSKI', 'Daisuki': 'DSKI', + 'DSNP': 'DSNP', 'Disney+': 'DSNP', 'DSNY': 'DSNY', 'Disney': 'DSNY', 'DTV': 'DTV', 'EPIX': 'EPIX', 'ePix': 'EPIX', + 'ESPN': 'ESPN', 'ESQ': 'ESQ', 'Esquire': 'ESQ', 'ETTV': 'ETTV', 'El Trece': 'ETTV', 'ETV': 'ETV', 'E!': 'ETV', + 'FAM': 'FAM', 'Fandor': 'FANDOR', 'Facebook Watch': 'FBWatch', 'FJR': 'FJR', 'Family Jr': 'FJR', 'FMIO': 'FMIO', + 'Filmio': 'FMIO', 'FOOD': 'FOOD', 'Food Network': 'FOOD', 'FOX': 'FOX', 'Fox': 'FOX', 'Fox Premium': 'FOXP', + 'UFC Fight Pass': 'FP', 'FPT': 'FPT', 'FREE': 'FREE', 'Freeform': 'FREE', 'FTV': 'FTV', 'FUNI': 'FUNI', 'FUNi': 'FUNI', + 'Foxtel': 'FXTL', 'FYI': 'FYI', 'FYI Network': 'FYI', 'GC': 'GC', 'NHL GameCenter': 'GC', 'GLBL': 'GLBL', + 'Global': 'GLBL', 'GLOB': 'GLOB', 'GloboSat Play': 'GLOB', 'GO90': 'GO90', 'GagaOOLala': 'Gaga', 'HBO': 'HBO', + 'HBO Go': 'HBO', 'HGTV': 'HGTV', 'HIDI': 'HIDI', 'HIST': 'HIST', 'History': 'HIST', 'HLMK': 'HLMK', 'Hallmark': 'HLMK', + 'HMAX': 'HMAX', 'HBO Max': 'HMAX', 'HS': 'HTSR', 'HTSR': 'HTSR', 'HSTR': 'Hotstar', 'HULU': 'HULU', 'Hulu': 'HULU', + 'hoichoi': 'HoiChoi', 'ID': 'ID', 'Investigation Discovery': 'ID', 'IFC': 'IFC', 'iflix': 'IFX', + 'National Audiovisual Institute': 'INA', 'ITV': 'ITV', 'JOYN': 'JOYN', 'KAYO': 'KAYO', 'KNOW': 'KNOW', 'Knowledge Network': 'KNOW', + 'KNPY': 'KNPY', 'Kanopy': 'KNPY', 'LIFE': 'LIFE', 'Lifetime': 'LIFE', 'LN': 'LN', 'MA': 'MA', 'Movies Anywhere': 'MA', + 'MAX': 'MAX', 'MBC': 'MBC', 'MNBC': 'MNBC', 'MSNBC': 'MNBC', 'MTOD': 'MTOD', 'Motor Trend OnDemand': 'MTOD', 'MTV': 'MTV', + 'MUBI': 'MUBI', 'NATG': 'NATG', 'National Geographic': 'NATG', 'NBA': 'NBA', 'NBA TV': 'NBA', 'NBC': 'NBC', 'NF': 'NF', + 'Netflix': 'NF', 'National Film Board': 'NFB', 'NFL': 'NFL', 'NFLN': 'NFLN', 'NFL Now': 'NFLN', 'NICK': 'NICK', + 'Nickelodeon': 'NICK', 'NOW': 'NOW', 'NRK': 'NRK', 'Norsk Rikskringkasting': 'NRK', 'OnDemandKorea': 'ODK', 'Opto': 'OPTO', + 'ORF': 'ORF', 'ORF ON': 'ORF', 'Oprah Winfrey Network': 'OWN', 'PA': 'PA', 'PBS': 'PBS', 'PBSK': 'PBSK', 'PBS Kids': 'PBSK', + 'PCOK': 'PCOK', 'Peacock': 'PCOK', 'PLAY': 'PLAY', 'PLUZ': 'PLUZ', 'Pluzz': 'PLUZ', 'PMNP': 'PMNP', 'PMNT': 'PMNT', + 'PMTP': 'PMTP', 'POGO': 'POGO', 'PokerGO': 'POGO', 'PSN': 'PSN', 'Playstation Network': 'PSN', 'PUHU': 'PUHU', 'QIBI': 'QIBI', + 'RED': 'RED', 'YouTube Red': 'RED', 'RKTN': 'RKTN', 'Rakuten TV': 'RKTN', 'The Roku Channel': 
'ROKU', 'RNET': 'RNET', + 'OBB Railnet': 'RNET', 'RSTR': 'RSTR', 'RTE': 'RTE', 'RTE One': 'RTE', 'RTLP': 'RTLP', 'RTL+': 'RTLP', 'RUUTU': 'RUUTU', + 'SBS': 'SBS', 'Science Channel': 'SCI', 'SESO': 'SESO', 'SeeSo': 'SESO', 'SHMI': 'SHMI', 'Shomi': 'SHMI', 'SKST': 'SKST', + 'SkyShowtime': 'SKST', 'SHO': 'SHO', 'Showtime': 'SHO', 'SNET': 'SNET', 'Sportsnet': 'SNET', 'Sony': 'SONY', 'SPIK': 'SPIK', + 'Spike': 'SPIK', 'Spike TV': 'SPKE', 'SPRT': 'SPRT', 'Sprout': 'SPRT', 'STAN': 'STAN', 'Stan': 'STAN', 'STARZ': 'STARZ', + 'STRP': 'STRP', 'Star+': 'STRP', 'STZ': 'STZ', 'Starz': 'STZ', 'SVT': 'SVT', 'Sveriges Television': 'SVT', 'SWER': 'SWER', + 'SwearNet': 'SWER', 'SYFY': 'SYFY', 'Syfy': 'SYFY', 'TBS': 'TBS', 'TEN': 'TEN', 'TIMV': 'TIMV', 'TIMvision': 'TIMV', + 'TFOU': 'TFOU', 'TFou': 'TFOU', 'TIMV': 'TIMV', 'TLC': 'TLC', 'TOU': 'TOU', 'TRVL': 'TRVL', 'TUBI': 'TUBI', 'TubiTV': 'TUBI', + 'TV3': 'TV3', 'TV3 Ireland': 'TV3', 'TV4': 'TV4', 'TV4 Sweeden': 'TV4', 'TVING': 'TVING', 'TVL': 'TVL', 'TV Land': 'TVL', + 'TVNZ': 'TVNZ', 'UFC': 'UFC', 'UKTV': 'UKTV', 'UNIV': 'UNIV', 'Univision': 'UNIV', 'USAN': 'USAN', 'USA Network': 'USAN', + 'VH1': 'VH1', 'VIAP': 'VIAP', 'VICE': 'VICE', 'Viceland': 'VICE', 'Viki': 'VIKI', 'VIMEO': 'VIMEO', 'VLCT': 'VLCT', + 'Velocity': 'VLCT', 'VMEO': 'VMEO', 'Vimeo': 'VMEO', 'VRV': 'VRV', 'VUDU': 'VUDU', 'WME': 'WME', 'WatchMe': 'WME', 'WNET': 'WNET', + 'W Network': 'WNET', 'WWEN': 'WWEN', 'WWE Network': 'WWEN', 'XBOX': 'XBOX', 'Xbox Video': 'XBOX', 'YHOO': 'YHOO', 'Yahoo': 'YHOO', + 'YT': 'YT', 'ZDF': 'ZDF', 'iP': 'iP', 'BBC iPlayer': 'iP', 'iQIYI': 'iQIYI', 'iT': 'iT', 'iTunes': 'iT' + } + + if get_services_only: + return services + service = guessit(video).get('streaming_service', "") + + video_name = re.sub(r"[.()]", " ", video.replace(tag, '').replace(guess_title, '')) + if "DTS-HD MA" in audio: + video_name = video_name.replace("DTS-HD.MA.", "").replace("DTS-HD MA ", "") + for key, value in services.items(): + if (' ' + key + ' ') in video_name and key not in guessit(video, {"excludes": ["country", "language"]}).get('title', ''): + service = value + elif key == service: + service = value + service_longname = service + for key, value in services.items(): + if value == service and len(key) > len(service_longname): + service_longname = key + if service_longname == "Amazon Prime": + service_longname = "Amazon" + return service, service_longname diff --git a/src/takescreens.py b/src/takescreens.py new file mode 100644 index 00000000..5210ad86 --- /dev/null +++ b/src/takescreens.py @@ -0,0 +1,884 @@ +from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor, as_completed +from tqdm import tqdm +import os +import re +import glob +import time +import ffmpeg +import random +import json +import sys +import platform +from pymediainfo import MediaInfo +from src.console import console + +from data.config import config # Import here to avoid dependency issues + +img_host = [ + config["DEFAULT"][key].lower() + for key in sorted(config["DEFAULT"].keys()) + if key.startswith("img_host_1") +] +screens = int(config['DEFAULT'].get('screens', 6)) +cutoff = int(config['DEFAULT'].get('cutoff_screens', 3)) +task_limit = config['DEFAULT'].get('task_limit', "0") +if int(task_limit) > 0: + task_limit = task_limit +tone_map = config['DEFAULT'].get('tone_map', False) +tone_task_limit = config['DEFAULT'].get('tone_task_limit', "0") +if int(tone_task_limit) > 0: + tone_task_limit = tone_task_limit + + +def sanitize_filename(filename): + # Replace invalid characters like colons with 
an underscore + return re.sub(r'[<>:"/\\|?*]', '_', filename) + + +def disc_screenshots(meta, filename, bdinfo, folder_id, base_dir, use_vs, image_list, ffdebug, num_screens=None, force_screenshots=False): + if meta['debug']: + start_time = time.time() + if 'image_list' not in meta: + meta['image_list'] = [] + existing_images = [img for img in meta['image_list'] if isinstance(img, dict) and img.get('img_url', '').startswith('http')] + + if len(existing_images) >= meta.get('cutoff') and not force_screenshots: + console.print("[yellow]There are already at least {} images in the image list. Skipping additional screenshots.".format(meta.get('cutoff'))) + return + + if num_screens is None: + num_screens = screens + if num_screens == 0 or len(image_list) >= num_screens: + return + + sanitized_filename = sanitize_filename(filename) + length = 0 + file = None + frame_rate = None + for each in bdinfo['files']: + # Calculate total length in seconds, including fractional part + int_length = sum(float(x) * 60 ** i for i, x in enumerate(reversed(each['length'].split(':')))) + + if int_length > length: + length = int_length + for root, dirs, files in os.walk(bdinfo['path']): + for name in files: + if name.lower() == each['file'].lower(): + file = os.path.join(root, name) + break # Stop searching once the file is found + + if 'video' in bdinfo and bdinfo['video']: + fps_string = bdinfo['video'][0].get('fps', None) + if fps_string: + try: + frame_rate = float(fps_string.split(' ')[0]) # Extract and convert to float + except ValueError: + console.print("[red]Error: Unable to parse frame rate from bdinfo['video'][0]['fps']") + + keyframe = 'nokey' if "VC-1" in bdinfo['video'][0]['codec'] or bdinfo['video'][0]['hdr_dv'] != "" else 'none' + print(f"File: {file}, Length: {length}, Frame Rate: {frame_rate}") + os.chdir(f"{base_dir}/tmp/{folder_id}") + existing_screens = glob.glob(f"{sanitized_filename}-*.png") + total_existing = len(existing_screens) + len(existing_images) + if not force_screenshots: + num_screens = max(0, screens - total_existing) + else: + num_screens = num_screens + + if num_screens == 0 and not force_screenshots: + console.print('[bold green]Reusing existing screenshots. No additional screenshots needed.') + return + + if meta['debug'] and not force_screenshots: + console.print(f"[bold yellow]Saving Screens... 
Total needed: {screens}, Existing: {total_existing}, To capture: {num_screens}") + + tone_map = meta.get('tone_map', False) + if tone_map and "HDR" in meta['hdr']: + hdr_tonemap = True + else: + hdr_tonemap = False + + capture_tasks = [] + capture_results = [] + if use_vs: + from src.vs import vs_screengn + vs_screengn(source=file, encode=None, filter_b_frames=False, num=num_screens, dir=f"{base_dir}/tmp/{folder_id}/") + else: + if meta.get('ffdebug', False): + loglevel = 'verbose' + else: + loglevel = 'quiet' + + ss_times = valid_ss_time([], num_screens + 1, length, frame_rate) + existing_indices = {int(p.split('-')[-1].split('.')[0]) for p in existing_screens} + capture_tasks = [ + ( + file, + ss_times[i], + os.path.abspath(f"{base_dir}/tmp/{folder_id}/{sanitized_filename}-{len(existing_indices) + i}.png"), + keyframe, + loglevel, + hdr_tonemap + ) + for i in range(num_screens + 1) + ] + + max_workers = min(len(capture_tasks), int(meta.get('task_limit', os.cpu_count()))) + + with ThreadPoolExecutor(max_workers=max_workers) as executor: + future_to_task = {executor.submit(capture_disc_task, task): task for task in capture_tasks} + + if sys.stdout.isatty(): # Check if running in terminal + with tqdm(total=len(capture_tasks), desc="Capturing Screenshots", ascii=True) as pbar: + for future in as_completed(future_to_task): + result = future.result() + if not isinstance(result, str) or not result.startswith("Error"): + capture_results.append(result) + else: + console.print(f"[red]{result}") + pbar.update(1) + else: + for future in as_completed(future_to_task): + result = future.result() + if not isinstance(result, str) or not result.startswith("Error"): + capture_results.append(result) + else: + console.print(f"[red]{result}") + + if capture_results and len(capture_results) > num_screens: + try: + smallest = min(capture_results, key=os.path.getsize) + if meta['debug']: + console.print(f"[yellow]Removing smallest image: {smallest} ({os.path.getsize(smallest)} bytes)") + os.remove(smallest) + capture_results.remove(smallest) + except Exception as e: + console.print(f"[red]Error removing smallest image: {str(e)}") + + optimized_results = [] + optimize_tasks = [(result, config) for result in capture_results if result and os.path.exists(result)] + max_workers = min(len(optimize_tasks), int(meta.get('task_limit', os.cpu_count()))) + + with ProcessPoolExecutor(max_workers=max_workers) as executor: + future_to_task = {executor.submit(optimize_image_task, task): task for task in optimize_tasks} + + if sys.stdout.isatty(): + with tqdm(total=len(optimize_tasks), desc="Optimizing Images", ascii=True) as pbar: + for future in as_completed(future_to_task): + try: + result = future.result() + if not isinstance(result, str) or not result.startswith("Error"): + optimized_results.append(result) + else: + console.print(f"[red]{result}") + except Exception as e: + console.print(f"[red]Error in optimization task: {str(e)}") + pbar.update(1) + else: + for future in as_completed(future_to_task): + try: + result = future.result() + if not isinstance(result, str) or not result.startswith("Error"): + optimized_results.append(result) + else: + console.print(f"[red]{result}") + except Exception as e: + console.print(f"[red]Error in optimization task: {str(e)}") + + valid_results = [] + remaining_retakes = [] + for image_path in optimized_results: + if "Error" in image_path: + console.print(f"[red]{image_path}") + continue + + retake = False + image_size = os.path.getsize(image_path) + if image_size <= 75000: + 
console.print(f"[yellow]Image {image_path} is incredibly small, retaking.") + retake = True + elif "imgbb" in img_host and image_size <= 31000000: + if meta['debug']: + console.print(f"[green]Image {image_path} meets size requirements for imgbb.[/green]") + elif any(host in ["imgbox", "pixhost"] for host in img_host) and image_size <= 10000000: + if meta['debug']: + console.print(f"[green]Image {image_path} meets size requirements for {img_host}.[/green]") + elif any(host in ["ptpimg", "lensdump", "ptscreens", "oeimg"] for host in img_host): + if meta['debug']: + console.print(f"[green]Image {image_path} meets size requirements for {img_host}.[/green]") + else: + console.print("[red]Image size does not meet requirements for your image host, retaking.") + retake = True + + if retake: + retry_attempts = 3 + for attempt in range(1, retry_attempts + 1): + console.print(f"[yellow]Retaking screenshot for: {image_path} (Attempt {attempt}/{retry_attempts})[/yellow]") + try: + os.remove(image_path) + random_time = random.uniform(0, length) + capture_disc_task((file, random_time, image_path, keyframe, loglevel, hdr_tonemap)) + optimize_image_task((image_path, config)) + new_size = os.path.getsize(image_path) + valid_image = False + + if "imgbb" in img_host and new_size > 75000 and new_size <= 31000000: + console.print(f"[green]Successfully retaken screenshot for: {image_path} ({new_size} bytes)[/green]") + valid_image = True + elif new_size > 75000 and new_size <= 10000000 and any(host in ["imgbox", "pixhost"] for host in img_host): + console.print(f"[green]Successfully retaken screenshot for: {image_path} ({new_size} bytes)[/green]") + valid_image = True + elif new_size > 75000 and any(host in ["ptpimg", "lensdump", "ptscreens", "oeimg"] for host in img_host): + console.print(f"[green]Successfully retaken screenshot for: {image_path} ({new_size} bytes)[/green]") + valid_image = True + + if valid_image: + valid_results.append(image_path) + break + else: + console.print(f"[red]Retaken image {image_path} does not meet the size requirements for {img_host}. Retrying...[/red]") + except Exception as e: + console.print(f"[red]Error retaking screenshot for {image_path}: {e}[/red]") + else: + console.print(f"[red]All retry attempts failed for {image_path}. 
Skipping.[/red]") + remaining_retakes.append(image_path) + else: + valid_results.append(image_path) + + if remaining_retakes: + console.print(f"[red]The following images could not be retaken successfully: {remaining_retakes}[/red]") + + console.print(f"[green]Successfully captured {len(valid_results)} screenshots.") + + if meta['debug']: + finish_time = time.time() + console.print(f"Screenshots processed in {finish_time - start_time:.4f} seconds") + + +def capture_disc_task(task): + file, ss_time, image_path, keyframe, loglevel, hdr_tonemap = task + try: + ff = ffmpeg.input(file, ss=ss_time, skip_frame=keyframe) + if hdr_tonemap: + ff = ( + ff + .filter('zscale', transfer='linear') + .filter('tonemap', tonemap='mobius', desat=8.0) + .filter('zscale', transfer='bt709') + .filter('format', 'rgb24') + ) + + command = ( + ff + .output(image_path, vframes=1, pix_fmt="rgb24") + .overwrite_output() + .global_args('-loglevel', loglevel) + ) + command.run(capture_stdout=True, capture_stderr=True) + + return image_path + except ffmpeg.Error as e: + error_output = e.stderr.decode('utf-8') + console.print(f"[red]FFmpeg error capturing screenshot: {error_output}[/red]") + return None + except Exception as e: + console.print(f"[red]Error capturing screenshot: {e}[/red]") + return None + + +def dvd_screenshots(meta, disc_num, num_screens=None, retry_cap=None): + if 'image_list' not in meta: + meta['image_list'] = [] + existing_images = [img for img in meta['image_list'] if isinstance(img, dict) and img.get('img_url', '').startswith('http')] + + if len(existing_images) >= meta.get('cutoff') and not retry_cap: + console.print("[yellow]There are already at least {} images in the image list. Skipping additional screenshots.".format(meta.get('cutoff'))) + return + screens = meta.get('screens', 6) + if num_screens is None: + num_screens = screens - len(existing_images) + if num_screens == 0 or (len(meta.get('image_list', [])) >= screens and disc_num == 0): + return + + if len(glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][disc_num]['name']}-*.png")) >= num_screens: + i = num_screens + console.print('[bold green]Reusing screenshots') + return + + ifo_mi = MediaInfo.parse(f"{meta['discs'][disc_num]['path']}/VTS_{meta['discs'][disc_num]['main_set'][0][:2]}_0.IFO", mediainfo_options={'inform_version': '1'}) + sar = 1 + for track in ifo_mi.tracks: + if track.track_type == "Video": + if isinstance(track.duration, str): + durations = [float(d) for d in track.duration.split(' / ')] + length = max(durations) / 1000 # Use the longest duration + else: + length = float(track.duration) / 1000 # noqa #F841 # Convert to seconds + + par = float(track.pixel_aspect_ratio) + dar = float(track.display_aspect_ratio) + width = float(track.width) + height = float(track.height) + frame_rate = float(track.frame_rate) + if par < 1: + new_height = dar * height + sar = width / new_height + w_sar = 1 + h_sar = sar + else: + sar = par + w_sar = sar + h_sar = 1 + + def _is_vob_good(n, loops, num_screens): + max_loops = 6 + fallback_duration = 300 + valid_tracks = [] + + while loops < max_loops: + try: + vob_mi = MediaInfo.parse( + f"{meta['discs'][disc_num]['path']}/VTS_{main_set[n]}", + output='JSON' + ) + vob_mi = json.loads(vob_mi) + + for track in vob_mi.get('media', {}).get('track', []): + duration = float(track.get('Duration', 0)) + width = track.get('Width') + height = track.get('Height') + + if duration > 1 and width and height: # Minimum 1-second track + valid_tracks.append({ + 'duration': duration, + 
'track_index': n + }) + + if valid_tracks: + # Sort by duration, take longest track + longest_track = max(valid_tracks, key=lambda x: x['duration']) + return longest_track['duration'], longest_track['track_index'] + + except Exception as e: + console.print(f"[red]Error parsing VOB {n}: {e}") + + n = (n + 1) % len(main_set) + loops += 1 + + return fallback_duration, 0 + + main_set = meta['discs'][disc_num]['main_set'][1:] if len(meta['discs'][disc_num]['main_set']) > 1 else meta['discs'][disc_num]['main_set'] + os.chdir(f"{meta['base_dir']}/tmp/{meta['uuid']}") + voblength, n = _is_vob_good(0, 0, num_screens) + ss_times = valid_ss_time([], num_screens + 1, voblength, frame_rate) + capture_tasks = [] + existing_images = 0 + existing_image_paths = [] + + for i in range(num_screens + 1): + image = f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][disc_num]['name']}-{i}.png" + input_file = f"{meta['discs'][disc_num]['path']}/VTS_{main_set[i % len(main_set)]}" + if os.path.exists(image) and not meta.get('retake', False): + existing_images += 1 + existing_image_paths.append(image) + + if meta['debug']: + console.print(f"Found {existing_images} existing screenshots") + + if existing_images == num_screens and not meta.get('retake', False): + console.print("[yellow]The correct number of screenshots already exists. Skipping capture process.") + capture_results = existing_image_paths + return + else: + for i in range(num_screens + 1): + image = f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][disc_num]['name']}-{i}.png" + input_file = f"{meta['discs'][disc_num]['path']}/VTS_{main_set[i % len(main_set)]}" + if not os.path.exists(image) and not meta.get('retake', False): + capture_tasks.append((input_file, image, ss_times[i], meta, width, height, w_sar, h_sar)) + + capture_results = [] + max_workers = min(len(capture_tasks), int(meta.get('task_limit', os.cpu_count()))) + + with ThreadPoolExecutor(max_workers=max_workers) as executor: + future_to_task = {executor.submit(capture_dvd_screenshot, task): task for task in capture_tasks} + + if sys.stdout.isatty(): # Check if running in terminal + with tqdm(total=len(capture_tasks), desc="Capturing Screenshots", ascii=True) as pbar: + for future in as_completed(future_to_task): + result = future.result() + if not isinstance(result, str) or not result.startswith("Error"): + capture_results.append(result) + else: + console.print(f"[red]{result}") + pbar.update(1) + else: + for future in as_completed(future_to_task): + result = future.result() + if not isinstance(result, str) or not result.startswith("Error"): + capture_results.append(result) + else: + console.print(f"[red]{result}") + + if capture_results and len(capture_results) > num_screens: + smallest = None + smallest_size = float('inf') + for screens in glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}/", f"{meta['discs'][disc_num]['name']}-*"): + screen_path = os.path.join(f"{meta['base_dir']}/tmp/{meta['uuid']}/", screens) + try: + screen_size = os.path.getsize(screen_path) + if screen_size < smallest_size: + smallest_size = screen_size + smallest = screen_path + except FileNotFoundError: + console.print(f"[red]File not found: {screen_path}[/red]") # Handle potential edge cases + continue + + if smallest: + if meta['debug']: + console.print(f"[yellow]Removing smallest image: {smallest} ({smallest_size} bytes)[/yellow]") + os.remove(smallest) + + optimize_results = [] + optimize_tasks = [(result, config) for result in capture_results if result and os.path.exists(result)] + + max_workers = 
min(len(optimize_tasks), int(meta.get('task_limit', os.cpu_count()))) + + with ProcessPoolExecutor(max_workers=max_workers) as executor: + future_to_task = {executor.submit(optimize_image_task, task): task for task in optimize_tasks} + + if sys.stdout.isatty(): + with tqdm(total=len(optimize_tasks), desc="Optimizing Images", ascii=True) as pbar: + for future in as_completed(future_to_task): + try: + result = future.result() + if not isinstance(result, str) or not result.startswith("Error"): + optimize_results.append(result) + else: + console.print(f"[red]{result}") + except Exception as e: + console.print(f"[red]Error in optimization task: {str(e)}") + pbar.update(1) + else: + for future in as_completed(future_to_task): + try: + result = future.result() + if not isinstance(result, str) or not result.startswith("Error"): + optimize_results.append(result) + else: + console.print(f"[red]{result}") + except Exception as e: + console.print(f"[red]Error in optimization task: {str(e)}") + + valid_results = [] + remaining_retakes = [] + + for image in optimize_results: + if "Error" in image: + console.print(f"[red]{image}") + continue + + retake = False + image_size = os.path.getsize(image) + if image_size <= 120000: + console.print(f"[yellow]Image {image} is incredibly small, retaking.") + retake = True + + if retake: + retry_attempts = 3 + for attempt in range(1, retry_attempts + 1): + console.print(f"[yellow]Retaking screenshot for: {image} (Attempt {attempt}/{retry_attempts})[/yellow]") + try: + os.remove(image) + except Exception as e: + console.print(f"[red]Failed to delete {image}: {e}[/red]") + break + + image_index = int(image.rsplit('-', 1)[-1].split('.')[0]) + input_file = f"{meta['discs'][disc_num]['path']}/VTS_{main_set[image_index % len(main_set)]}" + adjusted_time = random.uniform(0, voblength) + + try: + capture_dvd_screenshot((input_file, image, adjusted_time, meta, width, height, w_sar, h_sar)) + retaken_size = os.path.getsize(image) + + if retaken_size > 75000: + console.print(f"[green]Successfully retaken screenshot for: {image} ({retaken_size} bytes)[/green]") + valid_results.append(image) + break + else: + console.print(f"[red]Retaken image {image} is still too small. Retrying...[/red]") + except Exception as e: + console.print(f"[red]Error capturing screenshot for {input_file} at {adjusted_time}: {e}[/red]") + + else: + console.print(f"[red]All retry attempts failed for {image}. 
Skipping.[/red]") + remaining_retakes.append(image) + else: + valid_results.append(image) + if remaining_retakes: + console.print(f"[red]The following images could not be retaken successfully: {remaining_retakes}[/red]") + + console.print(f"[green]Successfully captured {len(optimize_results)} screenshots.") + + +def capture_dvd_screenshot(task): + input_file, image, seek_time, meta, width, height, w_sar, h_sar = task + + if os.path.exists(image): + console.print(f"[green]Screenshot already exists: {image}[/green]") + return image + + try: + loglevel = 'verbose' if meta.get('ffdebug', False) else 'quiet' + media_info = MediaInfo.parse(input_file) + video_duration = next((track.duration for track in media_info.tracks if track.track_type == "Video"), None) + + if video_duration and seek_time > video_duration: + seek_time = max(0, video_duration - 1) + + ff = ffmpeg.input(input_file, ss=seek_time) + if w_sar != 1 or h_sar != 1: + ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) + + try: + ff.output(image, vframes=1, pix_fmt="rgb24").overwrite_output().global_args('-loglevel', loglevel, '-accurate_seek').run() + except ffmpeg._run.Error as e: + stderr_output = e.stderr.decode() if e.stderr else "No stderr output available" + console.print(f"[red]Error capturing screenshot for {input_file} at {seek_time}s: {stderr_output}[/red]") + if os.path.exists(image): + return image + else: + console.print(f"[red]Screenshot creation failed for {image}[/red]") + return None + + except Exception as e: + console.print(f"[red]Error capturing screenshot for {input_file} at {seek_time}s: {e}[/red]") + return None + + +def screenshots(path, filename, folder_id, base_dir, meta, num_screens=None, force_screenshots=False, manual_frames=None): + """Screenshot capture function using concurrent.futures""" + if meta['debug']: + start_time = time.time() + if 'image_list' not in meta: + meta['image_list'] = [] + + existing_images = [img for img in meta['image_list'] if isinstance(img, dict) and img.get('img_url', '').startswith('http')] + + if len(existing_images) >= meta.get('cutoff') and not force_screenshots: + console.print("[yellow]There are already at least {} images in the image list. 
Skipping additional screenshots.".format(meta.get('cutoff'))) + return + + if num_screens is None: + num_screens = screens - len(existing_images) + if num_screens <= 0: + return + + try: + with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", encoding='utf-8') as f: + mi = json.load(f) + video_track = mi['media']['track'][1] + length = float(video_track.get('Duration', mi['media']['track'][0]['Duration'])) + width = float(video_track.get('Width')) + height = float(video_track.get('Height')) + par = float(video_track.get('PixelAspectRatio', 1)) + dar = float(video_track.get('DisplayAspectRatio')) + frame_rate = float(video_track.get('FrameRate', 24.0)) + + if par == 1: + sar = w_sar = h_sar = 1 + elif par < 1: + new_height = dar * height + sar = width / new_height + w_sar = 1 + h_sar = sar + else: + sar = w_sar = par + h_sar = 1 + except Exception as e: + console.print(f"[red]Error processing MediaInfo.json: {e}") + return + + loglevel = 'verbose' if meta.get('ffdebug', False) else 'quiet' + os.chdir(f"{base_dir}/tmp/{folder_id}") + + if manual_frames: + if meta['debug']: + console.print(f"[yellow]Using manual frames: {manual_frames}") + manual_frames = [int(frame) for frame in manual_frames.split(',')] + ss_times = [frame / frame_rate for frame in manual_frames] + else: + ss_times = valid_ss_time([], num_screens + 1, length, frame_rate, exclusion_zone=500) + + if meta['debug']: + console.print(f"[green]Final list of frames for screenshots: {ss_times}") + + existing_images = 0 + existing_image_paths = [] + for i in range(num_screens + 1): + image_path = os.path.abspath(f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png") + if os.path.exists(image_path) and not meta.get('retake', False): + existing_images += 1 + existing_image_paths.append(image_path) + + if meta['debug']: + console.print(f"Found {existing_images} existing screenshots") + + tone_map = meta.get('tone_map', False) + if tone_map and "HDR" in meta['hdr']: + hdr_tonemap = True + else: + hdr_tonemap = False + + capture_tasks = [] + if existing_images == num_screens and not meta.get('retake', False): + console.print("[yellow]The correct number of screenshots already exists. 
Skipping capture process.") + capture_results = existing_image_paths + return + else: + for i in range(num_screens + 1): + image_path = os.path.abspath(f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png") + if not os.path.exists(image_path) or meta.get('retake', False): + capture_tasks.append((path, ss_times[i], image_path, width, height, w_sar, h_sar, loglevel, hdr_tonemap)) + + capture_results = [] + max_workers = min(len(capture_tasks), int(meta.get('task_limit', os.cpu_count()))) + + with ThreadPoolExecutor(max_workers=max_workers) as executor: + future_to_task = {executor.submit(capture_screenshot, task): task for task in capture_tasks} + + if sys.stdout.isatty(): # Check if running in terminal + with tqdm(total=len(capture_tasks), desc="Capturing Screenshots", ascii=True) as pbar: + for future in as_completed(future_to_task): + result = future.result() + if not isinstance(result, str) or not result.startswith("Error"): + capture_results.append(result) + else: + console.print(f"[red]{result}") + pbar.update(1) + else: + for future in as_completed(future_to_task): + result = future.result() + if not isinstance(result, str) or not result.startswith("Error"): + capture_results.append(result) + else: + console.print(f"[red]{result}") + + if capture_results and len(capture_results) > num_screens: + try: + smallest = min(capture_results, key=os.path.getsize) + if meta['debug']: + console.print(f"[yellow]Removing smallest image: {smallest} ({os.path.getsize(smallest)} bytes)") + os.remove(smallest) + capture_results.remove(smallest) + except Exception as e: + console.print(f"[red]Error removing smallest image: {str(e)}") + + # Optimize images using ThreadPoolExecutor + optimize_results = [] + optimize_tasks = [(result, config) for result in capture_results if result and os.path.exists(result)] + max_workers = min(len(optimize_tasks), int(meta.get('task_limit', os.cpu_count()))) + + with ProcessPoolExecutor(max_workers=max_workers) as executor: + future_to_task = {executor.submit(optimize_image_task, task): task for task in optimize_tasks} + + if sys.stdout.isatty(): + with tqdm(total=len(optimize_tasks), desc="Optimizing Images", ascii=True) as pbar: + for future in as_completed(future_to_task): + try: + result = future.result() + if not isinstance(result, str) or not result.startswith("Error"): + optimize_results.append(result) + else: + console.print(f"[red]{result}") + except Exception as e: + console.print(f"[red]Error in optimization task: {str(e)}") + pbar.update(1) + else: + for future in as_completed(future_to_task): + try: + result = future.result() + if not isinstance(result, str) or not result.startswith("Error"): + optimize_results.append(result) + else: + console.print(f"[red]{result}") + except Exception as e: + console.print(f"[red]Error in optimization task: {str(e)}") + + valid_results = [] + remaining_retakes = [] + for image_path in optimize_results: + if "Error" in image_path: + console.print(f"[red]{image_path}") + continue + + retake = False + image_size = os.path.getsize(image_path) + if not manual_frames: + if image_size <= 75000: + console.print(f"[yellow]Image {image_path} is incredibly small, retaking.") + retake = True + elif "imgbb" in img_host and image_size <= 31000000: + if meta['debug']: + console.print(f"[green]Image {image_path} meets size requirements for imgbb.[/green]") + elif any(host in ["imgbox", "pixhost"] for host in img_host) and image_size <= 10000000: + if meta['debug']: + console.print(f"[green]Image {image_path} meets size requirements for 
{img_host}.[/green]") + elif any(host in ["ptpimg", "lensdump", "ptscreens", "oeimg"] for host in img_host): + if meta['debug']: + console.print(f"[green]Image {image_path} meets size requirements for {img_host}.[/green]") + else: + console.print("[red]Image size does not meet requirements for your image host, retaking.") + retake = True + + if retake: + retry_attempts = 3 + for attempt in range(1, retry_attempts + 1): + console.print(f"[yellow]Retaking screenshot for: {image_path} (Attempt {attempt}/{retry_attempts})[/yellow]") + try: + os.remove(image_path) + random_time = random.uniform(0, length) + capture_screenshot((path, random_time, image_path, width, height, w_sar, h_sar, loglevel, hdr_tonemap)) + optimize_image_task((image_path, config)) + new_size = os.path.getsize(image_path) + valid_image = False + + if "imgbb" in img_host and new_size > 75000 and new_size <= 31000000: + console.print(f"[green]Successfully retaken screenshot for: {image_path} ({new_size} bytes)[/green]") + valid_image = True + elif new_size > 75000 and new_size <= 10000000 and any(host in ["imgbox", "pixhost"] for host in img_host): + console.print(f"[green]Successfully retaken screenshot for: {image_path} ({new_size} bytes)[/green]") + valid_image = True + elif new_size > 75000 and any(host in ["ptpimg", "lensdump", "ptscreens", "oeimg"] for host in img_host): + console.print(f"[green]Successfully retaken screenshot for: {image_path} ({new_size} bytes)[/green]") + valid_image = True + + if valid_image: + valid_results.append(image_path) + break + else: + console.print(f"[red]Retaken image {image_path} does not meet the size requirements for {img_host}. Retrying...[/red]") + except Exception as e: + console.print(f"[red]Error retaking screenshot for {image_path}: {e}[/red]") + else: + console.print(f"[red]All retry attempts failed for {image_path}. 
Skipping.[/red]") + remaining_retakes.append(image_path) + else: + valid_results.append(image_path) + + if remaining_retakes: + console.print(f"[red]The following images could not be retaken successfully: {remaining_retakes}[/red]") + + console.print(f"[green]Successfully captured {len(valid_results)} screenshots.") + + if meta['debug']: + finish_time = time.time() + console.print(f"Screenshots processed in {finish_time - start_time:.4f} seconds") + + +def valid_ss_time(ss_times, num_screens, length, frame_rate, exclusion_zone=None): + total_screens = num_screens + 1 + + if exclusion_zone is None: + exclusion_zone = max(length / (3 * total_screens), length / 15) + + result_times = ss_times.copy() + section_size = (round(4 * length / 5) - round(length / 5)) / total_screens * 1.3 + section_starts = [round(length / 5) + i * (section_size * 0.9) for i in range(total_screens)] + + for section_index in range(total_screens): + valid_time = False + attempts = 0 + start_frame = round(section_starts[section_index] * frame_rate) + end_frame = round((section_starts[section_index] + section_size) * frame_rate) + + while not valid_time and attempts < 50: + attempts += 1 + frame = random.randint(start_frame, end_frame) + time = frame / frame_rate + + if all(abs(frame - existing_time * frame_rate) > exclusion_zone * frame_rate for existing_time in result_times): + result_times.append(time) + valid_time = True + + if not valid_time: + midpoint_frame = (start_frame + end_frame) // 2 + result_times.append(midpoint_frame / frame_rate) + + result_times = sorted(result_times) + + return result_times + + +def capture_screenshot(args): + path, ss_time, image_path, width, height, w_sar, h_sar, loglevel, hdr_tonemap = args + try: + if width <= 0 or height <= 0: + return "Error: Invalid width or height for scaling" + + if ss_time < 0: + return f"Error: Invalid timestamp {ss_time}" + + ff = ffmpeg.input(path, ss=ss_time) + if w_sar != 1 or h_sar != 1: + ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) + + if hdr_tonemap: + ff = ( + ff + .filter('zscale', transfer='linear') + .filter('tonemap', tonemap='mobius', desat=10.0) + .filter('zscale', transfer='bt709') + .filter('format', 'rgb24') + ) + + command = ( + ff + .output( + image_path, + vframes=1, + pix_fmt="rgb24" + ) + .overwrite_output() + .global_args('-loglevel', loglevel) + ) + + try: + command.run(capture_stdout=True, capture_stderr=True) + except ffmpeg.Error as e: + error_output = e.stderr.decode('utf-8') + return f"Error: {error_output}" + + if not os.path.exists(image_path) or os.path.getsize(image_path) == 0: + return f"Error: Screenshot not generated or is empty at {image_path}" + + return image_path + except Exception as e: + return f"Error: {str(e)}" + + +def optimize_image_task(args): + image, config = args + try: + # Extract shared_seedbox and optimize_images from config + optimize_images = config['DEFAULT'].get('optimize_images', True) + shared_seedbox = config['DEFAULT'].get('shared_seedbox', True) + + if optimize_images: + if shared_seedbox: + # Limit the number of threads for oxipng + num_cores = os.cpu_count() + max_threads = max(1, num_cores // 2) # Ensure at least 1 thread + os.environ['RAYON_NUM_THREADS'] = str(max_threads) + + if os.path.exists(image): + pyver = platform.python_version_tuple() + if int(pyver[0]) == 3 and int(pyver[1]) >= 7: + import oxipng + if os.path.getsize(image) >= 16000000: + oxipng.optimize(image, level=6) + else: + oxipng.optimize(image, level=2) + return image # Return image path if 
successful + except (KeyboardInterrupt, Exception) as e: + return f"Error: {e}" # Return error message diff --git a/src/tmdb.py b/src/tmdb.py new file mode 100644 index 00000000..5deabf87 --- /dev/null +++ b/src/tmdb.py @@ -0,0 +1,418 @@ +from src.console import console +from src.imdb import get_imdb_aka_api, get_imdb_info_api +from src.args import Args +from data.config import config +import tmdbsimple as tmdb +import re +import asyncio +from guessit import guessit +import cli_ui +import anitopy +from datetime import datetime +from difflib import SequenceMatcher +import requests + + +async def get_tmdb_from_imdb(meta, filename): + if meta.get('tmdb_manual') is not None: + meta['tmdb'] = meta['tmdb_manual'] + return meta + imdb_id = meta['imdb'] + if str(imdb_id)[:2].lower() != "tt": + imdb_id = f"tt{imdb_id}" + find = tmdb.Find(id=imdb_id) + info = find.info(external_source="imdb_id") + if len(info['movie_results']) >= 1: + meta['category'] = "MOVIE" + meta['tmdb'] = info['movie_results'][0]['id'] + meta['original_language'] = info['movie_results'][0].get('original_language') + elif len(info['tv_results']) >= 1: + meta['category'] = "TV" + meta['tmdb'] = info['tv_results'][0]['id'] + meta['original_language'] = info['tv_results'][0].get('original_language') + else: + imdb_info = await get_imdb_info_api(imdb_id.replace('tt', ''), meta) + title = imdb_info.get("title") + if title is None: + title = filename + year = imdb_info.get('year') + if year is None: + year = meta['search_year'] + console.print(f"[yellow]TMDb was unable to find anything with that IMDb, searching TMDb for {title}") + meta = await get_tmdb_id(title, year, meta, meta['category'], imdb_info.get('original title', imdb_info.get('localized title', meta['uuid']))) + if meta.get('tmdb') in ('None', '', None, 0, '0'): + if meta.get('mode', 'discord') == 'cli': + console.print('[yellow]Unable to find a matching TMDb entry') + tmdb_id = console.input("Please enter tmdb id: ") + parser = Args(config=config) + meta['category'], meta['tmdb'] = parser.parse_tmdb_id(id=tmdb_id, category=meta.get('category')) + await asyncio.sleep(2) + return meta + + +async def get_tmdb_id(filename, search_year, meta, category, untouched_filename="", attempted=0): + search = tmdb.Search() + try: + if category == "MOVIE": + search.movie(query=filename, year=search_year) + elif category == "TV": + search.tv(query=filename, first_air_date_year=search_year) + if meta.get('tmdb_manual') is not None: + meta['tmdb'] = meta['tmdb_manual'] + else: + meta['tmdb'] = search.results[0]['id'] + meta['category'] = category + except IndexError: + try: + if category == "MOVIE": + search.movie(query=filename) + elif category == "TV": + search.tv(query=filename) + meta['tmdb'] = search.results[0]['id'] + meta['category'] = category + except IndexError: + if category == "MOVIE": + category = "TV" + else: + category = "MOVIE" + if attempted <= 1: + attempted += 1 + meta = await get_tmdb_id(filename, search_year, meta, category, untouched_filename, attempted) + elif attempted == 2: + attempted += 1 + meta = await get_tmdb_id(anitopy.parse(guessit(untouched_filename, {"excludes": ["country", "language"]})['title'])['anime_title'], search_year, meta, meta['category'], untouched_filename, attempted) + if meta['tmdb'] in (None, ""): + console.print(f"[red]Unable to find TMDb match for {filename}") + if meta.get('mode', 'discord') == 'cli': + tmdb_id = cli_ui.ask_string("Please enter tmdb id in this format: tv/12345 or movie/12345") + parser = Args(config=config) + 
meta['category'], meta['tmdb'] = parser.parse_tmdb_id(id=tmdb_id, category=meta.get('category')) + meta['tmdb_manual'] = meta['tmdb'] + return meta + + return meta + + +async def tmdb_other_meta(meta): + if meta['tmdb'] == "0": + try: + title = guessit(meta['path'], {"excludes": ["country", "language"]})['title'].lower() + title = title.split('aka')[0] + meta = await get_tmdb_id(guessit(title, {"excludes": ["country", "language"]})['title'], meta['search_year'], meta) + if meta['tmdb'] == "0": + meta = await get_tmdb_id(title, "", meta, meta['category']) + except Exception: + if meta.get('mode', 'discord') == 'cli': + console.print("[bold red]Unable to find tmdb entry. Exiting.") + exit() + else: + console.print("[bold red]Unable to find tmdb entry") + return meta + if meta['category'] == "MOVIE": + movie = tmdb.Movies(meta['tmdb']) + response = movie.info() + meta['title'] = response['title'] + if response['release_date']: + meta['year'] = datetime.strptime(response['release_date'], '%Y-%m-%d').year + else: + console.print('[yellow]TMDB does not have a release date, using year from filename instead (if it exists)') + meta['year'] = meta['search_year'] + external = movie.external_ids() + if meta.get('imdb', None) is None: + imdb_id = external.get('imdb_id', "0") + if imdb_id == "" or imdb_id is None: + meta['imdb_id'] = '0' + else: + meta['imdb_id'] = str(int(imdb_id.replace('tt', ''))).zfill(7) + else: + meta['imdb_id'] = str(meta['imdb']).replace('tt', '').zfill(7) + if meta.get('tvdb_manual'): + meta['tvdb_id'] = meta['tvdb_manual'] + else: + if meta.get('tvdb_id', '0') in ['', ' ', None, 'None', '0']: + meta['tvdb_id'] = external.get('tvdb_id', '0') + if meta['tvdb_id'] in ["", None, " ", "None"]: + meta['tvdb_id'] = '0' + try: + videos = movie.videos() + for each in videos.get('results', []): + if each.get('site', "") == 'YouTube' and each.get('type', "") == "Trailer": + meta['youtube'] = f"https://www.youtube.com/watch?v={each.get('key')}" + break + except Exception: + console.print('[yellow]Unable to grab videos from TMDb.') + + meta['aka'], original_language = await get_imdb_aka_api(meta['imdb_id'], meta) + if original_language is not None: + meta['original_language'] = original_language + else: + meta['original_language'] = response['original_language'] + + meta['original_title'] = response.get('original_title', meta['title']) + meta['keywords'] = await get_keywords(movie) + meta['genres'] = await get_genres(response) + meta['tmdb_directors'] = await get_directors(movie) + if meta.get('anime', False) is False: + meta['mal_id'], meta['aka'], meta['anime'] = await get_anime(response, meta) + if meta.get('mal') is not None: + meta['mal_id'] = meta['mal'] + meta['poster'] = response.get('poster_path', "") + meta['tmdb_poster'] = response.get('poster_path', "") + meta['overview'] = response['overview'] + meta['tmdb_type'] = 'Movie' + meta['runtime'] = response.get('episode_run_time', 60) + elif meta['category'] == "TV": + tv = tmdb.TV(meta['tmdb']) + response = tv.info() + meta['title'] = response['name'] + if response['first_air_date']: + meta['year'] = datetime.strptime(response['first_air_date'], '%Y-%m-%d').year + else: + console.print('[yellow]TMDB does not have a release date, using year from filename instead (if it exists)') + meta['year'] = meta['search_year'] + external = tv.external_ids() + if meta.get('imdb', None) is None: + imdb_id = external.get('imdb_id', "0") + if imdb_id == "" or imdb_id is None: + meta['imdb_id'] = '0' + else: + meta['imdb_id'] = 
str(int(imdb_id.replace('tt', ''))).zfill(7) + else: + meta['imdb_id'] = str(int(meta['imdb'].replace('tt', ''))).zfill(7) + if meta.get('tvdb_manual'): + meta['tvdb_id'] = meta['tvdb_manual'] + else: + if meta.get('tvdb_id', '0') in ['', ' ', None, 'None', '0']: + meta['tvdb_id'] = external.get('tvdb_id', '0') + if meta['tvdb_id'] in ["", None, " ", "None"]: + meta['tvdb_id'] = '0' + try: + videos = tv.videos() + for each in videos.get('results', []): + if each.get('site', "") == 'YouTube' and each.get('type', "") == "Trailer": + meta['youtube'] = f"https://www.youtube.com/watch?v={each.get('key')}" + break + except Exception: + console.print('[yellow]Unable to grab videos from TMDb.') + + # meta['aka'] = f" AKA {response['original_name']}" + meta['aka'], original_language = await get_imdb_aka_api(meta['imdb_id'], meta) + if original_language is not None: + meta['original_language'] = original_language + else: + meta['original_language'] = response['original_language'] + meta['original_title'] = response.get('original_name', meta['title']) + meta['keywords'] = await get_keywords(tv) + meta['genres'] = await get_genres(response) + meta['tmdb_directors'] = await get_directors(tv) + meta['mal_id'], meta['aka'], meta['anime'] = await get_anime(response, meta) + if meta.get('mal') is not None: + meta['mal_id'] = meta['mal'] + meta['poster'] = response.get('poster_path', '') + meta['overview'] = response['overview'] + + meta['tmdb_type'] = response.get('type', 'Scripted') + runtime = response.get('episode_run_time', [60]) + if runtime == []: + runtime = [60] + meta['runtime'] = runtime[0] + if meta['poster'] not in (None, ''): + meta['poster'] = f"https://image.tmdb.org/t/p/original{meta['poster']}" + + difference = SequenceMatcher(None, meta['title'].lower(), meta['aka'][5:].lower()).ratio() + if difference >= 0.9 or meta['aka'][5:].strip() == "" or meta['aka'][5:].strip().lower() in meta['title'].lower(): + meta['aka'] = "" + if f"({meta['year']})" in meta['aka']: + meta['aka'] = meta['aka'].replace(f"({meta['year']})", "").strip() + + return meta + + +async def get_keywords(tmdb_info): + if tmdb_info is not None: + tmdb_keywords = tmdb_info.keywords() + if tmdb_keywords.get('keywords') is not None: + keywords = [f"{keyword['name'].replace(',', ' ')}" for keyword in tmdb_keywords.get('keywords')] + elif tmdb_keywords.get('results') is not None: + keywords = [f"{keyword['name'].replace(',', ' ')}" for keyword in tmdb_keywords.get('results')] + return (', '.join(keywords)) + else: + return '' + + +async def get_genres(tmdb_info): + if tmdb_info is not None: + tmdb_genres = tmdb_info.get('genres', []) + if tmdb_genres is not []: + genres = [f"{genre['name'].replace(',', ' ')}" for genre in tmdb_genres] + return (', '.join(genres)) + else: + return '' + + +async def get_directors(tmdb_info): + if tmdb_info is not None: + tmdb_credits = tmdb_info.credits() + directors = [] + if tmdb_credits.get('cast', []) != []: + for each in tmdb_credits['cast']: + if each.get('known_for_department', '') == "Directing": + directors.append(each.get('original_name', each.get('name'))) + return directors + else: + return '' + + +async def get_anime(response, meta): + tmdb_name = meta['title'] + if meta.get('aka', "") == "": + alt_name = "" + else: + alt_name = meta['aka'] + anime = False + animation = False + for each in response['genres']: + if each['id'] == 16: + animation = True + if response['original_language'] == 'ja' and animation is True: + romaji, mal_id, eng_title, season_year, episodes = await 
get_romaji(tmdb_name, meta.get('mal', None)) + alt_name = f" AKA {romaji}" + + anime = True + # mal = AnimeSearch(romaji) + # mal_id = mal.results[0].mal_id + else: + mal_id = 0 + if meta.get('mal_id', 0) != 0: + mal_id = meta.get('mal_id') + if meta.get('mal') is not None: + mal_id = meta.get('mal') + return mal_id, alt_name, anime + + +async def get_romaji(tmdb_name, mal): + if mal is None: + mal = 0 + tmdb_name = tmdb_name.replace('-', "").replace("The Movie", "") + tmdb_name = ' '.join(tmdb_name.split()) + query = ''' + query ($search: String) { + Page (page: 1) { + pageInfo { + total + } + media (search: $search, type: ANIME, sort: SEARCH_MATCH) { + id + idMal + title { + romaji + english + native + } + seasonYear + episodes + } + } + } + ''' + # Define our query variables and values that will be used in the query request + variables = { + 'search': tmdb_name + } + else: + query = ''' + query ($search: Int) { + Page (page: 1) { + pageInfo { + total + } + media (idMal: $search, type: ANIME, sort: SEARCH_MATCH) { + id + idMal + title { + romaji + english + native + } + seasonYear + episodes + } + } + } + ''' + # Define our query variables and values that will be used in the query request + variables = { + 'search': mal + } + + # Make the HTTP Api request + url = 'https://graphql.anilist.co' + try: + response = requests.post(url, json={'query': query, 'variables': variables}) + json = response.json() + media = json['data']['Page']['media'] + except Exception: + console.print('[red]Failed to get anime specific info from anilist. Continuing without it...') + media = [] + if media not in (None, []): + result = {'title': {}} + difference = 0 + for anime in media: + search_name = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", tmdb_name.lower().replace(' ', '')) + for title in anime['title'].values(): + if title is not None: + title = re.sub(u'[\u3000-\u303f\u3040-\u309f\u30a0-\u30ff\uff00-\uff9f\u4e00-\u9faf\u3400-\u4dbf]+ (?=[A-Za-z ]+–)', "", title.lower().replace(' ', ''), re.U) + diff = SequenceMatcher(None, title, search_name).ratio() + if diff >= difference: + result = anime + difference = diff + + romaji = result['title'].get('romaji', result['title'].get('english', "")) + mal_id = result.get('idMal', 0) + eng_title = result['title'].get('english', result['title'].get('romaji', "")) + season_year = result.get('season_year', "") + episodes = result.get('episodes', 0) + else: + romaji = eng_title = season_year = "" + episodes = mal_id = 0 + if mal_id in [None, 0]: + mal_id = mal + if not episodes: + episodes = 0 + return romaji, mal_id, eng_title, season_year, episodes + + +async def get_tmdb_imdb_from_mediainfo(mediainfo, category, is_disc, tmdbid, imdbid): + if not is_disc: + if mediainfo['media']['track'][0].get('extra'): + extra = mediainfo['media']['track'][0]['extra'] + for each in extra: + if each.lower().startswith('tmdb'): + parser = Args(config=config) + category, tmdbid = parser.parse_tmdb_id(id=extra[each], category=category) + if each.lower().startswith('imdb'): + try: + imdbid = str(int(extra[each].replace('tt', ''))).zfill(7) + except Exception: + pass + return category, tmdbid, imdbid + + +async def daily_to_tmdb_season_episode(tmdbid, date): + show = tmdb.TV(tmdbid) + seasons = show.info().get('seasons') + season = 1 + episode = 1 + date = datetime.fromisoformat(str(date)) + for each in seasons: + air_date = datetime.fromisoformat(each['air_date']) + if air_date <= date: + season = int(each['season_number']) + season_info = tmdb.TV_Seasons(tmdbid, season).info().get('episodes') + for 
each in season_info: + if str(each['air_date']) == str(date.date()): + episode = int(each['episode_number']) + break + else: + console.print(f"[yellow]Unable to map the date ([bold yellow]{str(date)}[/bold yellow]) to a Season/Episode number") + return season, episode diff --git a/src/torrentcreate.py b/src/torrentcreate.py new file mode 100644 index 00000000..b4ba48a3 --- /dev/null +++ b/src/torrentcreate.py @@ -0,0 +1,197 @@ +from datetime import datetime +import torf +from torf import Torrent +import random +import math +import os +import re +import cli_ui +import glob +from src.console import console + + +def calculate_piece_size(total_size, min_size, max_size, files, meta): + # Set piece_size_max before calling super().__init__ + if 'max_piece_size' in meta and meta['max_piece_size']: + try: + max_piece_size_mib = int(meta['max_piece_size']) * 1024 * 1024 # Convert MiB to bytes + max_size = min(max_piece_size_mib, torf.Torrent.piece_size_max) + except ValueError: + max_size = 268435456 # Fallback to default if conversion fails + else: + max_size = 268435456 + + file_count = len(files) + our_min_size = 16384 + our_max_size = max_size + if meta['debug']: + console.print(f"Max size: {max_size}") + piece_size = 4194304 # Start with 4 MiB + + num_pieces = math.ceil(total_size / piece_size) + + # Initial torrent_file_size calculation based on file_count + pathname_bytes = sum(len(str(file).encode('utf-8')) for file in files) + if file_count > 1000: + torrent_file_size = 20 + (num_pieces * 20) + int(pathname_bytes * 71 / 100) + elif file_count > 500: + torrent_file_size = 20 + (num_pieces * 20) + int(pathname_bytes * 4 / 5) + else: + torrent_file_size = 20 + (num_pieces * 20) + pathname_bytes + + # Adjust the piece size to fit within the constraints + while not ((750 <= num_pieces <= 2200 or num_pieces < 750 and 40960 <= torrent_file_size <= 250000) and torrent_file_size <= 250000): + if num_pieces > 1000 and num_pieces < 2000 and torrent_file_size < 250000: + break + elif num_pieces < 1500 and torrent_file_size >= 250000: + piece_size *= 2 + if piece_size > our_max_size: + piece_size = our_max_size + break + elif num_pieces < 750: + piece_size //= 2 + if piece_size < our_min_size: + piece_size = our_min_size + break + elif 40960 < torrent_file_size < 250000: + break + elif num_pieces > 2200: + piece_size *= 2 + if piece_size > our_max_size: + piece_size = our_max_size + break + elif torrent_file_size < 2048: + break + elif torrent_file_size > 250000: + piece_size *= 2 + if piece_size > our_max_size: + piece_size = our_max_size + cli_ui.warning('WARNING: .torrent size will exceed 250 KiB!') + break + + # Update num_pieces + num_pieces = math.ceil(total_size / piece_size) + + # Recalculate torrent_file_size based on file_count in each iteration + if file_count > 1000: + torrent_file_size = 20 + (num_pieces * 20) + int(pathname_bytes * 71 / 100) + elif file_count > 500: + torrent_file_size = 20 + (num_pieces * 20) + int(pathname_bytes * 4 / 5) + else: + torrent_file_size = 20 + (num_pieces * 20) + pathname_bytes + + return piece_size + + +class CustomTorrent(torf.Torrent): + # Default piece size limits + torf.Torrent.piece_size_min = 16384 # 16 KiB + torf.Torrent.piece_size_max = 268435456 # 256 MiB + + def __init__(self, meta, *args, **kwargs): + # Set meta early to avoid AttributeError + self._meta = meta + super().__init__(*args, **kwargs) # Now safe to call parent constructor + self.validate_piece_size(meta) # Validate and set the piece size + + @property + def piece_size(self): + return 
self._piece_size + + @piece_size.setter + def piece_size(self, value): + if value is None: + total_size = self._calculate_total_size() + value = calculate_piece_size(total_size, self.piece_size_min, self.piece_size_max, self.files, self._meta) + self._piece_size = value + self.metainfo['info']['piece length'] = value # Ensure 'piece length' is set + + def _calculate_total_size(self): + return sum(file.size for file in self.files) + + def validate_piece_size(self, meta=None): + if meta is None: + meta = self._meta # Use stored meta if not explicitly provided + if not hasattr(self, '_piece_size') or self._piece_size is None: + total_size = self._calculate_total_size() + self.piece_size = calculate_piece_size(total_size, self.piece_size_min, self.piece_size_max, self.files, meta) + self.metainfo['info']['piece length'] = self.piece_size # Ensure 'piece length' is set + + +def create_torrent(meta, path, output_filename): + # Handle directories and file inclusion logic + if meta['isdir']: + if meta['keep_folder']: + cli_ui.info('--keep-folder was specified. Using complete folder for torrent creation.') + path = path + else: + os.chdir(path) + globs = glob.glob1(path, "*.mkv") + glob.glob1(path, "*.mp4") + glob.glob1(path, "*.ts") + no_sample_globs = [] + for file in globs: + if not file.lower().endswith('sample.mkv') or "!sample" in file.lower(): + no_sample_globs.append(os.path.abspath(f"{path}{os.sep}{file}")) + if len(no_sample_globs) == 1: + path = meta['filelist'][0] + if meta['is_disc']: + include, exclude = "", "" + else: + exclude = ["*.*", "*sample.mkv", "!sample*.*"] + include = ["*.mkv", "*.mp4", "*.ts"] + + # Create and write the new torrent using the CustomTorrent class + torrent = CustomTorrent( + meta=meta, + path=path, + trackers=["https://fake.tracker"], + source="L4G", + private=True, + exclude_globs=exclude or [], + include_globs=include or [], + creation_date=datetime.now(), + comment="Created by L4G's Upload Assistant", + created_by="L4G's Upload Assistant" + ) + + # Ensure piece size is validated before writing + torrent.validate_piece_size(meta) + + # Generate and write the new torrent + torrent.generate(callback=torf_cb, interval=5) + torrent.write(f"{meta['base_dir']}/tmp/{meta['uuid']}/{output_filename}.torrent", overwrite=True) + torrent.verify_filesize(path) + + console.print("[bold green].torrent created", end="\r") + return torrent + + +def torf_cb(torrent, filepath, pieces_done, pieces_total): + # print(f'{pieces_done/pieces_total*100:3.0f} % done') + cli_ui.info_progress("Hashing...", pieces_done, pieces_total) + + +def create_random_torrents(base_dir, uuid, num, path): + manual_name = re.sub(r"[^0-9a-zA-Z\[\]\'\-]+", ".", os.path.basename(path)) + base_torrent = Torrent.read(f"{base_dir}/tmp/{uuid}/BASE.torrent") + for i in range(1, int(num) + 1): + new_torrent = base_torrent + new_torrent.metainfo['info']['entropy'] = random.randint(1, 999999) + Torrent.copy(new_torrent).write(f"{base_dir}/tmp/{uuid}/[RAND-{i}]{manual_name}.torrent", overwrite=True) + + +async def create_base_from_existing_torrent(torrentpath, base_dir, uuid): + if os.path.exists(torrentpath): + base_torrent = Torrent.read(torrentpath) + base_torrent.trackers = ['https://fake.tracker'] + base_torrent.comment = "Created by L4G's Upload Assistant" + base_torrent.created_by = "Created by L4G's Upload Assistant" + # Remove Un-whitelisted info from torrent + for each in list(base_torrent.metainfo['info']): + if each not in ('files', 'length', 'name', 'piece length', 'pieces', 'private', 'source'): + 
base_torrent.metainfo['info'].pop(each, None) + for each in list(base_torrent.metainfo): + if each not in ('announce', 'comment', 'creation date', 'created by', 'encoding', 'info'): + base_torrent.metainfo.pop(each, None) + base_torrent.source = 'L4G' + base_torrent.private = True + Torrent.copy(base_torrent).write(f"{base_dir}/tmp/{uuid}/BASE.torrent", overwrite=True) diff --git a/src/trackerhandle.py b/src/trackerhandle.py new file mode 100644 index 00000000..132c0594 --- /dev/null +++ b/src/trackerhandle.py @@ -0,0 +1,143 @@ +import asyncio +import traceback +import requests +import cli_ui +from src.trackers.THR import THR +from src.trackers.PTP import PTP +from src.trackersetup import TRACKER_SETUP +from src.trackers.COMMON import COMMON +from src.manualpackage import package + + +async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): + tracker_capabilities = { + 'AITHER': {'mod_q': True, 'draft': False}, + 'BHD': {'draft_live': True}, + 'BLU': {'mod_q': True, 'draft': False}, + 'LST': {'mod_q': True, 'draft': True} + } + + modq, draft = None, None + tracker_caps = tracker_capabilities.get(tracker_class.tracker, {}) + if tracker_class.tracker == 'BHD' and tracker_caps.get('draft_live'): + draft_int = await tracker_class.get_live(meta) + draft = "Draft" if draft_int == 0 else "Live" + + else: + if tracker_caps.get('mod_q'): + modq = await tracker_class.get_flag(meta, 'modq') + modq = 'Yes' if modq else 'No' + if tracker_caps.get('draft'): + draft = await tracker_class.get_flag(meta, 'draft') + draft = 'Yes' if draft else 'No' + + return modq, draft + + +async def process_trackers(meta, config, client, console, api_trackers, tracker_class_map, http_trackers, other_api_trackers): + common = COMMON(config=config) + tracker_setup = TRACKER_SETUP(config=config) + enabled_trackers = tracker_setup.trackers_enabled(meta) + + async def process_single_tracker(tracker): + if meta['name'].endswith('DUPE?'): + meta['name'] = meta['name'].replace(' DUPE?', '') + + if meta['debug']: + debug = "(DEBUG)" + else: + debug = "" + disctype = meta.get('disctype', None) + tracker = tracker.replace(" ", "").upper().strip() + + if tracker in api_trackers: + tracker_class = tracker_class_map[tracker](config=config) + tracker_status = meta.get('tracker_status', {}) + upload_status = tracker_status.get(tracker, {}).get('upload', False) + console.print(f"[yellow]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[/yellow]") + if upload_status: + modq, draft = await check_mod_q_and_draft(tracker_class, meta, debug, disctype) + if modq is not None: + console.print(f"(modq: {modq})") + if draft is not None: + console.print(f"(draft: {draft})") + console.print(f"Uploading to {tracker_class.tracker}") + await tracker_class.upload(meta, disctype) + await client.add_to_client(meta, tracker_class.tracker) + + elif tracker in other_api_trackers: + tracker_class = tracker_class_map[tracker](config=config) + tracker_status = meta.get('tracker_status', {}) + upload_status = tracker_status.get(tracker, {}).get('upload', False) + console.print(f"[yellow]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[/yellow]") + if upload_status: + console.print(f"Uploading to {tracker_class.tracker}") + if tracker != "TL": + if tracker == "RTF": + await tracker_class.api_test(meta) + if tracker == "TL" or upload_status: + await tracker_class.upload(meta, disctype) + if tracker == 'SN': + await asyncio.sleep(16) + await client.add_to_client(meta, tracker_class.tracker) + + elif tracker in http_trackers: 
+ tracker_class = tracker_class_map[tracker](config=config) + tracker_status = meta.get('tracker_status', {}) + upload_status = tracker_status.get(tracker, {}).get('upload', False) + if upload_status: + console.print(f"Uploading to {tracker}") + if await tracker_class.validate_credentials(meta) is True: + await tracker_class.upload(meta, disctype) + await client.add_to_client(meta, tracker_class.tracker) + + elif tracker == "MANUAL": + if meta['unattended']: + do_manual = True + else: + do_manual = cli_ui.ask_yes_no("Get files for manual upload?", default=True) + if do_manual: + for manual_tracker in enabled_trackers: + if manual_tracker != 'MANUAL': + manual_tracker = manual_tracker.replace(" ", "").upper().strip() + tracker_class = tracker_class_map[manual_tracker](config=config) + if manual_tracker in api_trackers: + await common.unit3d_edit_desc(meta, tracker_class.tracker, tracker_class.signature) + else: + await tracker_class.edit_desc(meta) + url = await package(meta) + if url is False: + console.print(f"[yellow]Unable to upload prep files, they can be found at `tmp/{meta['uuid']}") + else: + console.print(f"[green]{meta['name']}") + console.print(f"[green]Files can be found at: [yellow]{url}[/yellow]") + + elif tracker == "THR": + tracker_status = meta.get('tracker_status', {}) + upload_status = tracker_status.get(tracker, {}).get('upload', False) + console.print(f"[yellow]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[/yellow]") + if upload_status: + thr = THR(config=config) + try: + with requests.Session() as session: + console.print("[yellow]Logging in to THR") + session = thr.login(session) + await thr.upload(session, meta, disctype) + await client.add_to_client(meta, "THR") + except Exception: + console.print(traceback.format_exc()) + + elif tracker == "PTP": + tracker_status = meta.get('tracker_status', {}) + upload_status = tracker_status.get(tracker, {}).get('upload', False) + console.print(f"[yellow]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[/yellow]") + if upload_status: + ptp = PTP(config=config) + groupID = meta.get('ptp_groupID', None) + ptpUrl, ptpData = await ptp.fill_upload_form(groupID, meta) + await ptp.upload(meta, ptpUrl, ptpData, disctype) + await client.add_to_client(meta, "PTP") + + # Process all trackers concurrently + tasks = [process_single_tracker(tracker) for tracker in enabled_trackers] + await asyncio.gather(*tasks) diff --git a/src/trackermeta.py b/src/trackermeta.py new file mode 100644 index 00000000..544a63ec --- /dev/null +++ b/src/trackermeta.py @@ -0,0 +1,355 @@ +from src.console import console +from src.trackers.COMMON import COMMON +from data.config import config +import aiohttp +import asyncio +import sys +from PIL import Image +import io +from io import BytesIO + + +async def prompt_user_for_confirmation(message: str) -> bool: + try: + response = input(f"{message} (Y/n): ").strip().lower() + if response in ["y", "yes", ""]: + return True + return False + except EOFError: + sys.exit(1) + + +async def check_images_concurrently(imagelist, meta): + approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb'] + invalid_host_found = False # Track if any image is on a non-approved host + + # Ensure meta['image_sizes'] exists + if 'image_sizes' not in meta: + meta['image_sizes'] = {} + + # Map fixed resolution names to vertical resolutions + resolution_map = { + '8640p': 8640, + '4320p': 4320, + '2160p': 2160, + '1440p': 1440, + '1080p': 1080, + '1080i': 1080, + '720p': 720, + '576p': 576, + '576i': 576, + '480p': 480, + 
'480i': 480, + } + + # Get expected vertical resolution + expected_resolution_name = meta.get('resolution', None) + expected_vertical_resolution = resolution_map.get(expected_resolution_name, None) + + # If no valid resolution is found, skip processing + if expected_vertical_resolution is None: + console.print("[red]Meta resolution is invalid or missing. Skipping all images.[/red]") + return [] + + # Function to check each image's URL, host, and log resolution + async def check_and_collect(image_dict): + img_url = image_dict.get('raw_url') + if not img_url: + return None + + if "ptpimg.me" in img_url and img_url.startswith("http://"): + img_url = img_url.replace("http://", "https://") + image_dict['raw_url'] = img_url + image_dict['web_url'] = img_url + + # Verify the image link + if await check_image_link(img_url): + # Check if the image is hosted on an approved image host + if not any(host in img_url for host in approved_image_hosts): + nonlocal invalid_host_found + invalid_host_found = True # Mark that we found an invalid host + + async with aiohttp.ClientSession() as session: + async with session.get(img_url) as response: + if response.status == 200: + image_content = await response.read() + + try: + image = Image.open(BytesIO(image_content)) + vertical_resolution = image.height + lower_bound = expected_vertical_resolution * 0.70 # 30% below + if meta['is_disc'] == "DVD": + upper_bound = expected_vertical_resolution * 1.30 + else: + upper_bound = expected_vertical_resolution * 1.00 + + if not (lower_bound <= vertical_resolution <= upper_bound): + console.print( + f"[red]Image {img_url} resolution ({vertical_resolution}p) " + f"is outside the allowed range ({int(lower_bound)}-{int(upper_bound)}p). Skipping.[/red]" + ) + return None + + meta['image_sizes'][img_url] = len(image_content) + console.print( + f"Valid image {img_url} with resolution {image.width}x{image.height} " + f"and size {len(image_content) / 1024:.2f} KiB" + ) + except Exception as e: + console.print(f"[red]Failed to process image {img_url}: {e}") + return None + else: + console.print(f"[red]Failed to fetch image {img_url}. Skipping.") + + return image_dict + else: + return None + + # Run image verification concurrently + tasks = [check_and_collect(image_dict) for image_dict in imagelist] + results = await asyncio.gather(*tasks) + + # Collect valid images + valid_images = [image for image in results if image is not None] + + # Convert default_trackers string into a list + default_trackers = config['TRACKERS'].get('default_trackers', '') + trackers_list = [tracker.strip() for tracker in default_trackers.split(',')] + + # Ensure meta['trackers'] is a list + if meta.get('trackers') is not None: + if isinstance(meta.get('trackers', ''), str): + meta['trackers'] = [tracker.strip() for tracker in meta['trackers'].split(',')] + if 'MTV' in meta.get('trackers', []): + if invalid_host_found: + console.print( + "[red]Warning: Some images are not hosted on an MTV-approved image host. MTV will need new images later.[/red]" + ) + # Issue warning if any valid image is on an unapproved host and MTV is in the trackers list + elif 'MTV' in trackers_list: + if invalid_host_found: + console.print("[red]Warning: Some images are not hosted on an MTV-approved image host. 
MTV will need new images later.[/red]") + + return valid_images + + +async def check_image_link(url): + async with aiohttp.ClientSession() as session: + try: + async with session.get(url) as response: + if response.status == 200: + content_type = response.headers.get('Content-Type', '').lower() + if 'image' in content_type: + # Attempt to load the image + image_data = await response.read() + try: + image = Image.open(io.BytesIO(image_data)) + image.verify() # This will check if the image is broken + console.print(f"[green]Image verified successfully: {url}[/green]") + return True + except (IOError, SyntaxError) as e: # noqa #F841 + console.print(f"[red]Image verification failed (corrupt image): {url}[/red]") + return False + else: + console.print(f"[red]Content type is not an image: {url}[/red]") + return False + else: + console.print(f"[red]Failed to retrieve image: {url} (status code: {response.status})[/red]") + return False + except Exception as e: + console.print(f"[red]Exception occurred while checking image: {url} - {str(e)}[/red]") + return False + + +async def update_meta_with_unit3d_data(meta, tracker_data, tracker_name): + # Unpack the expected 9 elements, ignoring any additional ones + tmdb, imdb, tvdb, mal, desc, category, infohash, imagelist, filename, *rest = tracker_data + + if tmdb not in [None, '0']: + meta['tmdb_manual'] = tmdb + if imdb not in [None, '0']: + meta['imdb'] = str(imdb).zfill(7) + if tvdb not in [None, '0']: + meta['tvdb_id'] = tvdb + if mal not in [None, '0']: + meta['mal'] = mal + if desc not in [None, '0', '']: + meta['description'] = desc + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: + description.write((desc or "") + "\n") + if category.upper() in ['MOVIE', 'TV SHOW', 'FANRES']: + meta['category'] = 'TV' if category.upper() == 'TV SHOW' else category.upper() + + if not meta.get('image_list'): # Only handle images if image_list is not already populated + if imagelist: # Ensure imagelist is not empty before setting + valid_images = await check_images_concurrently(imagelist, meta) + if valid_images: + meta['image_list'] = valid_images + if meta.get('image_list'): # Double-check if image_list is set before handling it + if not (meta.get('blu') or meta.get('aither') or meta.get('lst') or meta.get('oe') or meta.get('tik')) or meta['unattended']: + await handle_image_list(meta, tracker_name) + + if filename: + meta[f'{tracker_name.lower()}_filename'] = filename + + console.print(f"[green]{tracker_name} data successfully updated in meta[/green]") + + +async def update_metadata_from_tracker(tracker_name, tracker_instance, meta, search_term, search_file_folder): + tracker_key = tracker_name.lower() + manual_key = f"{tracker_key}_manual" + found_match = False + + if tracker_name in ["BLU", "AITHER", "LST", "OE", "TIK"]: + if meta.get(tracker_key) is not None: + console.print(f"[cyan]{tracker_name} ID found in meta, reusing existing ID: {meta[tracker_key]}[/cyan]") + tracker_data = await COMMON(config).unit3d_torrent_info( + tracker_name, + tracker_instance.torrent_url, + tracker_instance.search_url, + meta, + id=meta[tracker_key] + ) + else: + console.print(f"[yellow]No ID found in meta for {tracker_name}, searching by file name[/yellow]") + tracker_data = await COMMON(config).unit3d_torrent_info( + tracker_name, + tracker_instance.torrent_url, + tracker_instance.search_url, + meta, + file_name=search_term + ) + + if any(item not in [None, '0'] for item in tracker_data[:3]): # Check for 
valid tmdb, imdb, or tvdb + console.print(f"[green]Valid data found on {tracker_name}, setting meta values[/green]") + await update_meta_with_unit3d_data(meta, tracker_data, tracker_name) + found_match = True + else: + console.print(f"[yellow]No valid data found on {tracker_name}[/yellow]") + found_match = False + + elif tracker_name == "PTP": + imdb_id = None + if meta.get('ptp') is None: + imdb_id, ptp_torrent_id, ptp_torrent_hash = await tracker_instance.get_ptp_id_imdb(search_term, search_file_folder, meta) + if ptp_torrent_id: + meta['imdb'] = str(imdb_id).zfill(7) if imdb_id else None + console.print(f"[green]{tracker_name} IMDb ID found: tt{meta['imdb']}[/green]") + + if not meta['unattended']: + if await prompt_user_for_confirmation("Do you want to use this ID data from PTP?"): + found_match = True + meta['ptp'] = ptp_torrent_id + ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(ptp_torrent_id, meta, meta.get('is_disc', False)) + meta['description'] = ptp_desc + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: + description.write((ptp_desc or "") + "\n") + + if not meta.get('image_list'): + valid_images = await check_images_concurrently(ptp_imagelist, meta) + if valid_images: + meta['image_list'] = valid_images + await handle_image_list(meta, tracker_name) + + else: + found_match = False + meta['imdb'] = None + + else: + found_match = True + ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(ptp_torrent_id, meta, meta.get('is_disc', False)) + meta['description'] = ptp_desc + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: + description.write((ptp_desc or "") + "\n") + meta['saved_description'] = True + + if not meta.get('image_list'): + valid_images = await check_images_concurrently(ptp_imagelist, meta) + if valid_images: + meta['image_list'] = valid_images + else: + console.print("[yellow]Skipping PTP as no match found[/yellow]") + found_match = False + + else: + ptp_torrent_id = meta['ptp'] + console.print("[cyan]Using specified PTP ID to get IMDb ID[/cyan]") + imdb_id, _, meta['ext_torrenthash'] = await tracker_instance.get_imdb_from_torrent_id(ptp_torrent_id) + if imdb_id: + meta['imdb'] = str(imdb_id).zfill(7) + console.print(f"[green]IMDb ID found: tt{meta['imdb']}[/green]") + found_match = True + meta['skipit'] = True + ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(meta['ptp'], meta, meta.get('is_disc', False)) + meta['description'] = ptp_desc + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: + description.write(ptp_desc + "\n") + meta['saved_description'] = True + if not meta.get('image_list'): # Only handle images if image_list is not already populated + valid_images = await check_images_concurrently(ptp_imagelist, meta) + if valid_images: + meta['image_list'] = valid_images + console.print("[green]PTP images added to metadata.[/green]") + else: + console.print(f"[yellow]Could not find IMDb ID using PTP ID: {ptp_torrent_id}[/yellow]") + found_match = False + + elif tracker_name == "HDB": + if meta.get('hdb') is not None: + meta[manual_key] = meta[tracker_key] + console.print(f"[cyan]{tracker_name} ID found in meta, reusing existing ID: {meta[tracker_key]}[/cyan]") + + # Use get_info_from_torrent_id function if ID is found in meta + imdb, tvdb_id, hdb_name, meta['ext_torrenthash'] = await 
tracker_instance.get_info_from_torrent_id(meta[tracker_key]) + + meta['tvdb_id'] = str(tvdb_id) if tvdb_id else meta.get('tvdb_id') + meta['hdb_name'] = hdb_name + found_match = True + + # Skip user confirmation if searching by ID + console.print(f"[green]{tracker_name} data found: IMDb ID: {imdb}, TVDb ID: {meta['tvdb_id']}, HDB Name: {meta['hdb_name']}[/green]") + else: + console.print("[yellow]No ID found in meta for HDB, searching by file name[/yellow]") + + # Use search_filename function if ID is not found in meta + imdb, tvdb_id, hdb_name, meta['ext_torrenthash'], tracker_id = await tracker_instance.search_filename(search_term, search_file_folder, meta) + + meta['tvdb_id'] = str(tvdb_id) if tvdb_id else meta.get('tvdb_id') + meta['hdb_name'] = hdb_name + if tracker_id: + meta[tracker_key] = tracker_id + found_match = True + + if found_match: + if imdb or tvdb_id or hdb_name: + console.print(f"[green]{tracker_name} data found: IMDb ID: {imdb}, TVDb ID: {meta['tvdb_id']}, HDB Name: {meta['hdb_name']}[/green]") + if await prompt_user_for_confirmation(f"Do you want to use the ID's found on {tracker_name}?"): + console.print(f"[green]{tracker_name} data retained.[/green]") + else: + console.print(f"[yellow]{tracker_name} data discarded.[/yellow]") + meta[tracker_key] = None + meta['tvdb_id'] = None + meta['hdb_name'] = None + found_match = False + else: + found_match = False + + return meta, found_match + + +async def handle_image_list(meta, tracker_name): + if meta.get('image_list'): + console.print(f"[cyan]Found the following images from {tracker_name}:") + for img in meta['image_list']: + console.print(f"[blue]{img}[/blue]") + + if meta['unattended']: + keep_images = True + else: + keep_images = await prompt_user_for_confirmation(f"Do you want to keep the images found on {tracker_name}?") + if not keep_images: + meta['image_list'] = [] + meta['image_sizes'] = {} + console.print(f"[yellow]Images discarded from {tracker_name}.") + else: + console.print(f"[green]Images retained from {tracker_name}.") diff --git a/src/trackers/ACM.py b/src/trackers/ACM.py index 76fd3b9f..b438894a 100644 --- a/src/trackers/ACM.py +++ b/src/trackers/ACM.py @@ -8,6 +8,7 @@ from src.trackers.COMMON import COMMON from src.console import console import bencodepy +import httpx class ACM(): @@ -281,15 +282,21 @@ async def search_existing(self, meta, disctype): } # Adding Name to search seems to override tmdb try: - response = requests.get(url=self.search_url, params=params) - response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] - # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() - # if difference >= 0.05: - dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=self.search_url, params=params) + if response.status_code == 200: + data = response.json() + for each in data['data']: + result = [each][0]['attributes']['name'] + dupes.append(result) + else: + console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(5) return dupes diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index 2ab28bbe..7c08c3e0 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -5,9 +5,9 @@ from str2bool import str2bool import platform import re -import bencodepy import os import glob +import httpx from src.trackers.COMMON import COMMON from src.console import console @@ -58,7 +58,8 @@ async def upload(self, meta, disctype): mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + open_torrent = open(torrent_file_path, 'rb') files = {'torrent': open_torrent} base_dir = meta['base_dir'] uuid = meta['uuid'] @@ -116,6 +117,9 @@ async def upload(self, meta, disctype): response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) + # adding torrent link to comment of torrent file + t_id = response.json()['data'].split(".")[1].split("/")[3] + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://aither.cc/torrents/" + t_id) except Exception: console.print("It may have uploaded, go check") return @@ -242,54 +246,21 @@ async def search_existing(self, meta, disctype): params['name'] = params['name'] + f" {meta['edition']}" try: - response = requests.get(url=self.search_url, params=params) - response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] - # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() - # if difference >= 0.05: - dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=self.search_url, params=params) + if response.status_code == 200: + data = response.json() + for each in data['data']: + result = [each][0]['attributes']['name'] + dupes.append(result) + else: + console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(5) return dupes - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - Name = meta['name'] - quoted_name = f'"{Name}"' - - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': quoted_name - } - - try: - response = requests.get(url=self.search_url, params=params) - response.raise_for_status() - response_data = response.json() - - if response_data['data'] and isinstance(response_data['data'], list): - details_link = response_data['data'][0]['attributes'].get('details_link') - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) - - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) - - return details_link - else: - return None - else: - return None - - except requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None diff --git a/src/trackers/AL.py b/src/trackers/AL.py index eea9eede..b4d0dbab 100644 --- a/src/trackers/AL.py +++ b/src/trackers/AL.py @@ -4,9 +4,9 @@ import requests import platform from str2bool import str2bool -import bencodepy import os import glob +import httpx from src.trackers.COMMON import COMMON from src.console import console @@ -152,6 +152,9 @@ async def upload(self, meta, disctype): response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) + # adding torrent link to comment of torrent file + t_id = response.json()['data'].split(".")[1].split("/")[3] + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://animelovers.club/torrents/" + t_id) except Exception: console.print("It may have uploaded, go check") return @@ -174,15 +177,21 @@ async def search_existing(self, meta, disctype): if meta.get('edition', "") != "": params['name'] = params['name'] + f" {meta['edition']}" try: - response = requests.get(url=self.search_url, params=params) - response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] - # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() - # if difference >= 0.05: - dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=self.search_url, params=params) + if response.status_code == 200: + data = response.json() + for each in data['data']: + result = [each][0]['attributes']['name'] + dupes.append(result) + else: + console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(5) return dupes @@ -191,42 +200,3 @@ async def search_existing(self, meta, disctype): async def edit_name(self, meta): name = meta['uuid'].replace('.mkv', '').replace('.mp4', '').replace(".", " ").replace("DDP2 0", "DDP2.0").replace("DDP5 1", "DDP5.1").replace("H 264", "x264").replace("H 265", "x265").replace("DD+7 1", "DDP7.1").replace("AAC2 0", "AAC2.0").replace('DD5 1', 'DD5.1').replace('DD2 0', 'DD2.0').replace('TrueHD 7 1', 'TrueHD 7.1').replace('DTS-HD MA 7 1', 'DTS-HD MA 7.1').replace('DTS-HD MA 5 1', 'DTS-HD MA 5.1').replace("TrueHD 5 1", "TrueHD 5.1").replace("DTS-X 7 1", "DTS-X 7.1").replace("DTS-X 5 1", "DTS-X 5.1").replace("FLAC 2 0", "FLAC 2.0").replace("FLAC 2 0", "FLAC 2.0").replace("FLAC 5 1", "FLAC 5.1").replace("DD1 0", "DD1.0").replace("DTS ES 5 1", "DTS ES 5.1").replace("DTS5 1", "DTS 5.1").replace("AAC1 0", "AAC1.0").replace("DD+5 1", "DDP5.1").replace("DD+2 0", "DDP2.0").replace("DD+1 0", "DDP1.0") return name - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - Name = meta['name'] - quoted_name = f'"{Name}"' - - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': quoted_name - } - - try: - response = requests.get(url=self.search_url, params=params) - response.raise_for_status() - response_data = response.json() - - if response_data['data'] and isinstance(response_data['data'], list): - details_link = response_data['data'][0]['attributes'].get('details_link') - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) - - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) - - return details_link - else: - return None - else: - return None - - except requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py index f88ae379..6a7c399f 100644 --- a/src/trackers/ANT.py +++ b/src/trackers/ANT.py @@ -4,11 +4,13 @@ import asyncio import requests import platform +import httpx from str2bool import str2bool from pymediainfo import MediaInfo from pathlib import Path from src.trackers.COMMON import COMMON from src.console import console +from src.torrentcreate import create_torrent class ANT(): @@ -69,11 +71,8 @@ async def upload(self, meta, disctype): # Trigger regeneration automatically if size constraints aren't met if torrent_file_size_kib > 250: # 250 KiB console.print("[yellow]Existing .torrent exceeds 250 KiB and will be regenerated to fit constraints.") - - from src.prep import Prep - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) meta['max_piece_size'] = '256' # 256 MiB - prep.create_torrent(meta, Path(meta['path']), "ANT") + create_torrent(meta, Path(meta['path']), "ANT") torrent_filename = "ANT" await common.edit_torrent(meta, self.tracker, self.source_flag, 
torrent_filename=torrent_filename) @@ -145,7 +144,7 @@ async def search_existing(self, meta, disctype): if meta.get('category') == "TV": console.print('[bold red]This site only ALLOWS Movies.') meta['skipping'] = "ANT" - return + return [] dupes = [] console.print("[yellow]Searching for existing torrents on ANT...") params = { @@ -157,18 +156,28 @@ async def search_existing(self, meta, disctype): params['tmdb'] = meta['tmdb'] elif int(meta['imdb_id'].replace('tt', '')) != 0: params['imdb'] = meta['imdb_id'] + try: - response = requests.get(url='https://anthelion.me/api', params=params) - response = response.json() - for each in response['item']: - largest = [each][0]['files'][0] - for file in [each][0]['files']: - if int(file['size']) > int(largest['size']): - largest = file - result = largest['name'] - dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url='https://anthelion.me/api', params=params) + if response.status_code == 200: + data = response.json() + for each in data.get('item', []): + # Find the largest file + largest = each['files'][0] + for file in each['files']: + if int(file['size']) > int(largest['size']): + largest = file + result = largest['name'] + dupes.append(result) + else: + console.print(f"[bold red]Failed to search torrents. HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(5) return dupes diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index d3a2361a..62e0b8cd 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -6,14 +6,15 @@ from str2bool import str2bool import os import platform -import hashlib import bencodepy import glob -import multiprocessing +import httpx +import re from urllib.parse import urlparse - from src.trackers.COMMON import COMMON from src.console import console +from src.takescreens import disc_screenshots, dvd_screenshots, screenshots +from src.uploadscreens import upload_screens class BHD(): @@ -123,7 +124,6 @@ async def upload_with_retry(self, meta, common, img_host_index=1): if os.path.exists(torrent_file): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files['file'] = open_torrent.read() - open_torrent.close() data = { 'name': bhd_name, @@ -164,6 +164,7 @@ async def upload_with_retry(self, meta, common, img_host_index=1): } url = self.upload_url + self.config['TRACKERS'][self.tracker]['api_key'].strip() + details_link = {} if meta['debug'] is False: response = requests.post(url=url, files=files, data=data, headers=headers) try: @@ -177,14 +178,39 @@ async def upload_with_retry(self, meta, common, img_host_index=1): response = response.json() elif response['status_message'].startswith('Invalid name value'): console.print(f"[bold yellow]Submitted Name: {bhd_name}") + + if 'status_message' in response: + match = re.search(r"https://beyond-hd\.me/torrent/download/.*\.(\d+)\.", response['status_message']) + if match: + torrent_id = match.group(1) + details_link = f"https://beyond-hd.me/details/{torrent_id}" + else: + console.print("[yellow]No valid details link found in 
status_message.") + console.print(response) - except Exception: + except Exception as e: console.print("It may have uploaded, go check") + console.print(f"Error: {e}") return else: console.print("[cyan]Request Data:") console.print(data) + if details_link: + try: + open_torrent.seek(0) + torrent_data = open_torrent.read() + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + with open(torrent_file, 'wb') as updated_torrent_file: + updated_torrent_file.write(bencodepy.encode(torrent)) + + console.print(f"Torrent file updated with comment: {details_link}") + except Exception as e: + console.print(f"Error while editing the torrent file: {e}") + + open_torrent.close() + async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts=None, file=None): if approved_image_hosts is None: approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb', 'pixhost'] @@ -203,15 +229,12 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts filelist = meta.get('video', []) filename = meta['filename'] path = meta['path'] - if isinstance(filelist, str): filelist = [filelist] multi_screens = int(self.config['DEFAULT'].get('screens', 6)) base_dir = meta['base_dir'] folder_id = meta['uuid'] - from src.prep import Prep - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) meta[new_images_key] = [] screenshots_dir = os.path.join(base_dir, 'tmp', folder_id) @@ -229,28 +252,24 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts if meta.get('debug'): console.print("[yellow]The image host of existing images is not supported.") console.print(f"[yellow]Insufficient screenshots found: generating {multi_screens} screenshots.") - if meta['is_disc'] == "BDMV": - s = multiprocessing.Process( - target=prep.disc_screenshots, - args=(f"FILE_{img_host_index}", meta['bdinfo'], folder_id, base_dir, - meta.get('vapoursynth', False), [], meta.get('ffdebug', False), img_host_index) - ) + try: + disc_screenshots(meta, filename, meta['bdinfo'], folder_id, base_dir, meta.get('vapoursynth', False), [], meta.get('ffdebug', False), multi_screens, True) + except Exception as e: + print(f"Error during BDMV screenshot capture: {e}") elif meta['is_disc'] == "DVD": - s = multiprocessing.Process( - target=prep.dvd_screenshots, - args=(meta, 0, None, True) - ) + try: + dvd_screenshots( + meta, 0, None, True + ) + except Exception as e: + print(f"Error during DVD screenshot capture: {e}") else: - s = multiprocessing.Process( - target=prep.screenshots, - args=(path, f"{filename}", meta['uuid'], base_dir, - meta, multi_screens + 1, True, None) - ) - - s.start() - while s.is_alive(): - await asyncio.sleep(1) + try: + screenshots( + path, filename, meta['uuid'], base_dir, meta, multi_screens, True, None) + except Exception as e: + print(f"Error during generic screenshot capture: {e}") if meta['is_disc'] == "DVD": existing_screens = glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][0]['name']}-*.png") @@ -263,57 +282,60 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts console.print("[red]No screenshots were generated or found. 
Please check the screenshot generation process.") return [], True, images_reuploaded - uploaded_images = [] - while True: - current_img_host_key = f'img_host_{img_host_index}' - current_img_host = self.config.get('DEFAULT', {}).get(current_img_host_key) + if not meta.get('skip_imghost_upload', False): + uploaded_images = [] + while True: + current_img_host_key = f'img_host_{img_host_index}' + current_img_host = self.config.get('DEFAULT', {}).get(current_img_host_key) - if not current_img_host: - console.print("[red]No more image hosts left to try.") - return - - if current_img_host not in approved_image_hosts: - console.print(f"[red]Your preferred image host '{current_img_host}' is not supported at BHD, trying next host.") - retry_mode = True - images_reuploaded = True - img_host_index += 1 - continue - else: - meta['imghost'] = current_img_host - console.print(f"[green]Uploading to approved host '{current_img_host}'.") - break + if not current_img_host: + console.print("[red]No more image hosts left to try.") + return - uploaded_images, _ = prep.upload_screens( - meta, multi_screens, img_host_index, 0, multi_screens, - all_screenshots, {new_images_key: meta[new_images_key]}, retry_mode - ) + if current_img_host not in approved_image_hosts: + console.print(f"[red]Your preferred image host '{current_img_host}' is not supported at BHD, trying next host.") + retry_mode = True + images_reuploaded = True + img_host_index += 1 + continue + else: + meta['imghost'] = current_img_host + console.print(f"[green]Uploading to approved host '{current_img_host}'.") + break - if uploaded_images: - meta[new_images_key] = uploaded_images + uploaded_images, _ = upload_screens( + meta, multi_screens, img_host_index, 0, multi_screens, + all_screenshots, {new_images_key: meta[new_images_key]}, retry_mode + ) - if meta['debug']: - for image in uploaded_images: - console.print(f"[debug] Response in upload_image_task: {image['img_url']}, {image['raw_url']}, {image['web_url']}") - - for image in meta.get(new_images_key, []): - raw_url = image['raw_url'] - parsed_url = urlparse(raw_url) - hostname = parsed_url.netloc - mapped_host = self.match_host(hostname, url_host_mapping.keys()) - mapped_host = url_host_mapping.get(mapped_host, mapped_host) - - if mapped_host not in approved_image_hosts: - console.print(f"[red]Unsupported image host detected in URL '{raw_url}'. Please use one of the approved image hosts.") - return meta[new_images_key], True, images_reuploaded # Trigger retry_mode if switching hosts - - if all( - url_host_mapping.get( - self.match_host(urlparse(image['raw_url']).netloc, url_host_mapping.keys()), - self.match_host(urlparse(image['raw_url']).netloc, url_host_mapping.keys()), - ) in approved_image_hosts - for image in meta[new_images_key] - ): + if uploaded_images: + meta[new_images_key] = uploaded_images + if meta['debug']: + for image in uploaded_images: + console.print(f"[debug] Response in upload_image_task: {image['img_url']}, {image['raw_url']}, {image['web_url']}") + + for image in meta.get(new_images_key, []): + raw_url = image['raw_url'] + parsed_url = urlparse(raw_url) + hostname = parsed_url.netloc + mapped_host = self.match_host(hostname, url_host_mapping.keys()) + mapped_host = url_host_mapping.get(mapped_host, mapped_host) + + if mapped_host not in approved_image_hosts: + console.print(f"[red]Unsupported image host detected in URL '{raw_url}'. 
Please use one of the approved image hosts.") + return meta[new_images_key], True, images_reuploaded # Trigger retry_mode if switching hosts + + if all( + url_host_mapping.get( + self.match_host(urlparse(image['raw_url']).netloc, url_host_mapping.keys()), + self.match_host(urlparse(image['raw_url']).netloc, url_host_mapping.keys()), + ) in approved_image_hosts + for image in meta[new_images_key] + ): + + return meta[new_images_key], False, images_reuploaded + else: return meta[new_images_key], False, images_reuploaded async def get_cat_id(self, category_name): @@ -421,29 +443,34 @@ async def edit_desc(self, meta): async def search_existing(self, meta, disctype): bhd_name = await self.edit_name(meta) - if any(phrase in bhd_name.lower() for phrase in ("-framestor", "-bhdstudio", "-bmf", "-decibel", "-d-zone", "-hifi", "-ncmt", "-tdd", "-flux", "-crfw", "-sonny", "-zr-", "-mkvultra", "-rpg", "-w4nk3r", "-irobot", "-beyondhd")): + if any(phrase in bhd_name.lower() for phrase in ( + "-framestor", "-bhdstudio", "-bmf", "-decibel", "-d-zone", "-hifi", + "-ncmt", "-tdd", "-flux", "-crfw", "-sonny", "-zr-", "-mkvultra", + "-rpg", "-w4nk3r", "-irobot", "-beyondhd" + )): console.print("[bold red]This is an internal BHD release, skipping upload[/bold red]") meta['skipping'] = "BHD" - return + return [] if meta['type'] == "DVDRIP": console.print("[bold red]No DVDRIP at BHD, skipping upload[/bold red]") meta['skipping'] = "BHD" - return + return [] + dupes = [] console.print("[yellow]Searching for existing torrents on BHD...") category = meta['category'] + tmdbID = "movie" if category == 'MOVIE' else "tv" if category == 'MOVIE': - tmdbID = "movie" category = "Movies" - if category == "TV": - tmdbID = "tv" + elif category == "TV": + category = "TV" + data = { 'action': 'search', 'tmdb_id': f"{tmdbID}/{meta['tmdb']}", 'categories': category, - 'types': await self.get_type(meta), + 'types': await self.get_type(meta) } - # Search all releases if SD if meta['sd'] == 1: data['categories'] = None data['types'] = None @@ -451,21 +478,29 @@ async def search_existing(self, meta, disctype): if meta.get('tv_pack', 0) == 1: data['pack'] = 1 data['search'] = f"{meta.get('season', '')}{meta.get('episode', '')}" + url = f"https://beyond-hd.me/api/torrents/{self.config['TRACKERS']['BHD']['api_key'].strip()}" try: - response = requests.post(url=url, data=data) - response = response.json() - if response.get('status_code') == 1: - for each in response['results']: - result = each['name'] - difference = SequenceMatcher(None, meta['clean_name'].replace('DD+', 'DDP'), result).ratio() - if difference >= 0.05: - dupes.append(result) - else: - console.print(f"[yellow]{response.get('status_message')}") - await asyncio.sleep(5) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Most likely the site is down.') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.post(url, params=data) + if response.status_code == 200: + data = response.json() + if data.get('status_code') == 1: + for each in data['results']: + result = each['name'] + difference = SequenceMatcher(None, meta['clean_name'].replace('DD+', 'DDP'), result).ratio() + if difference >= 0.05: + dupes.append(result) + else: + console.print(f"[bold red]Failed to search torrents. API Error: {data.get('message', 'Unknown Error')}") + else: + console.print(f"[bold red]HTTP request failed. 
Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(5) return dupes @@ -541,48 +576,3 @@ async def edit_name(self, meta): if meta['category'] == "TV" and meta.get('tv_pack', 0) == 0 and meta.get('episode_title_storage', '').strip() != '' and meta['episode'].strip() != '': name = name.replace(meta['episode'], f"{meta['episode']} {meta['episode_title_storage']}", 1) return name - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - torrent = bencodepy.decode(torrent_data) - info_dict = torrent[b'info'] - bencoded_info = bencodepy.encode(info_dict) - info_hash = hashlib.sha1(bencoded_info).hexdigest() - # console.print(f"Info Hash: {info_hash}") - - params = { - 'action': 'search', - 'info_hash': info_hash - } - url = f"https://beyond-hd.me/api/torrents/{self.config['TRACKERS']['BHD']['api_key'].strip()}" - try: - response = requests.post(url=url, json=params) - response_data = response.json() - # console.print(f"[yellow]Response Data: {response_data}") - - if response_data.get('total_results') == 1: - for each in response_data['results']: - details_link = f"https://beyond-hd.me/details/{each['id']}" - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) - - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) - - return details_link - else: - return None - else: - return None - - except requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None diff --git a/src/trackers/BLU.py b/src/trackers/BLU.py index 23908ab7..dc48c33b 100644 --- a/src/trackers/BLU.py +++ b/src/trackers/BLU.py @@ -4,9 +4,9 @@ import requests import platform from str2bool import str2bool -import bencodepy import os import glob +import httpx from src.trackers.COMMON import COMMON from src.console import console @@ -127,9 +127,11 @@ async def upload(self, meta, disctype): response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) + # adding torrent link to comment of torrent file + t_id = response.json()['data'].split(".")[1].split("/")[3] + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://blutopia.cc/torrents/" + t_id) except Exception: console.print("It may have uploaded, go check") - return else: console.print("[cyan]Request Data:") @@ -222,54 +224,21 @@ async def search_existing(self, meta, disctype): if meta.get('edition', "") != "": params['name'] = params['name'] + f" {meta['edition']}" try: - response = requests.get(url=self.search_url, params=params) - response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] - # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() - # if difference >= 0.05: - 
dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=self.search_url, params=params) + if response.status_code == 200: + data = response.json() + for each in data['data']: + result = [each][0]['attributes']['name'] + dupes.append(result) + else: + console.print(f"[bold red]Failed to search torrents. HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(5) return dupes - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - Name = meta['name'] - quoted_name = f'"{Name}"' - - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': quoted_name - } - - try: - response = requests.get(url=self.search_url, params=params) - response.raise_for_status() - response_data = response.json() - - if response_data['data'] and isinstance(response_data['data'], list): - details_link = response_data['data'][0]['attributes'].get('details_link') - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) - - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) - - return details_link - else: - return None - else: - return None - - except requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None diff --git a/src/trackers/CBR.py b/src/trackers/CBR.py index f6813c05..dcf75a57 100644 --- a/src/trackers/CBR.py +++ b/src/trackers/CBR.py @@ -4,9 +4,9 @@ import requests from str2bool import str2bool import platform -import bencodepy import os import glob +import httpx from src.trackers.COMMON import COMMON from src.console import console @@ -111,6 +111,9 @@ async def upload(self, meta, disctype): response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) + # adding torrent link to comment of torrent file + t_id = response.json()['data'].split(".")[1].split("/")[3] + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://capybarabr.com/torrents/" + t_id) except Exception: console.print("It may have uploaded, go check") return @@ -171,15 +174,21 @@ async def search_existing(self, meta, disctype): if meta.get('edition', "") != "": params['name'] = params['name'] + f" {meta['edition']}" try: - response = requests.get(url=self.search_url, params=params) - response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] - # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() - # if difference >= 0.05: - dupes.append(result) - except Exception: - console.print('[bold red]Não foi possivel buscar no tracker torrents duplicados. 
O tracker está offline ou sua api está incorreta') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=self.search_url, params=params) + if response.status_code == 200: + data = response.json() + for each in data['data']: + result = [each][0]['attributes']['name'] + dupes.append(result) + else: + console.print(f"[bold red]Failed to search torrents. HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(5) return dupes @@ -189,42 +198,3 @@ async def edit_name(self, meta): name = meta['uuid'].replace('.mkv', '').replace('.mp4', '').replace(".", " ").replace("DDP2 0", "DDP2.0").replace("DDP5 1", "DDP5.1").replace("H 264", "H.264").replace("H 265", "H.265").replace("DD+7 1", "DDP7.1").replace("AAC2 0", "AAC2.0").replace('DD5 1', 'DD5.1').replace('DD2 0', 'DD2.0').replace('TrueHD 7 1', 'TrueHD 7.1').replace('DTS-HD MA 7 1', 'DTS-HD MA 7.1').replace('DTS-HD MA 5 1', 'DTS-HD MA 5.1').replace("TrueHD 5 1", "TrueHD 5.1").replace("DTS-X 7 1", "DTS-X 7.1").replace("DTS-X 5 1", "DTS-X 5.1").replace("FLAC 2 0", "FLAC 2.0").replace("FLAC 5 1", "FLAC 5.1").replace("DD1 0", "DD1.0").replace("DTS ES 5 1", "DTS ES 5.1").replace("DTS5 1", "DTS 5.1").replace("AAC1 0", "AAC1.0").replace("DD+5 1", "DDP5.1").replace("DD+2 0", "DDP2.0").replace("DD+1 0", "DDP1.0") return name - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - Name = meta['name'] - quoted_name = f'"{Name}"' - - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': quoted_name - } - - try: - response = requests.get(url=self.search_url, params=params) - response.raise_for_status() - response_data = response.json() - - if response_data['data'] and isinstance(response_data['data'], list): - details_link = response_data['data'][0]['attributes'].get('details_link') - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) - - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) - - return details_link - else: - return None - else: - return None - - except requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 5d2ed253..5317ac46 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -7,11 +7,11 @@ import sys import glob from pymediainfo import MediaInfo -import multiprocessing -import asyncio from src.bbcode import BBCODE from src.console import console +from src.uploadscreens import upload_screens +from src.takescreens import disc_screenshots, dvd_screenshots, screenshots class COMMON(): @@ -40,8 +40,6 @@ async def add_tracker_torrent(self, meta, tracker, source_flag, new_tracker, com Torrent.copy(new_torrent).write(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]{meta['clean_name']}.torrent", overwrite=True) async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, 
desc_header=""): - from src.prep import Prep - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf8').read() multi_screens = int(self.config['DEFAULT'].get('multiScreens', 2)) char_limit = int(self.config['DEFAULT'].get('charLimit', 14000)) @@ -113,85 +111,86 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des descfile.write(image_str) descfile.write("[/center]\n\n") else: - # Check if screenshots exist for the current disc key - if new_images_key in meta and meta[new_images_key]: - if meta['debug']: - console.print(f"[yellow]Found needed image URLs for {new_images_key}") - descfile.write("[center]") - if each['type'] == "BDMV": - descfile.write(f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]\n\n") - elif each['type'] == "DVD": - descfile.write(f"{each['name']}:\n") - descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code]{each['vob_mi']}[/code][/spoiler] ") - descfile.write(f"[spoiler={os.path.basename(each['ifo'])}][code]{each['ifo_mi']}[/code][/spoiler]\n\n") - descfile.write("[/center]\n\n") - # Use existing URLs from meta to write to descfile - descfile.write("[center]") - for img in meta[new_images_key]: - web_url = img['web_url'] - raw_url = img['raw_url'] - image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" - descfile.write(image_str) - descfile.write("[/center]\n\n") - else: - # Increment retry_count for tracking but use unique disc keys for each disc - meta['retry_count'] += 1 - meta[new_images_key] = [] - descfile.write("[center]") - if each['type'] == "BDMV": - descfile.write(f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]\n\n") - elif each['type'] == "DVD": - descfile.write(f"{each['name']}:\n") - descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code]{each['vob_mi']}[/code][/spoiler] ") - descfile.write(f"[spoiler={os.path.basename(each['ifo'])}][code]{each['ifo_mi']}[/code][/spoiler]\n\n") - descfile.write("[/center]\n\n") - # Check if new screenshots already exist before running prep.screenshots - if each['type'] == "BDMV": - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") - elif each['type'] == "DVD": - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['discs'][i]['name']}-*.png") - if not new_screens: + if multi_screens != 0: + # Check if screenshots exist for the current disc key + if new_images_key in meta and meta[new_images_key]: if meta['debug']: - console.print(f"[yellow]No new screens for {new_images_key}; creating new screenshots") - # Run prep.screenshots if no screenshots are present + console.print(f"[yellow]Found needed image URLs for {new_images_key}") + descfile.write("[center]") if each['type'] == "BDMV": - use_vs = meta.get('vapoursynth', False) - s = multiprocessing.Process(target=prep.disc_screenshots, args=(meta, f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), multi_screens, True)) - s.start() - while s.is_alive(): - await asyncio.sleep(1) - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + descfile.write(f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]\n\n") elif each['type'] == "DVD": - s = multiprocessing.Process(target=prep.dvd_screenshots, args=(meta, i, multi_screens, True)) - s.start() - while s.is_alive() is 
True: - await asyncio.sleep(1) - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['discs'][i]['name']}-*.png") - - if new_screens: - uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {new_images_key: meta[new_images_key]}) - - # Append each uploaded image's data to `meta[new_images_key]` - for img in uploaded_images: - meta[new_images_key].append({ - 'img_url': img['img_url'], - 'raw_url': img['raw_url'], - 'web_url': img['web_url'] - }) - - # Write new URLs to descfile + descfile.write(f"{each['name']}:\n") + descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code]{each['vob_mi']}[/code][/spoiler] ") + descfile.write(f"[spoiler={os.path.basename(each['ifo'])}][code]{each['ifo_mi']}[/code][/spoiler]\n\n") + descfile.write("[/center]\n\n") + # Use existing URLs from meta to write to descfile descfile.write("[center]") - for img in uploaded_images: + for img in meta[new_images_key]: web_url = img['web_url'] raw_url = img['raw_url'] image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" descfile.write(image_str) descfile.write("[/center]\n\n") - - # Save the updated meta to `meta.json` after upload - meta_filename = f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json" - with open(meta_filename, 'w') as f: - json.dump(meta, f, indent=4) + else: + # Increment retry_count for tracking but use unique disc keys for each disc + meta['retry_count'] += 1 + meta[new_images_key] = [] + descfile.write("[center]") + if each['type'] == "BDMV": + descfile.write(f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]\n\n") + elif each['type'] == "DVD": + descfile.write(f"{each['name']}:\n") + descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code]{each['vob_mi']}[/code][/spoiler] ") + descfile.write(f"[spoiler={os.path.basename(each['ifo'])}][code]{each['ifo_mi']}[/code][/spoiler]\n\n") + descfile.write("[/center]\n\n") + # Check if new screenshots already exist before running prep.screenshots + if each['type'] == "BDMV": + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + elif each['type'] == "DVD": + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['discs'][i]['name']}-*.png") + if not new_screens: + if meta['debug']: + console.print(f"[yellow]No new screens for {new_images_key}; creating new screenshots") + # Run prep.screenshots if no screenshots are present + if each['type'] == "BDMV": + use_vs = meta.get('vapoursynth', False) + try: + disc_screenshots(meta, f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), multi_screens, True) + except Exception as e: + print(f"Error during BDMV screenshot capture: {e}") + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + if each['type'] == "DVD": + try: + dvd_screenshots(meta, i, multi_screens, True) + except Exception as e: + print(f"Error during DVD screenshot capture: {e}") + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['discs'][i]['name']}-*.png") + + if new_screens and not meta.get('skip_imghost_upload', False): + uploaded_images, _ = upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {new_images_key: meta[new_images_key]}) + + # Append each uploaded image's data to `meta[new_images_key]` + for img in uploaded_images: + meta[new_images_key].append({ + 'img_url': img['img_url'], + 'raw_url': img['raw_url'], + 'web_url': img['web_url'] + }) + + # Write new URLs to 
descfile + descfile.write("[center]") + for img in uploaded_images: + web_url = img['web_url'] + raw_url = img['raw_url'] + image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" + descfile.write(image_str) + descfile.write("[/center]\n\n") + + # Save the updated meta to `meta.json` after upload + meta_filename = f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json" + with open(meta_filename, 'w') as f: + json.dump(meta, f, indent=4) # Handle single file case if len(filelist) == 1: @@ -229,25 +228,24 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des if i > 0: new_images_key = f'new_images_file_{i}' if new_images_key not in meta or not meta[new_images_key]: - # Proceed with image generation if not already present meta[new_images_key] = [] + # Proceed with image generation if not already present new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") # If no screenshots exist, create them if not new_screens: if meta['debug']: console.print(f"[yellow]No existing screenshots for {new_images_key}; generating new ones.") - s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, multi_screens, True, None)) - s.start() - while s.is_alive(): - await asyncio.sleep(1) + try: + screenshots(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, multi_screens, True, None) + except Exception as e: + print(f"Error during generic screenshot capture: {e}") - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") # Upload generated screenshots - if new_screens: - uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {new_images_key: meta[new_images_key]}) - meta[new_images_key] = [] + if new_screens and not meta.get('skip_imghost_upload', False): + uploaded_images, _ = upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {new_images_key: meta[new_images_key]}) for img in uploaded_images: meta[new_images_key].append({ 'img_url': img['img_url'], @@ -618,13 +616,13 @@ async def ptgen(self, meta, ptgen_site="", ptgen_retry=3): return "" return ptgen - async def filter_dupes(self, dupes, meta): + async def filter_dupes(self, dupes, meta, tracker_name): """ Filter duplicates by applying exclusion rules. Only non-excluded entries are returned. Everything is a dupe, until it matches a criteria to be excluded. 
""" if meta['debug']: - console.log("[cyan]Pre-filtered dupes") + console.log(f"[cyan]Pre-filtered dupes from {tracker_name}") console.log(dupes) new_dupes = [] @@ -633,11 +631,11 @@ async def filter_dupes(self, dupes, meta): video_encode = meta.get("video_encode") if video_encode is not None: has_encoder_in_name = video_encode.lower() - normalized_encoder = self.normalize_filename(has_encoder_in_name) + normalized_encoder = await self.normalize_filename(has_encoder_in_name) else: normalized_encoder = False has_is_disc = bool(meta.get('is_disc', False)) - target_hdr = self.refine_hdr_terms(meta.get("hdr")) + target_hdr = await self.refine_hdr_terms(meta.get("hdr")) target_season = meta.get("season") target_episode = meta.get("episode") target_resolution = meta.get("resolution") @@ -672,17 +670,17 @@ async def filter_dupes(self, dupes, meta): }, ] - def log_exclusion(reason, item): + async def log_exclusion(reason, item): if meta['debug']: console.log(f"[yellow]Excluding result due to {reason}: {item}") - def process_exclusion(each): + async def process_exclusion(each): """ Determine if an entry should be excluded. Returns True if the entry should be excluded, otherwise allowed as dupe. """ - normalized = self.normalize_filename(each) - file_hdr = self.refine_hdr_terms(normalized) + normalized = await self.normalize_filename(each) + file_hdr = await self.refine_hdr_terms(normalized) if meta['debug']: console.log(f"[debug] Evaluating dupe: {each}") @@ -701,63 +699,63 @@ def process_exclusion(each): return False if has_is_disc and re.search(r'\.\w{2,4}$', each): - log_exclusion("file extension mismatch (is_disc=True)", each) + await log_exclusion("file extension mismatch (is_disc=True)", each) return True if not is_dvd: if target_resolution and target_resolution not in each: - log_exclusion(f"resolution '{target_resolution}' mismatch", each) + await log_exclusion(f"resolution '{target_resolution}' mismatch", each) return True if is_dvd: if any(str(res) in each for res in [1080, 720, 2160]): - log_exclusion(f"resolution '{target_resolution}' mismatch", each) + await log_exclusion(f"resolution '{target_resolution}' mismatch", each) return True for check in attribute_checks: if check["key"] == "repack": if has_repack_in_uuid and "repack" not in normalized: if tag and tag in normalized: - log_exclusion("missing 'repack'", each) + await log_exclusion("missing 'repack'", each) return True elif check["uuid_flag"] != check["condition"](each): - log_exclusion(f"{check['key']} mismatch", each) + await log_exclusion(f"{check['key']} mismatch", each) return True if not is_dvd: - if not self.has_matching_hdr(file_hdr, target_hdr, meta): - log_exclusion(f"HDR mismatch: Expected {target_hdr}, got {file_hdr}", each) + if not await self.has_matching_hdr(file_hdr, target_hdr, meta): + await log_exclusion(f"HDR mismatch: Expected {target_hdr}, got {file_hdr}", each) return True - season_episode_match = self.is_season_episode_match(normalized, target_season, target_episode) - if meta['debug']: - console.log(f"[debug] Season/Episode match result: {season_episode_match}") - if not season_episode_match: - log_exclusion("season/episode mismatch", each) - return True + if meta.get('category') == "TV": + season_episode_match = await self.is_season_episode_match(normalized, target_season, target_episode) + if meta['debug']: + console.log(f"[debug] Season/Episode match result: {season_episode_match}") + if not season_episode_match: + await log_exclusion("season/episode mismatch", each) + return True if not is_dvd: if 
normalized_encoder and normalized_encoder in each: - log_exclusion(f"Encoder '{has_encoder_in_name}' mismatch", each) + await log_exclusion(f"Encoder '{has_encoder_in_name}' mismatch", each) return False if web_dl and ("web-dl" in normalized or "webdl" in normalized or "web dl" in normalized): return False - console.log(f"[debug] Passed all checks: {each}") + if meta['debug']: + console.log(f"[debug] Passed all checks: {each}") return False for each in dupes: - console.log(f"[debug] Evaluating dupe: {each}") - if not process_exclusion(each): + if not await process_exclusion(each): new_dupes.append(each) - if meta['debug']: - console.log(f"[cyan]Final dupes: {new_dupes}") + console.log(f"[cyan]Final dupes on {tracker_name}: {new_dupes}") return new_dupes - def normalize_filename(self, filename): + async def normalize_filename(self, filename): """ Normalize a filename for easier matching. Retain season/episode information in the format SxxExx. @@ -766,7 +764,7 @@ def normalize_filename(self, filename): return normalized - def is_season_episode_match(self, filename, target_season, target_episode): + async def is_season_episode_match(self, filename, target_season, target_episode): """ Check if the filename matches the given season and episode. """ @@ -786,7 +784,7 @@ def is_season_episode_match(self, filename, target_season, target_episode): return episode_pattern in filename return True - def refine_hdr_terms(self, hdr): + async def refine_hdr_terms(self, hdr): """ Normalize HDR terms for consistent comparison. Simplifies all HDR entries to 'HDR' and DV entries to 'DV'. @@ -801,7 +799,7 @@ def refine_hdr_terms(self, hdr): terms.add("HDR") return terms - def has_matching_hdr(self, file_hdr, target_hdr, meta): + async def has_matching_hdr(self, file_hdr, target_hdr, meta): """ Check if the HDR terms match or are compatible. 
""" diff --git a/src/trackers/FL.py b/src/trackers/FL.py index 5d376c91..5cc31e1a 100644 --- a/src/trackers/FL.py +++ b/src/trackers/FL.py @@ -9,6 +9,7 @@ from urllib.parse import urlparse import cli_ui from bs4 import BeautifulSoup +import httpx from src.trackers.COMMON import COMMON from src.exceptions import * # noqa F403 @@ -192,32 +193,46 @@ async def upload(self, meta, disctype): async def search_existing(self, meta, disctype): dupes = [] - with requests.Session() as session: - cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/FL.pkl") - with open(cookiefile, 'rb') as cf: - session.cookies.update(pickle.load(cf)) - - search_url = "https://filelist.io/browse.php" - if int(meta['imdb_id'].replace('tt', '')) != 0: - params = { - 'search': meta['imdb_id'], - 'cat': await self.get_category_id(meta), - 'searchin': '3' - } - else: - params = { - 'search': meta['title'], - 'cat': await self.get_category_id(meta), - 'searchin': '0' - } + cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/FL.pkl") + + with open(cookiefile, 'rb') as cf: + cookies = pickle.load(cf) + + search_url = "https://filelist.io/browse.php" - r = session.get(search_url, params=params) + if int(meta['imdb_id'].replace('tt', '')) != 0: + params = { + 'search': meta['imdb_id'], + 'cat': await self.get_category_id(meta), + 'searchin': '3' + } + else: + params = { + 'search': meta['title'], + 'cat': await self.get_category_id(meta), + 'searchin': '0' + } + + try: + async with httpx.AsyncClient(cookies=cookies, timeout=10.0) as client: + response = await client.get(search_url, params=params) + if response.status_code == 200: + soup = BeautifulSoup(response.text, 'html.parser') + find = soup.find_all('a', href=True) + for each in find: + if each['href'].startswith('details.php?id=') and "&" not in each['href']: + dupes.append(each['title']) + else: + console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") + await asyncio.sleep(0.5) + + except httpx.TimeoutException: + console.print("[bold red]Request timed out while searching for existing torrents.") + except httpx.RequestError as e: + console.print(f"[bold red]An error occurred while making the request: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(0.5) - soup = BeautifulSoup(r.text, 'html.parser') - find = soup.find_all('a', href=True) - for each in find: - if each['href'].startswith('details.php?id=') and "&" not in each['href']: - dupes.append(each['title']) return dupes diff --git a/src/trackers/FNP.py b/src/trackers/FNP.py index eac00ec0..5395c269 100644 --- a/src/trackers/FNP.py +++ b/src/trackers/FNP.py @@ -4,9 +4,9 @@ import requests from str2bool import str2bool import platform -import bencodepy import os import glob +import httpx from src.trackers.COMMON import COMMON from src.console import console @@ -144,6 +144,9 @@ async def upload(self, meta, disctype): response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) + # adding torrent link to comment of torrent file + t_id = response.json()['data'].split(".")[1].split("/")[3] + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://fearnopeer.com/torrents/" + t_id) except Exception: console.print("It may have uploaded, go check") return @@ -166,54 +169,21 @@ async def search_existing(self, meta, disctype): if meta.get('edition', "") != "": params['name'] = params['name'] + f" {meta['edition']}" try: - response = requests.get(url=self.search_url, params=params) - response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] - # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() - # if difference >= 0.05: - dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=self.search_url, params=params) + if response.status_code == 200: + data = response.json() + for each in data['data']: + result = [each][0]['attributes']['name'] + dupes.append(result) + else: + console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(5) return dupes - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - Name = meta['name'] - quoted_name = f'"{Name}"' - - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': quoted_name - } - - try: - response = requests.get(url=self.search_url, params=params) - response.raise_for_status() - response_data = response.json() - - if response_data['data'] and isinstance(response_data['data'], list): - details_link = response_data['data'][0]['attributes'].get('details_link') - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) - - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) - - return details_link - else: - return None - else: - return None - - except requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None diff --git a/src/trackers/HDB.py b/src/trackers/HDB.py index decfb328..ff8312e2 100644 --- a/src/trackers/HDB.py +++ b/src/trackers/HDB.py @@ -5,6 +5,7 @@ from pathlib import Path import json import glob +import httpx from unidecode import unidecode from urllib.parse import urlparse, quote from src.trackers.COMMON import COMMON @@ -12,6 +13,7 @@ from src.console import console from datetime import datetime from torf import Torrent +from src.torrentcreate import CustomTorrent, torf_cb class HDB(): @@ -224,11 +226,7 @@ async def upload(self, meta, disctype): if torrent.piece_size > 16777216: # 16 MiB in bytes console.print("[red]Piece size is OVER 16M and does not work on HDB. 
Generating a new .torrent") - # Import Prep and regenerate the torrent with 16 MiB piece size limit - from src.prep import Prep - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) - - if meta['is_disc'] == 1: + if meta['is_disc']: include = [] exclude = [] else: @@ -236,7 +234,7 @@ async def upload(self, meta, disctype): exclude = ["*.*", "*sample.mkv", "!sample*.*"] # Create a new torrent with piece size explicitly set to 16 MiB - new_torrent = prep.CustomTorrent( + new_torrent = CustomTorrent( meta=meta, path=Path(meta['path']), trackers=["https://fake.tracker"], @@ -255,7 +253,7 @@ async def upload(self, meta, disctype): # Validate and write the new torrent new_torrent.validate_piece_size() - new_torrent.generate(callback=prep.torf_cb, interval=5) + new_torrent.generate(callback=torf_cb, interval=5) new_torrent.write(torrent_path, overwrite=True) # Proceed with the upload process @@ -322,6 +320,7 @@ async def upload(self, meta, disctype): async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on HDB...") + url = "https://hdbits.org/api/torrents" data = { 'username': self.username, @@ -331,18 +330,33 @@ async def search_existing(self, meta, disctype): 'medium': await self.get_type_medium_id(meta), 'search': meta['resolution'] } + + # Add IMDb and TVDB IDs if available if int(meta.get('imdb_id', '0').replace('tt', '0')) != 0: data['imdb'] = {'id': meta['imdb_id']} if int(meta.get('tvdb_id', '0')) != 0: data['tvdb'] = {'id': meta['tvdb_id']} + try: - response = requests.get(url=url, data=json.dumps(data)) - response = response.json() - for each in response['data']: - result = each['name'] - dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your passkey is incorrect') + # Send POST request with JSON body + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.post(url, json=data) + + if response.status_code == 200: + response_data = response.json() + for each in response_data.get('data', []): + result = each['name'] + dupes.append(result) + else: + console.print(f"[bold red]HTTP request failed. 
Status: {response.status_code}") + + except httpx.TimeoutException: + console.print("[bold red]Request timed out while searching for existing torrents.") + except httpx.RequestError as e: + console.print(f"[bold red]An error occurred while making the request: {e}") + except Exception as e: + console.print("[bold red]Unexpected error occurred while searching torrents.") + console.print(str(e)) await asyncio.sleep(5) return dupes diff --git a/src/trackers/HDT.py b/src/trackers/HDT.py index 735a7f5f..90f582f4 100644 --- a/src/trackers/HDT.py +++ b/src/trackers/HDT.py @@ -3,6 +3,7 @@ import re import os import cli_ui +import httpx from str2bool import str2bool from bs4 import BeautifulSoup from unidecode import unidecode @@ -217,13 +218,16 @@ async def upload(self, meta, disctype): async def search_existing(self, meta, disctype): dupes = [] - with requests.Session() as session: - common = COMMON(config=self.config) - cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/HDT.txt") - session.cookies.update(await common.parseCookieFile(cookiefile)) + console.print("[yellow]Searching for existing torrents on HDT...") - search_url = "https://hd-torrents.net/torrents.php" - csrfToken = await self.get_csrfToken(session, search_url) + common = COMMON(config=self.config) + cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/HDT.txt") + cookies = await common.parseCookieFile(cookiefile) + + search_url = "https://hd-torrents.net/torrents.php" + + async with httpx.AsyncClient(cookies=cookies, timeout=10.0) as client: + csrfToken = await self.get_csrfToken(client, search_url) if int(meta['imdb_id'].replace('tt', '')) != 0: params = { 'csrfToken': csrfToken, @@ -240,13 +244,26 @@ async def search_existing(self, meta, disctype): 'options': '3' } - r = session.get(search_url, params=params) - await asyncio.sleep(0.5) - soup = BeautifulSoup(r.text, 'html.parser') - find = soup.find_all('a', href=True) - for each in find: - if each['href'].startswith('details.php?id='): - dupes.append(each.text) + try: + response = await client.get(search_url, params=params) + if response.status_code == 200: + soup = BeautifulSoup(response.text, 'html.parser') + find = soup.find_all('a', href=True) + for each in find: + if each['href'].startswith('details.php?id='): + dupes.append(each.text) + else: + console.print(f"[bold red]HTTP request failed. 
Status: {response.status_code}") + + await asyncio.sleep(0.5) + + except httpx.TimeoutException: + console.print("[bold red]Request timed out while searching for existing torrents.") + except httpx.RequestError as e: + console.print(f"[bold red]An error occurred while making the request: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") + await asyncio.sleep(0.5) return dupes diff --git a/src/trackers/HHD.py b/src/trackers/HHD.py index 67630ea9..cd795c12 100644 --- a/src/trackers/HHD.py +++ b/src/trackers/HHD.py @@ -4,9 +4,9 @@ import requests from str2bool import str2bool import platform -import bencodepy import os import glob +import httpx from src.trackers.COMMON import COMMON from src.console import console @@ -139,6 +139,9 @@ async def upload(self, meta, disctype): response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) + # adding torrent link to comment of torrent file + t_id = response.json()['data'].split(".")[1].split("/")[3] + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://homiehelpdesk.net/torrents/" + t_id) except Exception: console.print("It may have uploaded, go check") return @@ -161,54 +164,21 @@ async def search_existing(self, meta, disctype): if meta.get('edition', "") != "": params['name'] = params['name'] + f" {meta['edition']}" try: - response = requests.get(url=self.search_url, params=params) - response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] - # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() - # if difference >= 0.05: - dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=self.search_url, params=params) + if response.status_code == 200: + data = response.json() + for each in data['data']: + result = [each][0]['attributes']['name'] + dupes.append(result) + else: + console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(5) return dupes - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - Name = meta['name'] - quoted_name = f'"{Name}"' - - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': quoted_name - } - - try: - response = requests.get(url=self.search_url, params=params) - response.raise_for_status() - response_data = response.json() - - if response_data['data'] and isinstance(response_data['data'], list): - details_link = response_data['data'][0]['attributes'].get('details_link') - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) - - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) - - return details_link - else: - return None - else: - return None - - except requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None diff --git a/src/trackers/HP.py b/src/trackers/HP.py index d458c572..17f50593 100644 --- a/src/trackers/HP.py +++ b/src/trackers/HP.py @@ -4,7 +4,7 @@ import requests import platform from str2bool import str2bool -import bencodepy +import httpx from src.trackers.COMMON import COMMON from src.console import console @@ -133,6 +133,9 @@ async def upload(self, meta, disctype): response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) + # adding torrent link to comment of torrent file + t_id = response.json()['data'].split(".")[1].split("/")[3] + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://hidden-palace.net/torrents/" + t_id) except Exception: console.print("It may have uploaded, go check") return @@ -158,54 +161,21 @@ async def search_existing(self, meta, disctype): params['name'] = params['name'] + meta['edition'] try: - response = requests.get(url=self.search_url, params=params) - response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] - # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() - # if difference >= 0.05: - dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=self.search_url, params=params) + if response.status_code == 200: + data = response.json() + for each in data['data']: + result = [each][0]['attributes']['name'] + dupes.append(result) + else: + console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(5) return dupes - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - Name = meta['name'] - quoted_name = f'"{Name}"' - - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': quoted_name - } - - try: - response = requests.get(url=self.search_url, params=params) - response.raise_for_status() - response_data = response.json() - - if response_data['data'] and isinstance(response_data['data'], list): - details_link = response_data['data'][0]['attributes'].get('details_link') - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) - - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) - - return details_link - else: - return None - else: - return None - - except requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index 8eabb871..39545c5e 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -6,8 +6,8 @@ import os import re import platform -import bencodepy import cli_ui +import httpx from src.trackers.COMMON import COMMON from src.console import console @@ -307,54 +307,21 @@ async def search_existing(self, meta, disctype): if meta.get('edition', "") != "": params['name'] + meta['edition'] try: - response = requests.get(url=self.search_url, params=params) - response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] - # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() - # if difference >= 0.05: - dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=self.search_url, params=params) + if response.status_code == 200: + data = response.json() + for each in data['data']: + result = [each][0]['attributes']['name'] + dupes.append(result) + else: + console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(5) return dupes - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - Name = meta['name'] - quoted_name = f'"{Name}"' - - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': quoted_name - } - - try: - response = requests.get(url=self.search_url, params=params) - response.raise_for_status() - response_data = response.json() - - if response_data['data'] and isinstance(response_data['data'], list): - details_link = response_data['data'][0]['attributes'].get('details_link') - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) - - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) - - return details_link - else: - return None - else: - return None - - except requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None diff --git a/src/trackers/JPTV.py b/src/trackers/JPTV.py index 4b1aae56..3711bba6 100644 --- a/src/trackers/JPTV.py +++ b/src/trackers/JPTV.py @@ -4,7 +4,7 @@ import requests import platform from str2bool import str2bool -import bencodepy +import httpx from src.trackers.COMMON import COMMON from src.console import console @@ -140,6 +140,9 @@ async def upload(self, meta, disctype): response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) + # adding torrent link to comment of torrent file + t_id = response.json()['data'].split(".")[1].split("/")[3] + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://jptv.club/torrents/" + t_id) except Exception: console.print("It may have uploaded, go check") return @@ -165,15 +168,21 @@ async def search_existing(self, meta, disctype): console.log("[cyan]Dupe Search Parameters") console.log(params) try: - response = requests.get(url=self.search_url, params=params) - response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] - # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() - # if difference >= 0.05: - dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=self.search_url, params=params) + if response.status_code == 200: + data = response.json() + for each in data['data']: + result = [each][0]['attributes']['name'] + dupes.append(result) + else: + console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(5) return dupes @@ -199,42 +208,3 @@ async def edit_name(self, meta): name = name.replace("DD+ ", "DD+") return name - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - Name = meta['name'] - quoted_name = f'"{Name}"' - - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': quoted_name - } - - try: - response = requests.get(url=self.search_url, params=params) - response.raise_for_status() - response_data = response.json() - - if response_data['data'] and isinstance(response_data['data'], list): - details_link = response_data['data'][0]['attributes'].get('details_link') - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) - - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) - - return details_link - else: - return None - else: - return None - - except requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None diff --git a/src/trackers/LCD.py b/src/trackers/LCD.py index 463fe2ea..ad198642 100644 --- a/src/trackers/LCD.py +++ b/src/trackers/LCD.py @@ -4,9 +4,9 @@ import requests import platform from str2bool import str2bool -import bencodepy import os import glob +import httpx from src.trackers.COMMON import COMMON from src.console import console @@ -111,6 +111,9 @@ async def upload(self, meta, disctype): response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) + # adding torrent link to comment of torrent file + t_id = response.json()['data'].split(".")[1].split("/")[3] + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://locadora.cc/torrents/" + t_id) except Exception: console.print("It may have uploaded, go check") return @@ -173,15 +176,21 @@ async def search_existing(self, meta, disctype): if meta.get('edition', "") != "": params['name'] = params['name'] + f" {meta['edition']}" try: - response = requests.get(url=self.search_url, params=params) - response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] - # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() - # if difference >= 0.05: - dupes.append(result) - except Exception: - console.print('[bold red]Não foi possivel buscar no tracker torrents duplicados. O tracker está offline ou sua api está incorreta') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=self.search_url, params=params) + if response.status_code == 200: + data = response.json() + for each in data['data']: + result = [each][0]['attributes']['name'] + dupes.append(result) + else: + console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(5) return dupes @@ -191,42 +200,3 @@ async def edit_name(self, meta): name = meta['uuid'].replace('.mkv', '').replace('.mp4', '').replace(".", " ").replace("DDP2 0", "DDP2.0").replace("DDP5 1", "DDP5.1").replace("H 264", "H.264").replace("H 265", "H.264").replace("DD+7 1", "DD+7.1").replace("AAC2 0", "AAC2.0").replace('DD5 1', 'DD5.1').replace('DD2 0', 'DD2.0').replace('TrueHD 7 1', 'TrueHD 7.1').replace('DTS-HD MA 7 1', 'DTS-HD MA 7.1').replace('-C A A', '-C.A.A'), return name - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - Name = meta['name'] - quoted_name = f'"{Name}"' - - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': quoted_name - } - - try: - response = requests.get(url=self.search_url, params=params) - response.raise_for_status() - response_data = response.json() - - if response_data['data'] and isinstance(response_data['data'], list): - details_link = response_data['data'][0]['attributes'].get('details_link') - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) - - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) - - return details_link - else: - return None - else: - return None - - except requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None diff --git a/src/trackers/LST.py b/src/trackers/LST.py index 503ded33..63461fda 100644 --- a/src/trackers/LST.py +++ b/src/trackers/LST.py @@ -4,10 +4,9 @@ import requests import platform from str2bool import str2bool -import bencodepy import os import glob - +import httpx from src.trackers.COMMON import COMMON from src.console import console @@ -105,7 +104,8 @@ async def upload(self, meta, disctype): if meta.get('service') == "hentai": desc = "[center]" + "[img]" + str(meta['poster']) + "[/img][/center]" + "\n[center]" + "https://www.themoviedb.org/tv/" + str(meta['tmdb']) + "\nhttps://myanimelist.net/anime/" + str(meta['mal']) + "[/center]" + desc - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + open_torrent = open(torrent_file_path, 'rb') files = {'torrent': open_torrent} base_dir = meta['base_dir'] uuid = meta['uuid'] @@ -166,6 +166,9 @@ async def upload(self, meta, disctype): response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) + # adding torrent link to comment of torrent file + t_id = response.json()['data'].split(".")[1].split("/")[3] + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://lst.gg/torrents/" + t_id) except Exception: console.print("It may have uploaded, go check") 
return @@ -213,54 +216,21 @@ async def search_existing(self, meta, disctype): if meta.get('edition', "") != "": params['name'] = params['name'] + f" {meta['edition']}" try: - response = requests.get(url=self.search_url, params=params) - response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] - # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() - # if difference >= 0.05: - dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=self.search_url, params=params) + if response.status_code == 200: + data = response.json() + for each in data['data']: + result = [each][0]['attributes']['name'] + dupes.append(result) + else: + console.print(f"[bold red]Failed to search torrents. HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(5) return dupes - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - Name = meta['name'] - quoted_name = f'"{Name}"' - - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': quoted_name - } - - try: - response = requests.get(url=self.search_url, params=params) - response.raise_for_status() - response_data = response.json() - - if response_data['data'] and isinstance(response_data['data'], list): - details_link = response_data['data'][0]['attributes'].get('details_link') - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) - - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) - - return details_link - else: - return None - else: - return None - - except requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None diff --git a/src/trackers/LT.py b/src/trackers/LT.py index 0a698c16..387486e3 100644 --- a/src/trackers/LT.py +++ b/src/trackers/LT.py @@ -4,9 +4,9 @@ import requests import platform from str2bool import str2bool -import bencodepy import os import glob +import httpx from src.trackers.COMMON import COMMON from src.console import console @@ -184,6 +184,9 @@ async def upload(self, meta, disctype): response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) + # adding torrent link to comment of torrent file + t_id = response.json()['data'].split(".")[1].split("/")[3] + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://lat-team.com/torrents/" + t_id) except Exception: console.print("It may have uploaded, go check") return @@ -208,54 +211,21 @@ async def search_existing(self, meta, disctype): if meta.get('edition', "") != "": params['name'] = params['name'] + f" {meta['edition']}" try: - 
response = requests.get(url=self.search_url, params=params) - response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] - # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() - # if difference >= 0.05: - dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=self.search_url, params=params) + if response.status_code == 200: + data = response.json() + for each in data['data']: + result = [each][0]['attributes']['name'] + dupes.append(result) + else: + console.print(f"[bold red]Failed to search torrents. HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(5) return dupes - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - Name = meta['name'] - quoted_name = f'"{Name}"' - - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': quoted_name - } - - try: - response = requests.get(url=self.search_url, params=params) - response.raise_for_status() - response_data = response.json() - - if response_data['data'] and isinstance(response_data['data'], list): - details_link = response_data['data'][0]['attributes'].get('details_link') - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) - - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) - - return details_link - else: - return None - else: - return None - - except requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index fc5aa09d..9c55e378 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -3,7 +3,8 @@ from src.console import console import traceback from torf import Torrent -import xml.etree.ElementTree +import httpx +import xml.etree.ElementTree as ET import os import cli_ui import pickle @@ -13,8 +14,10 @@ from src.trackers.COMMON import COMMON from datetime import datetime import glob -import multiprocessing from urllib.parse import urlparse +from src.torrentcreate import CustomTorrent, torf_cb +from src.takescreens import disc_screenshots, dvd_screenshots, screenshots +from src.uploadscreens import upload_screens class MTV(): @@ -71,9 +74,7 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): include = ["*.mkv", "*.mp4", "*.ts"] exclude = ["*.*", "*sample.mkv", "!sample*.*"] - from src.prep import Prep - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) - new_torrent = prep.CustomTorrent( + new_torrent = CustomTorrent( meta=meta, path=Path(meta['path']), trackers=["https://fake.tracker"], @@ -88,7 +89,7 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): 
new_torrent.piece_size = 8 * 1024 * 1024 new_torrent.validate_piece_size() - new_torrent.generate(callback=prep.torf_cb, interval=5) + new_torrent.generate(callback=torf_cb, interval=5) new_torrent.write(f"{meta['base_dir']}/tmp/{meta['uuid']}/MTV.torrent", overwrite=True) torrent_filename = "MTV" @@ -235,8 +236,6 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts multi_screens = int(self.config['DEFAULT'].get('screens', 6)) base_dir = meta['base_dir'] folder_id = meta['uuid'] - from src.prep import Prep - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) meta[new_images_key] = [] screenshots_dir = os.path.join(base_dir, 'tmp', folder_id) @@ -254,28 +253,24 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts if meta.get('debug'): console.print("[yellow]The image host of existing images is not supported.") console.print(f"[yellow]Insufficient screenshots found: generating {multi_screens} screenshots.") - if meta['is_disc'] == "BDMV": - s = multiprocessing.Process( - target=prep.disc_screenshots, - args=(f"FILE_{img_host_index}", meta['bdinfo'], folder_id, base_dir, - meta.get('vapoursynth', False), [], meta.get('ffdebug', False), img_host_index) - ) + try: + disc_screenshots(meta, filename, meta['bdinfo'], folder_id, base_dir, meta.get('vapoursynth', False), [], meta.get('ffdebug', False), multi_screens, True) + except Exception as e: + print(f"Error during BDMV screenshot capture: {e}") elif meta['is_disc'] == "DVD": - s = multiprocessing.Process( - target=prep.dvd_screenshots, - args=(meta, 0, None, True) - ) + try: + dvd_screenshots( + meta, 0, None, True + ) + except Exception as e: + print(f"Error during DVD screenshot capture: {e}") else: - s = multiprocessing.Process( - target=prep.screenshots, - args=(path, f"{filename}", meta['uuid'], base_dir, - meta, multi_screens + 1, True, None) - ) - - s.start() - while s.is_alive(): - await asyncio.sleep(1) + try: + screenshots( + path, filename, meta['uuid'], base_dir, meta, multi_screens, True, None) + except Exception as e: + print(f"Error during generic screenshot capture: {e}") if meta['is_disc'] == "DVD": existing_screens = glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][0]['name']}-*.png") @@ -288,57 +283,60 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts console.print("[red]No screenshots were generated or found. 
Please check the screenshot generation process.") return [], True, images_reuploaded - uploaded_images = [] - while True: - current_img_host_key = f'img_host_{img_host_index}' - current_img_host = self.config.get('DEFAULT', {}).get(current_img_host_key) - - if not current_img_host: - console.print("[red]No more image hosts left to try.") - raise Exception("No valid image host found in the config.") - - if current_img_host not in approved_image_hosts: - console.print(f"[red]Your preferred image host '{current_img_host}' is not supported at MTV, trying next host.") - retry_mode = True - images_reuploaded = True - img_host_index += 1 - continue - else: - meta['imghost'] = current_img_host - console.print(f"[green]Uploading to approved host '{current_img_host}'.") - break - - uploaded_images, _ = prep.upload_screens( - meta, multi_screens, img_host_index, 0, multi_screens, - all_screenshots, {new_images_key: meta[new_images_key]}, retry_mode - ) - - if uploaded_images: - meta[new_images_key] = uploaded_images + if not meta.get('skip_imghost_upload', False): + uploaded_images = [] + while True: + current_img_host_key = f'img_host_{img_host_index}' + current_img_host = self.config.get('DEFAULT', {}).get(current_img_host_key) - if meta['debug']: - for image in uploaded_images: - console.print(f"[debug] Response in upload_image_task: {image['img_url']}, {image['raw_url']}, {image['web_url']}") + if not current_img_host: + console.print("[red]No more image hosts left to try.") + raise Exception("No valid image host found in the config.") - for image in meta.get(new_images_key, []): - raw_url = image['raw_url'] - parsed_url = urlparse(raw_url) - hostname = parsed_url.netloc - mapped_host = self.match_host(hostname, url_host_mapping.keys()) - mapped_host = url_host_mapping.get(mapped_host, mapped_host) + if current_img_host not in approved_image_hosts: + console.print(f"[red]Your preferred image host '{current_img_host}' is not supported at MTV, trying next host.") + retry_mode = True + images_reuploaded = True + img_host_index += 1 + continue + else: + meta['imghost'] = current_img_host + console.print(f"[green]Uploading to approved host '{current_img_host}'.") + break - if mapped_host not in approved_image_hosts: - console.print(f"[red]Unsupported image host detected in URL '{raw_url}'. Please use one of the approved image hosts.") - return meta[new_images_key], True, images_reuploaded # Trigger retry_mode if switching hosts + uploaded_images, _ = upload_screens( + meta, multi_screens, img_host_index, 0, multi_screens, + all_screenshots, {new_images_key: meta[new_images_key]}, retry_mode + ) - if all( - url_host_mapping.get( - self.match_host(urlparse(image['raw_url']).netloc, url_host_mapping.keys()), - self.match_host(urlparse(image['raw_url']).netloc, url_host_mapping.keys()), - ) in approved_image_hosts - for image in meta[new_images_key] - ): + if uploaded_images: + meta[new_images_key] = uploaded_images + if meta['debug']: + for image in uploaded_images: + console.print(f"[debug] Response in upload_image_task: {image['img_url']}, {image['raw_url']}, {image['web_url']}") + + for image in meta.get(new_images_key, []): + raw_url = image['raw_url'] + parsed_url = urlparse(raw_url) + hostname = parsed_url.netloc + mapped_host = self.match_host(hostname, url_host_mapping.keys()) + mapped_host = url_host_mapping.get(mapped_host, mapped_host) + + if mapped_host not in approved_image_hosts: + console.print(f"[red]Unsupported image host detected in URL '{raw_url}'. 
Please use one of the approved image hosts.") + return meta[new_images_key], True, images_reuploaded # Trigger retry_mode if switching hosts + + if all( + url_host_mapping.get( + self.match_host(urlparse(image['raw_url']).netloc, url_host_mapping.keys()), + self.match_host(urlparse(image['raw_url']).netloc, url_host_mapping.keys()), + ) in approved_image_hosts + for image in meta[new_images_key] + ): + + return meta[new_images_key], False, images_reuploaded + else: return meta[new_images_key], False, images_reuploaded async def edit_desc(self, meta): @@ -675,11 +673,14 @@ async def login(self, cookiefile): async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on MTV...") + + # Build request parameters params = { 't': 'search', 'apikey': self.config['TRACKERS'][self.tracker]['api_key'].strip(), 'q': "" } + if meta['imdb_id'] not in ("0", "", None): params['imdbid'] = "tt" + meta['imdb_id'] elif meta['tmdb'] != "0": @@ -690,19 +691,31 @@ async def search_existing(self, meta, disctype): params['q'] = meta['title'].replace(': ', ' ').replace('’', '').replace("'", '') try: - rr = requests.get(url=self.search_url, params=params) - if rr is not None: - # process search results - response_xml = xml.etree.ElementTree.fromstring(rr.text) - for each in response_xml.find('channel').findall('item'): - result = each.find('title').text - dupes.append(result) - else: - if 'status_message' in rr: - console.print(f"[yellow]{rr.get('status_message')}") - await asyncio.sleep(5) + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=self.search_url, params=params) + + if response.status_code == 200 and response.text: + # Parse XML response + try: + response_xml = ET.fromstring(response.text) + for each in response_xml.find('channel').findall('item'): + result = each.find('title').text + dupes.append(result) + except ET.ParseError: + console.print("[red]Failed to parse XML response from MTV API") else: - console.print("[red]Site Seems to be down or not responding to API") + # Handle potential error messages + if response.status_code != 200: + console.print(f"[red]HTTP request failed. Status: {response.status_code}") + elif 'status_message' in response.json(): + console.print(f"[yellow]{response.json().get('status_message')}") + await asyncio.sleep(5) + else: + console.print("[red]Site Seems to be down or not responding to API") + except httpx.TimeoutException: + console.print("[red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[red]Unable to search for existing torrents: {e}") except Exception: console.print("[red]Unable to search for existing torrents on site. 
Most likely the site is down.") dupes.append("FAILED SEARCH") diff --git a/src/trackers/NBL.py b/src/trackers/NBL.py index 0f58e2fb..b97ce748 100644 --- a/src/trackers/NBL.py +++ b/src/trackers/NBL.py @@ -1,8 +1,8 @@ # -*- coding: utf-8 -*- -# import discord import asyncio import requests from guessit import guessit +import httpx from src.trackers.COMMON import COMMON from src.console import console @@ -83,20 +83,23 @@ async def search_existing(self, meta, disctype): if meta['category'] != 'TV': console.print("[red]Only TV Is allowed at NBL") meta['skipping'] = "NBL" - return + return [] + if meta.get('is_disc') is not None: console.print('[bold red]This site does not allow raw discs') meta['skipping'] = "NBL" - return + return [] + dupes = [] console.print("[yellow]Searching for existing torrents on NBL...") + if int(meta.get('tvmaze_id', 0)) != 0: search_term = {'tvmaze': int(meta['tvmaze_id'])} - elif int(meta.get('imdb_id', '0').replace('tt', '')) == 0: + elif int(meta.get('imdb_id', '0').replace('tt', '')) != 0: search_term = {'imdb': meta.get('imdb_id', '0').replace('tt', '')} else: search_term = {'series': meta['title']} - json = { + payload = { 'jsonrpc': '2.0', 'id': 1, 'method': 'getTorrents', @@ -105,30 +108,38 @@ search_term ] } + try: - response = requests.get(url=self.search_url, json=json) - response = response.json() - for each in response['result']['items']: - if meta['resolution'] in each['tags']: - if meta.get('tv_pack', 0) == 1: - if each['cat'] == "Season" and int(guessit(each['rls_name']).get('season', '1')) == int(meta.get('season_int')): - dupes.append(each['rls_name']) - elif int(guessit(each['rls_name']).get('episode', '0')) == int(meta.get('episode_int')): - dupes.append(each['rls_name']) - except requests.exceptions.JSONDecodeError: + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.post(self.search_url, json=payload) + if response.status_code == 200: + data = response.json() + for each in data['result']['items']: + if meta['resolution'] in each['tags']: + if meta.get('tv_pack', 0) == 1: + if each['cat'] == "Season" and int(guessit(each['rls_name']).get('season', '1')) == int(meta.get('season_int')): + dupes.append(each['rls_name']) + elif int(guessit(each['rls_name']).get('episode', '0')) == int(meta.get('episode_int')): + dupes.append(each['rls_name']) + else: + console.print(f"[bold red]HTTP request failed. Status: {response.status_code}") + + except httpx.HTTPStatusError as e: + console.print(f"[bold red]HTTP error occurred: {e}") + except httpx.RequestError as e: + console.print(f"[bold red]An error occurred while making the request: {e}") + except ValueError: console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') await asyncio.sleep(5) except KeyError as e: - console.print(response) - console.print("\n\n\n") - if e.args[0] == 'result': + console.print(f"[bold red]Unexpected KeyError: {e}") + if 'result' not in response.json(): console.print(f"Search Term: {search_term}") console.print('[red]NBL API Returned an unexpected response, please manually check for dupes') dupes.append("ERROR: PLEASE CHECK FOR EXISTING RELEASES MANUALLY") - await asyncio.sleep(5) - else: - console.print_exception() - except Exception: + await asyncio.sleep(5) + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") console.print_exception() return dupes diff --git a/src/trackers/OE.py b/src/trackers/OE.py index 91ac0b6f..068afdff 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -10,7 +10,7 @@ from src.bbcode import BBCODE from src.trackers.COMMON import COMMON from src.console import console -import bencodepy +import httpx class OE(): @@ -69,7 +69,8 @@ async def upload(self, meta, disctype): mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + open_torrent = open(torrent_file_path, 'rb') files = {'torrent': open_torrent} data = { 'name': oe_name, @@ -118,11 +119,12 @@ async def upload(self, meta, disctype): if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: - console.print(response.json()) + # adding torrent link to comment of torrent file + t_id = response.json()['data'].split(".")[1].split("/")[3] + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://onlyencodes.cc/torrents/" + t_id) except Exception: console.print("It may have uploaded, go check") - open_torrent.close() return else: console.print("[cyan]Request Data:") @@ -320,6 +322,10 @@ async def search_existing(self, meta, disctype): console.print('[bold red]Concerts not allowed.') meta['skipping'] = "OE" return + if meta['is_disc'] == "DVD": + console.print('[bold red]Skipping DVD') + meta['skipping'] = "OE" + return dupes = [] console.print("[yellow]Searching for existing torrents on OE...") params = { @@ -335,52 +341,21 @@ async def search_existing(self, meta, disctype): if meta.get('edition', "") != "": params['name'] + meta['edition'] try: - response = requests.get(url=self.search_url, params=params) - response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=self.search_url, params=params) + if response.status_code == 200: + data = response.json() + for each in data['data']: + result = [each][0]['attributes']['name'] + dupes.append(result) + else: + console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(5) return dupes - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - Name = meta['name'] - quoted_name = f'"{Name}"' - - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': quoted_name - } - - try: - response = requests.get(url=self.search_url, params=params) - response.raise_for_status() - response_data = response.json() - - if response_data['data'] and isinstance(response_data['data'], list): - details_link = response_data['data'][0]['attributes'].get('details_link') - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) - - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) - - return details_link - else: - return None - else: - return None - - except requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None diff --git a/src/trackers/OTW.py b/src/trackers/OTW.py index cec5f7dc..85d09dce 100644 --- a/src/trackers/OTW.py +++ b/src/trackers/OTW.py @@ -4,7 +4,6 @@ import requests from str2bool import str2bool import platform -import bencodepy import os import glob import httpx @@ -151,6 +150,9 @@ async def upload(self, meta, disctype): response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) + # adding torrent link to comment of torrent file + t_id = response.json()['data'].split(".")[1].split("/")[3] + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://oldtoons.world/torrents/" + t_id) except Exception: console.print("It may have uploaded, go check") return @@ -195,42 +197,3 @@ async def search_existing(self, meta, disctype): await asyncio.sleep(5) return dupes - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - Name = meta['name'] - quoted_name = f'"{Name}"' - - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': quoted_name - } - - try: - response = requests.get(url=self.search_url, params=params) - response.raise_for_status() - response_data = response.json() - - if response_data['data'] and isinstance(response_data['data'], list): - details_link = response_data['data'][0]['attributes'].get('details_link') - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) - - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) - - return details_link - else: - return None - else: - return None - - except 
requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None diff --git a/src/trackers/PSS.py b/src/trackers/PSS.py index c4abe60f..0f2492d9 100644 --- a/src/trackers/PSS.py +++ b/src/trackers/PSS.py @@ -4,9 +4,9 @@ import requests import platform from str2bool import str2bool -import bencodepy import os import glob +import httpx from src.trackers.COMMON import COMMON from src.console import console @@ -146,6 +146,9 @@ async def upload(self, meta, disctype): response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) + # adding torrent link to comment of torrent file + t_id = response.json()['data'].split(".")[1].split("/")[3] + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://privatesilverscreen.cc/torrents/" + t_id) except Exception: console.print("It may have uploaded, go check") return @@ -168,54 +171,21 @@ async def search_existing(self, meta, disctype): if meta.get('edition', "") != "": params['name'] = params['name'] + f" {meta['edition']}" try: - response = requests.get(url=self.search_url, params=params) - response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] - # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() - # if difference >= 0.05: - dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=self.search_url, params=params) + if response.status_code == 200: + data = response.json() + for each in data['data']: + result = [each][0]['attributes']['name'] + dupes.append(result) + else: + console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(5) return dupes - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - Name = meta['name'] - quoted_name = f'"{Name}"' - - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': quoted_name - } - - try: - response = requests.get(url=self.search_url, params=params) - response.raise_for_status() - response_data = response.json() - - if response_data['data'] and isinstance(response_data['data'], list): - details_link = response_data['data'][0]['attributes'].get('details_link') - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) - - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) - - return details_link - else: - return None - else: - return None - - except requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None diff --git a/src/trackers/PTER.py b/src/trackers/PTER.py index 88fea3a8..f4cf61af 100644 --- a/src/trackers/PTER.py +++ b/src/trackers/PTER.py @@ -7,6 +7,7 @@ import glob from str2bool import str2bool import pickle +import httpx from unidecode import unidecode from urllib.parse import urlparse from src.trackers.COMMON import COMMON @@ -63,27 +64,38 @@ async def search_existing(self, meta, disctype): dupes = [] common = COMMON(config=self.config) cookiefile = f"{meta['base_dir']}/data/cookies/PTER.txt" - if os.path.exists(cookiefile): - with requests.Session() as session: - session.cookies.update(await common.parseCookieFile(cookiefile)) - if int(meta['imdb_id'].replace('tt', '')) != 0: - imdb = f"tt{meta['imdb_id']}" - else: - imdb = "" - source = await self.get_type_medium_id(meta) - search_url = f"https://pterclub.com/torrents.php?search={imdb}&incldead=0&search_mode=0&source{source}=1" - r = session.get(search_url) - soup = BeautifulSoup(r.text, 'lxml') - rows = soup.select('table.torrents > tr:has(table.torrentname)') - for row in rows: - text = row.select_one('a[href^="details.php?id="]') - if text is not None: - release = text.attrs['title'] - if release: - dupes.append(release) - else: + if not os.path.exists(cookiefile): console.print("[bold red]Missing Cookie File. 
(data/cookies/PTER.txt)") return False + cookies = await common.parseCookieFile(cookiefile) + imdb = f"tt{meta['imdb_id']}" if int(meta['imdb_id'].replace('tt', '')) != 0 else "" + source = await self.get_type_medium_id(meta) + search_url = f"https://pterclub.com/torrents.php?search={imdb}&incldead=0&search_mode=0&source{source}=1" + + try: + async with httpx.AsyncClient(cookies=cookies, timeout=10.0) as client: + response = await client.get(search_url) + + if response.status_code == 200: + soup = BeautifulSoup(response.text, 'lxml') + rows = soup.select('table.torrents > tr:has(table.torrentname)') + for row in rows: + text = row.select_one('a[href^="details.php?id="]') + if text is not None: + release = text.attrs.get('title', '') + if release: + dupes.append(release) + else: + console.print(f"[bold red]HTTP request failed. Status: {response.status_code}") + + except httpx.TimeoutException: + console.print("[bold red]Request timed out while searching for existing torrents.") + except httpx.RequestError as e: + console.print(f"[bold red]An error occurred while making the request: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") + console.print_exception() + return dupes async def get_type_category_id(self, meta): diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index d2ff5037..b757ee78 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -7,10 +7,10 @@ from str2bool import str2bool import json import glob -import multiprocessing import platform import pickle import click +import httpx from pymediainfo import MediaInfo from src.trackers.COMMON import COMMON from src.bbcode import BBCODE @@ -18,6 +18,9 @@ from src.console import console from torf import Torrent from datetime import datetime +from src.takescreens import disc_screenshots, dvd_screenshots, screenshots +from src.uploadscreens import upload_screens +from src.torrentcreate import CustomTorrent, torf_cb class PTP(): @@ -229,8 +232,9 @@ async def get_ptp_description(self, ptp_torrent_id, meta, is_disc): meta['description'] = ptp_desc meta['saved_description'] = True else: - meta['description'] = ptp_desc - meta['saved_description'] = True + if not meta['is_disc']: + meta['description'] = ptp_desc + meta['saved_description'] = True return desc, imagelist @@ -330,6 +334,7 @@ async def search_existing(self, groupID, meta, disctype): elif meta['resolution'] in ["2160p", "4320p", "8640p"]: quality = "Ultra High Definition" + # Prepare request parameters and headers params = { 'id': groupID, } @@ -339,19 +344,33 @@ async def search_existing(self, groupID, meta, disctype): 'User-Agent': self.user_agent } url = 'https://passthepopcorn.me/torrents.php' - response = requests.get(url=url, headers=headers, params=params) - await asyncio.sleep(1) - existing = [] + try: - response = response.json() - torrents = response.get('Torrents', []) - if len(torrents) != 0: - for torrent in torrents: - if torrent.get('Quality') == quality and quality is not None: - existing.append(f"[{torrent.get('Resolution')}] {torrent.get('ReleaseName', 'RELEASE NAME NOT FOUND')}") - except Exception: - console.print("[red]An error has occured trying to find existing releases") - return existing + async with httpx.AsyncClient(timeout=10.0) as client: + response = await client.get(url, headers=headers, params=params) + await asyncio.sleep(1) # Mimic server-friendly delay + if response.status_code == 200: + existing = [] + try: + data = response.json() + torrents = data.get('Torrents', []) + for torrent in torrents: + if 
torrent.get('Quality') == quality and quality is not None: + existing.append(f"[{torrent.get('Resolution')}] {torrent.get('ReleaseName', 'RELEASE NAME NOT FOUND')}") + except ValueError: + console.print("[red]Failed to parse JSON response from API.") + return existing + else: + console.print(f"[bold red]HTTP request failed with status code {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out while trying to find existing releases.") + except httpx.RequestError as e: + console.print(f"[bold red]An error occurred while making the request: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") + console.print_exception() + + return [] async def ptpimg_url_rehost(self, image_url): payload = { @@ -621,8 +640,6 @@ def convert_bbcode(self, desc): return desc async def edit_desc(self, meta): - from src.prep import Prep - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding="utf-8").read() multi_screens = int(self.config['DEFAULT'].get('multiScreens', 2)) @@ -694,14 +711,13 @@ async def edit_desc(self, meta): meta[new_images_key] = [] new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") if not new_screens: - use_vs = meta.get('vapoursynth', False) - ds = multiprocessing.Process(target=prep.disc_screenshots, args=(meta, f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), multi_screens, True)) - ds.start() - while ds.is_alive() is True: - await asyncio.sleep(1) - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") - if new_screens: - uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {new_images_key: meta[new_images_key]}) + try: + disc_screenshots(meta, f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], meta.get('vapoursynth', False), [], meta.get('ffdebug', False), multi_screens, True) + except Exception as e: + print(f"Error during BDMV screenshot capture: {e}") + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + if new_screens and not meta.get('skip_imghost_upload', False): + uploaded_images, _ = upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {new_images_key: meta[new_images_key]}) for img in uploaded_images: meta[new_images_key].append({ 'img_url': img['img_url'], @@ -747,13 +763,15 @@ async def edit_desc(self, meta): meta[new_images_key] = [] new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['discs'][i]['name']}-*.png") if not new_screens: - ds = multiprocessing.Process(target=prep.dvd_screenshots, args=(meta, i, multi_screens, True)) - ds.start() - while ds.is_alive() is True: - await asyncio.sleep(1) - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['discs'][i]['name']}-*.png") - if new_screens: - uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {new_images_key: meta[new_images_key]}) + try: + dvd_screenshots( + meta, i, multi_screens, True + ) + except Exception as e: + print(f"Error during DVD screenshot capture: {e}") + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['discs'][i]['name']}-*.png") + if new_screens and not meta.get('skip_imghost_upload', False): + uploaded_images, _ = upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {new_images_key: meta[new_images_key]}) for img 
in uploaded_images: meta[new_images_key].append({ 'img_url': img['img_url'], @@ -813,13 +831,14 @@ async def edit_desc(self, meta): meta[new_images_key] = [] new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") if not new_screens: - s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, multi_screens, True, None)) - s.start() - while s.is_alive() is True: - await asyncio.sleep(3) - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") - if new_screens: - uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {new_images_key: meta[new_images_key]}) + try: + screenshots( + file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, multi_screens, True, None) + except Exception as e: + print(f"Error during generic screenshot capture: {e}") + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + if new_screens and not meta.get('skip_imghost_upload', False): + uploaded_images, _ = upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {new_images_key: meta[new_images_key]}) for img in uploaded_images: meta[new_images_key].append({ 'img_url': img['img_url'], @@ -980,10 +999,6 @@ async def upload(self, meta, url, data, disctype): if torrent.piece_size > 16777216: # 16 MiB in bytes console.print("[red]Piece size is OVER 16M and does not work on PTP. Generating a new .torrent") - # Import Prep and regenerate the torrent with 16 MiB piece size limit - from src.prep import Prep - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) - if meta['is_disc']: include = [] exclude = [] @@ -991,10 +1006,7 @@ async def upload(self, meta, url, data, disctype): include = ["*.mkv", "*.mp4", "*.ts"] exclude = ["*.*", "*sample.mkv", "!sample*.*"] - # Create a new torrent with piece size explicitly set to 8 MiB - from src.prep import Prep - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) - new_torrent = prep.CustomTorrent( + new_torrent = CustomTorrent( meta=meta, path=Path(meta['path']), trackers=[self.announce_url], @@ -1013,7 +1025,7 @@ async def upload(self, meta, url, data, disctype): # Validate and write the new torrent new_torrent.validate_piece_size() - new_torrent.generate(callback=prep.torf_cb, interval=5) + new_torrent.generate(callback=torf_cb, interval=5) new_torrent.write(torrent_path, overwrite=True) # Proceed with the upload process diff --git a/src/trackers/PTT.py b/src/trackers/PTT.py index b3dacc16..2fb84e00 100644 --- a/src/trackers/PTT.py +++ b/src/trackers/PTT.py @@ -4,9 +4,9 @@ import requests from str2bool import str2bool import platform -import bencodepy import os import glob +import httpx from src.trackers.COMMON import COMMON from src.console import console @@ -144,6 +144,9 @@ async def upload(self, meta, disctype): response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) + # adding torrent link to comment of torrent file + t_id = response.json()['data'].split(".")[1].split("/")[3] + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://polishtorrent.top/torrents/" + t_id) except Exception: console.print("It may have uploaded, go check") return @@ -166,54 +169,21 @@ async def search_existing(self, meta, disctype): if meta.get('edition', "") != "": params['name'] = 
params['name'] + f" {meta['edition']}" try: - response = requests.get(url=self.search_url, params=params) - response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] - # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() - # if difference >= 0.05: - dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=self.search_url, params=params) + if response.status_code == 200: + data = response.json() + for each in data['data']: + result = [each][0]['attributes']['name'] + dupes.append(result) + else: + console.print(f"[bold red]Failed to search torrents. HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(5) return dupes - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - Name = meta['name'] - quoted_name = f'"{Name}"' - - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': quoted_name - } - - try: - response = requests.get(url=self.search_url, params=params) - response.raise_for_status() - response_data = response.json() - - if response_data['data'] and isinstance(response_data['data'], list): - details_link = response_data['data'][0]['attributes'].get('details_link') - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) - - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) - - return details_link - else: - return None - else: - return None - - except requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None diff --git a/src/trackers/R4E.py b/src/trackers/R4E.py index c340c17d..f6cf90f4 100644 --- a/src/trackers/R4E.py +++ b/src/trackers/R4E.py @@ -5,9 +5,9 @@ from str2bool import str2bool import tmdbsimple as tmdb import platform -import bencodepy import os import glob +import httpx from src.trackers.COMMON import COMMON from src.console import console @@ -164,54 +164,21 @@ async def search_existing(self, meta, disctype): if meta.get('edition', "") != "": params['name'] = params['name'] + meta['edition'] try: - response = requests.get(url=url, params=params) - response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] - # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() - # if difference >= 0.05: - dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=url, params=params) + if response.status_code == 200: + data = response.json() + for each in data['data']: + result = [each][0]['attributes']['name'] + dupes.append(result) + else: + console.print(f"[bold red]Failed to search torrents. HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(5) return dupes - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - Name = meta['name'] - quoted_name = f'"{Name}"' - - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': quoted_name - } - - try: - response = requests.get(url=self.search_url, params=params) - response.raise_for_status() - response_data = response.json() - - if response_data['data'] and isinstance(response_data['data'], list): - details_link = response_data['data'][0]['attributes'].get('details_link') - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) - - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) - - return details_link - else: - return None - else: - return None - - except requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None diff --git a/src/trackers/RF.py b/src/trackers/RF.py index 306432ca..dacac225 100644 --- a/src/trackers/RF.py +++ b/src/trackers/RF.py @@ -5,9 +5,9 @@ import platform import re from str2bool import str2bool -import bencodepy import os import glob +import httpx from src.trackers.COMMON import COMMON from src.console import console @@ -53,7 +53,8 @@ async def upload(self, meta, disctype): mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + open_torrent = open(torrent_file_path, 'rb') files = {'torrent': open_torrent} base_dir = meta['base_dir'] uuid = meta['uuid'] @@ -107,6 +108,9 @@ async def upload(self, meta, disctype): response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) + # adding torrent link to comment of torrent file + t_id = response.json()['data'].split(".")[1].split("/")[3] + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://reelflix.xyz/torrents/" + t_id) except Exception: console.print("It may have uploaded, go check") return @@ -184,54 +188,21 @@ async def search_existing(self, meta, disctype): if meta.get('edition', "") != "": 
params['name'] = params['name'] + meta['edition'] try: - response = requests.get(url=self.search_url, params=params) - response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] - # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() - # if difference >= 0.05: - dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=self.search_url, params=params) + if response.status_code == 200: + data = response.json() + for each in data['data']: + result = [each][0]['attributes']['name'] + dupes.append(result) + else: + console.print(f"[bold red]Failed to search torrents. HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(5) return dupes - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - Name = meta['name'] - quoted_name = f'"{Name}"' - - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': quoted_name - } - - try: - response = requests.get(url=self.search_url, params=params) - response.raise_for_status() - response_data = response.json() - - if response_data['data'] and isinstance(response_data['data'], list): - details_link = response_data['data'][0]['attributes'].get('details_link') - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) - - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) - - return details_link - else: - return None - else: - return None - - except requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None diff --git a/src/trackers/RTF.py b/src/trackers/RTF.py index 4f1576c6..362c5929 100644 --- a/src/trackers/RTF.py +++ b/src/trackers/RTF.py @@ -94,21 +94,20 @@ async def search_existing(self, meta, disctype): if any(keyword in meta['keywords'] for keyword in disallowed_keywords): console.print('[bold red]XXX not allowed.') meta['skipping'] = "RTF" - return + return [] + if datetime.date.today().year - meta['year'] <= 9: console.print("[red]ERROR: Not uploading!\nMust be older than 10 Years as per rules") meta['skipping'] = "RTF" - return + return [] + dupes = [] console.print("[yellow]Searching for existing torrents on RTF...") headers = { 'accept': 'application/json', 'Authorization': self.config['TRACKERS'][self.tracker]['api_key'].strip(), } - - params = { - 'includingDead': '1' - } + params = {'includingDead': '1'} if meta['imdb_id'] != "0": params['imdbId'] = meta['imdb_id'] if str(meta['imdb_id']).startswith("tt") else "tt" + meta['imdb_id'] @@ -116,13 +115,23 @@ async def search_existing(self, meta, disctype): params['search'] = meta['title'].replace(':', '').replace("'", '').replace(",", '') try: - response = requests.get(url=self.search_url, params=params, 
headers=headers) - response = response.json() - for each in response: - result = [each][0]['name'] - dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(self.search_url, params=params, headers=headers) + if response.status_code == 200: + data = response.json() + for each in data: + result = each['name'] + dupes.append(result) + else: + console.print(f"[bold red]HTTP request failed. Status: {response.status_code}") + + except httpx.TimeoutException: + console.print("[bold red]Request timed out while searching for existing torrents.") + except httpx.RequestError as e: + console.print(f"[bold red]An error occurred while making the request: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") + console.print_exception() await asyncio.sleep(5) return dupes diff --git a/src/trackers/SHRI.py b/src/trackers/SHRI.py index ebf7f298..ae5ff081 100644 --- a/src/trackers/SHRI.py +++ b/src/trackers/SHRI.py @@ -4,9 +4,9 @@ import requests from str2bool import str2bool import platform -import bencodepy import os import glob +import httpx from src.trackers.COMMON import COMMON from src.console import console @@ -144,6 +144,9 @@ async def upload(self, meta, disctype): response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) + # adding torrent link to comment of torrent file + t_id = response.json()['data'].split(".")[1].split("/")[3] + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://shareisland.org/torrents/" + t_id) except Exception: console.print("It may have uploaded, go check") return @@ -166,54 +169,21 @@ async def search_existing(self, meta, disctype): if meta.get('edition', "") != "": params['name'] = params['name'] + f" {meta['edition']}" try: - response = requests.get(url=self.search_url, params=params) - response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] - # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() - # if difference >= 0.05: - dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=self.search_url, params=params) + if response.status_code == 200: + data = response.json() + for each in data['data']: + result = [each][0]['attributes']['name'] + dupes.append(result) + else: + console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(5) return dupes - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - Name = meta['name'] - quoted_name = f'"{Name}"' - - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': quoted_name - } - - try: - response = requests.get(url=self.search_url, params=params) - response.raise_for_status() - response_data = response.json() - - if response_data['data'] and isinstance(response_data['data'], list): - details_link = response_data['data'][0]['attributes'].get('details_link') - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) - - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) - - return details_link - else: - return None - else: - return None - - except requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None diff --git a/src/trackers/SN.py b/src/trackers/SN.py index 5f10c7eb..c9bb7845 100644 --- a/src/trackers/SN.py +++ b/src/trackers/SN.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- import requests import asyncio +import httpx from src.trackers.COMMON import COMMON from src.console import console @@ -124,34 +125,42 @@ async def edit_desc(self, meta): async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on SN...") - params = { 'api_key': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - # using title if IMDB id does not exist to search + # Determine search parameters based on metadata if meta['imdb_id'] == 0: if meta['category'] == 'TV': - params['filter'] = meta['title'] + f"{meta.get('season', '')}{meta.get('episode', '')}" + " " + meta['resolution'] + params['filter'] = f"{meta['title']}{meta.get('season', '')}{meta.get('episode', '')} {meta['resolution']}" else: params['filter'] = meta['title'] else: - # using IMDB_id to search if it exists. + params['media_ref'] = f"tt{meta['imdb_id']}" if meta['category'] == 'TV': - params['media_ref'] = f"tt{meta['imdb_id']}" - params['filter'] = f"{meta.get('season', '')}{meta.get('episode', '')}" + " " + meta['resolution'] + params['filter'] = f"{meta.get('season', '')}{meta.get('episode', '')} {meta['resolution']}" else: - params['media_ref'] = f"tt{meta['imdb_id']}" params['filter'] = meta['resolution'] try: - response = requests.get(url=self.search_url, params=params) - response = response.json() - for i in response['data']: - result = i['name'] - dupes.append(result) - except Exception: - console.print('[red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') + async with httpx.AsyncClient(timeout=10.0) as client: + response = await client.get(self.search_url, params=params) + if response.status_code == 200: + data = response.json() + for i in data.get('data', []): + result = i.get('name') + if result: + dupes.append(result) + else: + console.print(f"[bold red]HTTP request failed. Status: {response.status_code}") + + except httpx.TimeoutException: + console.print("[bold red]Request timed out while searching for existing torrents.") + except httpx.RequestError as e: + console.print(f"[bold red]An error occurred while making the request: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") + console.print_exception() await asyncio.sleep(5) return dupes diff --git a/src/trackers/SPD.py b/src/trackers/SPD.py index 6dfa2956..7eacba45 100644 --- a/src/trackers/SPD.py +++ b/src/trackers/SPD.py @@ -9,12 +9,10 @@ import shutil import os import traceback - +import httpx from src.trackers.COMMON import COMMON -# from pprint import pprint - class SPD(): def __init__(self, config): @@ -141,13 +139,23 @@ async def search_existing(self, meta, disctype): params['search'] = meta['title'].replace(':', '').replace("'", '').replace(",", '') try: - response = requests.get(url=self.search_url, params=params, headers=headers) - response = response.json() - for each in response: - result = [each][0]['name'] - dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + async with httpx.AsyncClient(timeout=10.0) as client: + response = await client.get(url=self.search_url, params=params, headers=headers) + if response.status_code == 200: + data = response.json() + for each in data: + result = [each][0]['name'] + dupes.append(result) + else: + console.print(f"[bold red]HTTP request failed. 
Status: {response.status_code}") + + except httpx.TimeoutException: + console.print("[bold red]Request timed out while searching for existing torrents.") + except httpx.RequestError as e: + console.print(f"[bold red]An error occurred while making the request: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") + console.print_exception() await asyncio.sleep(5) return dupes diff --git a/src/trackers/STC.py b/src/trackers/STC.py index ff72fc63..65edc712 100644 --- a/src/trackers/STC.py +++ b/src/trackers/STC.py @@ -3,9 +3,9 @@ import requests from str2bool import str2bool import platform -import bencodepy import os import glob +import httpx from src.trackers.COMMON import COMMON from src.console import console @@ -101,11 +101,12 @@ async def upload(self, meta, disctype): if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: - console.print(response.json()) + # adding torrent link to comment of torrent file + t_id = response.json()['data'].split(".")[1].split("/")[3] + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://skipthecommericals.xyz/torrents/" + t_id) except Exception: console.print("It may have uploaded, go check") - open_torrent.close() return else: console.print("[cyan]Request Data:") @@ -184,52 +185,21 @@ async def search_existing(self, meta, disctype): if meta.get('edition', "") != "": params['name'] + meta['edition'] try: - response = requests.get(url=self.search_url, params=params) - response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] - dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=self.search_url, params=params) + if response.status_code == 200: + data = response.json() + for each in data['data']: + result = [each][0]['attributes']['name'] + dupes.append(result) + else: + console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(5) return dupes - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - Name = meta['name'] - quoted_name = f'"{Name}"' - - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': quoted_name - } - - try: - response = requests.get(url=self.search_url, params=params) - response.raise_for_status() - response_data = response.json() - - if response_data['data'] and isinstance(response_data['data'], list): - details_link = response_data['data'][0]['attributes'].get('details_link') - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) - - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) - - return details_link - else: - return None - else: - return None - - except requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None diff --git a/src/trackers/STT.py b/src/trackers/STT.py index 076a0f50..7e39215c 100644 --- a/src/trackers/STT.py +++ b/src/trackers/STT.py @@ -4,9 +4,9 @@ import requests from str2bool import str2bool import platform -import bencodepy import os import glob +import httpx from src.trackers.COMMON import COMMON from src.console import console @@ -101,6 +101,9 @@ async def upload(self, meta, disctype): response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) + # adding torrent link to comment of torrent file + t_id = response.json()['data'].split(".")[1].split("/")[3] + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://skipthetrailers.xyz/torrents/" + t_id) except Exception: console.print("It may have uploaded, go check") return @@ -163,54 +166,21 @@ async def search_existing(self, meta, disctype): if meta.get('edition', "") != "": params['name'] = params['name'] + meta['edition'] try: - response = requests.get(url=self.search_url, params=params) - response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] - # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() - # if difference >= 0.05: - dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=self.search_url, params=params) + if response.status_code == 200: + data = response.json() + for each in data['data']: + result = [each][0]['attributes']['name'] + dupes.append(result) + else: + console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(5) return dupes - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - Name = meta['name'] - quoted_name = f'"{Name}"' - - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': quoted_name - } - - try: - response = requests.get(url=self.search_url, params=params) - response.raise_for_status() - response_data = response.json() - - if response_data['data'] and isinstance(response_data['data'], list): - details_link = response_data['data'][0]['attributes'].get('details_link') - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) - - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) - - return details_link - else: - return None - else: - return None - - except requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None diff --git a/src/trackers/THR.py b/src/trackers/THR.py index f3655697..ff2b6487 100644 --- a/src/trackers/THR.py +++ b/src/trackers/THR.py @@ -8,8 +8,9 @@ import os import re import platform +import httpx +from bs4 import BeautifulSoup from unidecode import unidecode - from src.console import console @@ -257,19 +258,33 @@ async def edit_desc(self, meta): desc.close() return pronfo - def search_existing(self, session, imdb_id, disctype): - from bs4 import BeautifulSoup + async def search_existing(self, session, imdb_id, disctype): imdb_id = imdb_id.replace('tt', '') search_url = f"https://www.torrenthr.org/browse.php?search={imdb_id}&blah=2&incldead=1" - search = session.get(search_url) - soup = BeautifulSoup(search.text, 'html.parser') dupes = [] - for link in soup.find_all('a', href=True): - if link['href'].startswith('details.php'): - if link.get('onmousemove', False): - dupe = link['onmousemove'].split("','/images") - dupe = dupe[0].replace("return overlibImage('", "") - dupes.append(dupe) + + try: + async with httpx.AsyncClient(timeout=10.0) as client: + response = await client.get(search_url) + if response.status_code == 200: + soup = BeautifulSoup(response.text, 'html.parser') + for link in soup.find_all('a', href=True): + if link['href'].startswith('details.php'): + if link.get('onmousemove', False): + dupe = link['onmousemove'].split("','/images")[0] + dupe = dupe.replace("return overlibImage('", "") + dupes.append(dupe) + else: + console.print(f"[bold red]HTTP request failed. 
Status: {response.status_code}") + + except httpx.TimeoutException: + console.print("[bold red]Request timed out while searching for existing torrents.") + except httpx.RequestError as e: + console.print(f"[bold red]An error occurred while making the request: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") + console.print_exception() + return dupes def login(self, session): diff --git a/src/trackers/TIK.py b/src/trackers/TIK.py index 788a2164..eba0b967 100644 --- a/src/trackers/TIK.py +++ b/src/trackers/TIK.py @@ -10,7 +10,7 @@ import urllib.request import click from str2bool import str2bool -import bencodepy +import httpx from src.trackers.COMMON import COMMON from src.console import console @@ -128,10 +128,11 @@ async def upload(self, meta, disctype): if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - console.print(data) - console.print(f"TIK response: {response}") try: console.print(response.json()) + # adding torrent link to comment of torrent file + t_id = response.json()['data'].split(".")[1].split("/")[3] + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://cinematik.net/torrents/" + t_id) except Exception: console.print("It may have uploaded, go check") return @@ -582,54 +583,21 @@ async def search_existing(self, meta, disctype): if meta.get('edition', "") != "": params['name'] = params['name'] + f" {meta['edition']}" try: - response = requests.get(url=self.search_url, params=params) - response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] - # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() - # if difference >= 0.05: - dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=self.search_url, params=params) + if response.status_code == 200: + data = response.json() + for each in data['data']: + result = [each][0]['attributes']['name'] + dupes.append(result) + else: + console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(5) return dupes - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - Name = meta['name'] - quoted_name = f'"{Name}"' - - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': quoted_name - } - - try: - response = requests.get(url=self.search_url, params=params) - response.raise_for_status() - response_data = response.json() - - if response_data['data'] and isinstance(response_data['data'], list): - details_link = response_data['data'][0]['attributes'].get('details_link') - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) - - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) - - return details_link - else: - return None - else: - return None - - except requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None diff --git a/src/trackers/TTG.py b/src/trackers/TTG.py index 9337e8a8..88db4004 100644 --- a/src/trackers/TTG.py +++ b/src/trackers/TTG.py @@ -5,6 +5,7 @@ import re import os import cli_ui +import httpx from str2bool import str2bool from unidecode import unidecode from urllib.parse import urlparse @@ -179,31 +180,49 @@ async def upload(self, meta, disctype): async def search_existing(self, meta, disctype): dupes = [] - with requests.Session() as session: - cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/TTG.pkl") - with open(cookiefile, 'rb') as cf: - session.cookies.update(pickle.load(cf)) + cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/TTG.pkl") + if not os.path.exists(cookiefile): + console.print("[bold red]Cookie file not found: TTG.pkl") + return [] + with open(cookiefile, 'rb') as cf: + cookies = pickle.load(cf) - if int(meta['imdb_id'].replace('tt', '')) != 0: - imdb = f"imdb{meta['imdb_id'].replace('tt', '')}" - else: - imdb = "" - if meta.get('is_disc', '') == "BDMV": - res_type = f"{meta['resolution']} Blu-ray" - elif meta.get('is_disc', '') == "DVD": - res_type = "DVD" - else: - res_type = meta['resolution'] - search_url = f"https://totheglory.im/browse.php?search_field= {imdb} {res_type}" - r = session.get(search_url) - await asyncio.sleep(0.5) - soup = BeautifulSoup(r.text, 'html.parser') - find = soup.find_all('a', href=True) - for each in find: - if each['href'].startswith('/t/'): - release = re.search(r"()()?(.*))()?(.*)= 0.05: - dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=self.search_url, params=params) + if response.status_code == 200: + data = response.json() + for each in data['data']: + result = [each][0]['attributes']['name'] + dupes.append(result) + else: + console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(5) return dupes - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - Name = meta['name'] - quoted_name = f'"{Name}"' - - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': quoted_name - } - - try: - response = requests.get(url=self.search_url, params=params) - response.raise_for_status() - response_data = response.json() - - if response_data['data'] and isinstance(response_data['data'], list): - details_link = response_data['data'][0]['attributes'].get('details_link') - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) - - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) - - return details_link - else: - return None - else: - return None - - except requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None diff --git a/src/trackers/UNIT3D_TEMPLATE.py b/src/trackers/UNIT3D_TEMPLATE.py index d4a530e2..fd0a4682 100644 --- a/src/trackers/UNIT3D_TEMPLATE.py +++ b/src/trackers/UNIT3D_TEMPLATE.py @@ -4,7 +4,7 @@ import requests import platform from str2bool import str2bool -import bencodepy +import httpx from src.trackers.COMMON import COMMON from src.console import console @@ -31,6 +31,7 @@ def __init__(self, config): self.source_flag = 'Source flag for .torrent' self.upload_url = 'https://domain.tld/api/torrents/upload' self.search_url = 'https://domain.tld/api/torrents/filter' + self.torrent_url = 'https://domain.tld/torrents/' self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" self.banned_groups = [""] pass @@ -143,6 +144,9 @@ async def upload(self, meta, disctype): response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) + # adding torrent link to comment of torrent file + t_id = response.json()['data'].split(".")[1].split("/")[3] + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), self.torrent_url + t_id) except Exception: console.print("It may have uploaded, go check") return @@ -165,54 +169,21 @@ async def search_existing(self, meta, disctype): if meta.get('edition', "") != "": params['name'] = params['name'] + f" {meta['edition']}" try: - response = requests.get(url=self.search_url, params=params) - response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] - # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() - # if difference >= 0.05: - dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=self.search_url, params=params) + if response.status_code == 200: + data = response.json() + for each in data['data']: + result = [each][0]['attributes']['name'] + dupes.append(result) + else: + console.print(f"[bold red]Failed to search torrents. HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(5) return dupes - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - Name = meta['name'] - quoted_name = f'"{Name}"' - - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': quoted_name - } - - try: - response = requests.get(url=self.search_url, params=params) - response.raise_for_status() - response_data = response.json() - - if response_data['data'] and isinstance(response_data['data'], list): - details_link = response_data['data'][0]['attributes'].get('details_link') - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) - - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) - - return details_link - else: - return None - else: - return None - - except requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None diff --git a/src/trackers/UTP.py b/src/trackers/UTP.py index 375fc217..bbe1ad13 100644 --- a/src/trackers/UTP.py +++ b/src/trackers/UTP.py @@ -4,9 +4,9 @@ import requests from str2bool import str2bool import platform -import bencodepy import os import glob +import httpx from src.trackers.COMMON import COMMON from src.console import console @@ -110,9 +110,11 @@ async def upload(self, meta, disctype): response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) + # adding torrent link to comment of torrent file + t_id = response.json()['data'].split(".")[1].split("/")[3] + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://utp.to/torrents/" + t_id) except Exception: console.print("It may have uploaded, go check") - return else: console.print("[cyan]Request Data:") @@ -165,54 +167,21 @@ async def search_existing(self, meta, disctype): if meta.get('edition', "") != "": params['name'] = params['name'] + f" {meta['edition']}" try: - response = requests.get(url=self.search_url, params=params) - response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] - # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() - # if difference >= 0.05: - dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=self.search_url, params=params) + if response.status_code == 200: + data = response.json() + for each in data['data']: + result = [each][0]['attributes']['name'] + dupes.append(result) + else: + console.print(f"[bold red]Failed to search torrents. HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(5) return dupes - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - Name = meta['name'] - quoted_name = f'"{Name}"' - - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': quoted_name - } - - try: - response = requests.get(url=self.search_url, params=params) - response.raise_for_status() - response_data = response.json() - - if response_data['data'] and isinstance(response_data['data'], list): - details_link = response_data['data'][0]['attributes'].get('details_link') - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) - - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) - - return details_link - else: - return None - else: - return None - - except requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None diff --git a/src/trackers/YOINK.py b/src/trackers/YOINK.py index a6d2cc1d..5a82afda 100644 --- a/src/trackers/YOINK.py +++ b/src/trackers/YOINK.py @@ -4,9 +4,9 @@ import requests from str2bool import str2bool import platform -import bencodepy import os import glob +import httpx from src.trackers.COMMON import COMMON from src.console import console @@ -144,6 +144,9 @@ async def upload(self, meta, disctype): response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) + # adding torrent link to comment of torrent file + t_id = response.json()['data'].split(".")[1].split("/")[3] + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://yoinked.org/torrents/" + t_id) except Exception: console.print("It may have uploaded, go check") return @@ -166,54 +169,21 @@ async def search_existing(self, meta, disctype): if meta.get('edition', "") != "": params['name'] = params['name'] + f" {meta['edition']}" try: - response = requests.get(url=self.search_url, params=params) - response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] - # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() - # if difference >= 0.05: - dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=self.search_url, params=params) + if response.status_code == 200: + data = response.json() + for each in data['data']: + result = [each][0]['attributes']['name'] + dupes.append(result) + else: + console.print(f"[bold red]Failed to search torrents. HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") await asyncio.sleep(5) return dupes - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - Name = meta['name'] - quoted_name = f'"{Name}"' - - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': quoted_name - } - - try: - response = requests.get(url=self.search_url, params=params) - response.raise_for_status() - response_data = response.json() - - if response_data['data'] and isinstance(response_data['data'], list): - details_link = response_data['data'][0]['attributes'].get('details_link') - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) - - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) - - return details_link - else: - return None - else: - return None - - except requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None diff --git a/src/trackersetup.py b/src/trackersetup.py index 1f478799..a3c3de49 100644 --- a/src/trackersetup.py +++ b/src/trackersetup.py @@ -98,13 +98,6 @@ def check_banned_group(self, tracker, banned_group_list, meta): 'TIK': TIK, 'TL': TL, 'TVC': TVC, 'TTG': TTG, 'ULCX': ULCX, 'UTP': UTP, 'YOINK': YOINK, } -tracker_capabilities = { - 'AITHER': {'mod_q': True, 'draft': False}, - 'BHD': {'draft_live': True}, - 'BLU': {'mod_q': True, 'draft': False}, - 'LST': {'mod_q': True, 'draft': True} -} - api_trackers = { 'ACM', 'AITHER', 'AL', 'BHD', 'BLU', 'CBR', 'FNP', 'HHD', 'HUNO', 'JPTV', 'LCD', 'LST', 'LT', 'OE', 'OTW', 'PSS', 'RF', 'R4E', 'SHRI', 'STC', 'STT', 'TIK', 'ULCX', 'UTP', 'YOINK' diff --git a/src/trackerstatus.py b/src/trackerstatus.py new file mode 100644 index 00000000..39261c6c --- /dev/null +++ b/src/trackerstatus.py @@ -0,0 +1,140 @@ +import asyncio +import os +from torf import Torrent +from src.trackers.PTP import PTP +from src.trackersetup import TRACKER_SETUP, tracker_class_map +from src.console import console +from data.config import config +from src.trackers.COMMON import COMMON +from src.clients import Clients +from src.uphelper import UploadHelper +from src.imdb import get_imdb_info_api +from src.torrentcreate import create_base_from_existing_torrent +import cli_ui +import copy + + +async def process_all_trackers(meta): + tracker_status = {} + successful_trackers = 0 + common = COMMON(config=config) + client = Clients(config=config) + tracker_setup = TRACKER_SETUP(config=config) + helper = UploadHelper() + meta_lock = asyncio.Lock() # noqa F841 + + async def 
process_single_tracker(tracker_name, shared_meta): + nonlocal successful_trackers + local_meta = copy.deepcopy(shared_meta) # Ensure each task gets its own copy of meta + local_tracker_status = {'banned': False, 'skipped': False, 'dupe': False, 'upload': False} + disctype = local_meta.get('disctype', None) + tracker_name = tracker_name.replace(" ", "").upper().strip() + + if local_meta['name'].endswith('DUPE?'): + local_meta['name'] = local_meta['name'].replace(' DUPE?', '') + + if tracker_name in tracker_class_map: + tracker_class = tracker_class_map[tracker_name](config=config) + if tracker_name in {"THR", "PTP"}: + if local_meta.get('imdb_id', '0') == '0': + imdb_id = "0000000" if local_meta['unattended'] else cli_ui.ask_string("Unable to find IMDB id, please enter e.g.(tt1234567)") + meta['imdb_id'] = imdb_id.replace('tt', '').zfill(7) + + if tracker_name == "PTP": + console.print("[yellow]Searching for Group ID") + ptp = PTP(config=config) + groupID = await ptp.get_group_by_imdb(local_meta['imdb_id']) + if groupID is None: + console.print("[yellow]No Existing Group found") + if local_meta.get('youtube', None) is None or "youtube" not in str(local_meta.get('youtube', '')): + youtube = "" if local_meta['unattended'] else cli_ui.ask_string("Unable to find youtube trailer, please link one e.g.(https://www.youtube.com/watch?v=dQw4w9WgXcQ)", default="") + meta['youtube'] = youtube + meta['ptp_groupID'] = groupID + + if tracker_setup.check_banned_group(tracker_class.tracker, tracker_class.banned_groups, local_meta): + console.print(f"[red]Tracker '{tracker_name}' is banned. Skipping.[/red]") + local_tracker_status['banned'] = True + + if tracker_name not in {"THR", "PTP", "TL"}: + dupes = await tracker_class.search_existing(local_meta, disctype) + elif tracker_name == "PTP": + dupes = await ptp.search_existing(groupID, local_meta, disctype) + + if 'skipping' not in local_meta or local_meta['skipping'] is None: + dupes = await common.filter_dupes(dupes, local_meta, tracker_name) + local_meta, is_dupe = await helper.dupe_check(dupes, local_meta, tracker_name) + if is_dupe: + console.print(f"[red]Skipping upload on {tracker_name}[/red]") + local_tracker_status['dupe'] = True + elif local_meta['skipping']: + local_tracker_status['skipped'] = True + + if tracker_name == "MTV": + if not local_tracker_status['banned'] and not local_tracker_status['skipped'] and not local_tracker_status['dupe']: + tracker_config = config['TRACKERS'].get(tracker_name, {}) + if str(tracker_config.get('prefer_mtv_torrent', 'false')).lower() == "true": + local_meta['prefer_small_pieces'] = True + else: + local_meta['prefer_small_pieces'] = False + if str(tracker_config.get('skip_if_rehash', 'false')).lower() == "true": + torrent_path = os.path.abspath(f"{local_meta['base_dir']}/tmp/{local_meta['uuid']}/BASE.torrent") + if not os.path.exists(torrent_path): + check_torrent = await client.find_existing_torrent(local_meta) + if check_torrent: + console.print(f"[yellow]Existing torrent found on {check_torrent}[yellow]") + await create_base_from_existing_torrent(check_torrent, local_meta['base_dir'], local_meta['uuid']) + torrent = Torrent.read(torrent_path) + if torrent.piece_size > 8388608: + console.print("[yellow]No existing torrent found with piece size lesser than 8MB[yellow]") + local_tracker_status['skipped'] = True + elif os.path.exists(torrent_path): + torrent = Torrent.read(torrent_path) + if torrent.piece_size > 8388608: + console.print("[yellow]Existing torrent found with piece size greater than 8MB[yellow]") + 
local_tracker_status['skipped'] = True + + if local_meta.get('skipping') is None and not local_tracker_status['dupe'] and tracker_name == "PTP": + if local_meta.get('imdb_info', {}) == {}: + meta['imdb_info'] = await get_imdb_info_api(local_meta['imdb_id'], local_meta) + + if not local_meta['debug']: + if not local_tracker_status['banned'] and not local_tracker_status['skipped'] and not local_tracker_status['dupe']: + console.print(f"[bold yellow]Tracker '{tracker_name}' passed all checks.") + if not local_meta['unattended'] or (local_meta['unattended'] and local_meta.get('unattended-confirm', False)): + edit_choice = "y" if local_meta['unattended'] else input("Enter 'y' to upload, or press enter to skip uploading:") + if edit_choice.lower() == 'y': + local_tracker_status['upload'] = True + successful_trackers += 1 + else: + local_tracker_status['upload'] = False + else: + local_tracker_status['upload'] = True + successful_trackers += 1 + else: + local_tracker_status['upload'] = True + successful_trackers += 1 + + return tracker_name, local_tracker_status + + if meta.get('unattended', False): + tasks = [process_single_tracker(tracker_name, meta) for tracker_name in meta['trackers']] + results = await asyncio.gather(*tasks) + for tracker_name, status in results: + tracker_status[tracker_name] = status + else: + for tracker_name in meta['trackers']: + tracker_name, status = await process_single_tracker(tracker_name, meta) + tracker_status[tracker_name] = status + + if meta['debug']: + console.print("\n[bold]Tracker Processing Summary:[/bold]") + for t_name, status in tracker_status.items(): + banned_status = 'Yes' if status['banned'] else 'No' + skipped_status = 'Yes' if status['skipped'] else 'No' + dupe_status = 'Yes' if status['dupe'] else 'No' + upload_status = 'Yes' if status['upload'] else 'No' + console.print(f"Tracker: {t_name} | Banned: {banned_status} | Skipped: {skipped_status} | Dupe: {dupe_status} | [yellow]Upload:[/yellow] {upload_status}") + console.print(f"\n[bold]Trackers Passed all Checks:[/bold] {successful_trackers}") + + meta['tracker_status'] = tracker_status + return successful_trackers diff --git a/src/tvmaze.py b/src/tvmaze.py new file mode 100644 index 00000000..f6f9b40e --- /dev/null +++ b/src/tvmaze.py @@ -0,0 +1,115 @@ +from src.console import console +import requests +import json + + +async def search_tvmaze(filename, year, imdbID, tvdbID, meta): + try: + tvdbID = int(tvdbID) if tvdbID is not None else 0 + except ValueError: + print(f"Error: tvdbID is not a valid integer. 
Received: {tvdbID}") + tvdbID = 0 + + if meta.get('tvmaze_manual'): + tvmazeID = int(meta['tvmaze_manual']) + return tvmazeID, imdbID, tvdbID + else: + tvmazeID = 0 + results = [] + + if imdbID is None: + imdbID = '0' + + if meta['manual_date'] is None: + if int(tvdbID) != 0: + tvdb_resp = await _make_tvmaze_request("https://api.tvmaze.com/lookup/shows", {"thetvdb": tvdbID}, meta) + if tvdb_resp: + results.append(tvdb_resp) + else: + if int(imdbID) != 0: + imdb_resp = await _make_tvmaze_request("https://api.tvmaze.com/lookup/shows", {"imdb": f"tt{imdbID}"}, meta) + if imdb_resp: + results.append(imdb_resp) + else: + search_resp = await _make_tvmaze_request("https://api.tvmaze.com/search/shows", {"q": filename}, meta) + if search_resp: + if isinstance(search_resp, list): + results.extend([each['show'] for each in search_resp if 'show' in each]) + else: + results.append(search_resp) + else: + if int(tvdbID) != 0: + tvdb_resp = await _make_tvmaze_request("https://api.tvmaze.com/lookup/shows", {"thetvdb": tvdbID}, meta) + if tvdb_resp: + results.append(tvdb_resp) + if int(imdbID) != 0: + imdb_resp = await _make_tvmaze_request("https://api.tvmaze.com/lookup/shows", {"imdb": f"tt{imdbID}"}, meta) + if imdb_resp: + results.append(imdb_resp) + search_resp = await _make_tvmaze_request("https://api.tvmaze.com/search/shows", {"q": filename}, meta) + if search_resp: + if isinstance(search_resp, list): + results.extend([each['show'] for each in search_resp if 'show' in each]) + else: + results.append(search_resp) + + seen = set() + unique_results = [] + for show in results: + if show['id'] not in seen: + seen.add(show['id']) + unique_results.append(show) + results = unique_results + + if not results: + if meta['debug']: + print("No results found.") + return tvmazeID, imdbID, tvdbID + + if meta['manual_date'] is not None: + print("Search results:") + for idx, show in enumerate(results): + console.print(f"[bold red]{idx + 1}[/bold red]. [green]{show.get('name', 'Unknown')} (TVmaze ID:[/green] [bold red]{show['id']}[/bold red])") + console.print(f"[yellow] Premiered: {show.get('premiered', 'Unknown')}[/yellow]") + console.print(f" Externals: {json.dumps(show.get('externals', {}), indent=2)}") + + while True: + try: + choice = int(input(f"Enter the number of the correct show (1-{len(results)}) or 0 to skip: ")) + if choice == 0: + print("Skipping selection.") + break + if 1 <= choice <= len(results): + selected_show = results[choice - 1] + tvmazeID = selected_show['id'] + print(f"Selected show: {selected_show.get('name')} (TVmaze ID: {tvmazeID})") + break + else: + print(f"Invalid choice. Please choose a number between 1 and {len(results)}, or 0 to skip.") + except ValueError: + print("Invalid input. 
Please enter a number.") + else: + selected_show = results[0] + tvmazeID = selected_show['id'] + if meta['debug']: + print(f"Automatically selected show: {selected_show.get('name')} (TVmaze ID: {tvmazeID})") + + if meta['debug']: + print(f"Returning results - TVmaze ID: {tvmazeID}, IMDb ID: {imdbID}, TVDB ID: {tvdbID}") + return tvmazeID, imdbID, tvdbID + + +async def _make_tvmaze_request(url, params, meta): + if meta['debug']: + print(f"Requesting TVmaze API: {url} with params: {params}") + try: + resp = requests.get(url, params=params) + if resp.ok: + return resp.json() + else: + if meta['debug']: + print(f"HTTP Request failed with status code: {resp.status_code}, response: {resp.text}") + return None + except Exception as e: + print(f"Error making TVmaze request: {e}") + return None diff --git a/src/uphelper.py b/src/uphelper.py index f3a07213..37c32482 100644 --- a/src/uphelper.py +++ b/src/uphelper.py @@ -6,19 +6,19 @@ class UploadHelper: - def dupe_check(self, dupes, meta, tracker_name): + async def dupe_check(self, dupes, meta, tracker_name): if not dupes: console.print("[green]No dupes found") meta['upload'] = True return meta, False else: - console.print() - dupe_text = "\n".join([d['name'] if isinstance(d, dict) else d for d in dupes]) - console.print() - cli_ui.info_section(cli_ui.bold, f"Check if these are actually dupes from {tracker_name}!") - cli_ui.info(dupe_text) - if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): + console.print() + dupe_text = "\n".join([d['name'] if isinstance(d, dict) else d for d in dupes]) + console.print() + cli_ui.info_section(cli_ui.bold, f"Check if these are actually dupes from {tracker_name}!") + cli_ui.info(dupe_text) + console.print() if meta.get('dupe', False) is False: print() upload = cli_ui.ask_yes_no(f"Upload to {tracker_name} anyway?", default=False) @@ -26,18 +26,16 @@ def dupe_check(self, dupes, meta, tracker_name): upload = True else: if meta.get('dupe', False) is False: - console.print("[red]Found potential dupes. Aborting. If this is not a dupe, or you would like to upload anyways, pass --skip-dupe-check") + console.print(f"[red]Found potential dupes on {tracker_name}. Aborting. If this is not a dupe, or you would like to upload anyways, pass --skip-dupe-check") upload = False else: - console.print("[yellow]Found potential dupes. --skip-dupe-check was passed. Uploading anyways") + console.print(f"[yellow]Found potential dupes on {tracker_name}. --skip-dupe-check was passed. 
Uploading anyways") upload = True console.print() if upload is False: - meta['upload'] = False return meta, True else: - meta['upload'] = True for each in dupes: each_name = each['name'] if isinstance(each, dict) else each if each_name == meta['name']: @@ -45,7 +43,7 @@ def dupe_check(self, dupes, meta, tracker_name): return meta, False - def get_confirmation(self, meta): + async def get_confirmation(self, meta): if meta['debug'] is True: console.print("[bold red]DEBUG: True") console.print(f"Prep material saved to {meta['base_dir']}/tmp/{meta['uuid']}") @@ -76,7 +74,7 @@ def get_confirmation(self, meta): console.print("[bold green]Personal Release![/bold green]") console.print() if meta.get('unattended', False) is False: - self.get_missing(meta) + await self.get_missing(meta) ring_the_bell = "\a" if config['DEFAULT'].get("sfx_on_prompt", True) is True else "" if ring_the_bell: console.print(ring_the_bell) @@ -100,7 +98,7 @@ def get_confirmation(self, meta): return confirm - def get_missing(self, meta): + async def get_missing(self, meta): info_notes = { 'edition': 'Special Edition/Release', 'description': "Please include Remux/Encode Notes if possible", diff --git a/src/uploadscreens.py b/src/uploadscreens.py new file mode 100644 index 00000000..63297951 --- /dev/null +++ b/src/uploadscreens.py @@ -0,0 +1,379 @@ +from src.console import console +from data.config import config +import os +import pyimgbox +import asyncio +import requests +import glob +import base64 +import time +from tqdm import tqdm +import sys +from concurrent.futures import ThreadPoolExecutor, as_completed + + +def upload_image_task(args): + image, img_host, config, meta = args + try: + timeout = 60 # Default timeout + img_url, raw_url, web_url = None, None, None + + if img_host == "imgbox": + try: + # Call the asynchronous imgbox_upload function + loop = asyncio.get_event_loop() + image_list = loop.run_until_complete( + imgbox_upload(os.getcwd(), [image], meta, return_dict={}) + ) + if image_list and all( + 'img_url' in img and 'raw_url' in img and 'web_url' in img for img in image_list + ): + img_url = image_list[0]['img_url'] + raw_url = image_list[0]['raw_url'] + web_url = image_list[0]['web_url'] + else: + return { + 'status': 'failed', + 'reason': "Imgbox upload failed. No valid URLs returned." 
+ } + except Exception as e: + return { + 'status': 'failed', + 'reason': f"Error during Imgbox upload: {str(e)}" + } + + elif img_host == "ptpimg": + payload = { + 'format': 'json', + 'api_key': config['DEFAULT']['ptpimg_api'] + } + files = [('file-upload[0]', open(image, 'rb'))] + headers = {'referer': 'https://ptpimg.me/index.php'} + response = requests.post( + "https://ptpimg.me/upload.php", headers=headers, data=payload, files=files, timeout=timeout + ) + response_data = response.json() + if response_data: + code = response_data[0]['code'] + ext = response_data[0]['ext'] + img_url = f"https://ptpimg.me/{code}.{ext}" + raw_url = img_url + web_url = img_url + + elif img_host == "imgbb": + url = "https://api.imgbb.com/1/upload" + try: + with open(image, "rb") as img_file: + encoded_image = base64.b64encode(img_file.read()).decode('utf8') + + data = { + 'key': config['DEFAULT']['imgbb_api'], + 'image': encoded_image, + } + + response = requests.post(url, data=data, timeout=timeout) + + if meta['debug']: + console.print(f"[yellow]Response status code: {response.status_code}") + console.print(f"[yellow]Response content: {response.content.decode('utf-8')}") + + response_data = response.json() + if response.status_code != 200 or not response_data.get('success'): + console.print("[yellow]imgbb failed, trying next image host") + return {'status': 'failed', 'reason': 'imgbb upload failed'} + + img_url = response_data['data'].get('medium', {}).get('url') or response_data['data']['thumb']['url'] + raw_url = response_data['data']['image']['url'] + web_url = response_data['data']['url_viewer'] + + if meta['debug']: + console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}") + + return {'status': 'success', 'img_url': img_url, 'raw_url': raw_url, 'web_url': web_url} + + except requests.exceptions.Timeout: + console.print("[red]Request timed out. The server took too long to respond.") + return {'status': 'failed', 'reason': 'Request timed out'} + + except ValueError as e: # JSON decoding error + console.print(f"[red]Invalid JSON response: {e}") + return {'status': 'failed', 'reason': 'Invalid JSON response'} + + except requests.exceptions.RequestException as e: + console.print(f"[red]Request failed with error: {e}") + return {'status': 'failed', 'reason': str(e)} + + elif img_host == "ptscreens": + url = "https://ptscreens.com/api/1/upload" + try: + files = { + 'source': ('file-upload[0]', open(image, 'rb')), + } + headers = { + 'X-API-Key': config['DEFAULT']['ptscreens_api'] + } + response = requests.post(url, headers=headers, files=files, timeout=timeout) + if meta['debug']: + console.print(f"[yellow]Response status code: {response.status_code}") + console.print(f"[yellow]Response content: {response.content.decode('utf-8')}") + + response_data = response.json() + if response_data.get('status_code') != 200: + console.print("[yellow]ptscreens failed, trying next image host") + return {'status': 'failed', 'reason': 'ptscreens upload failed'} + + img_url = response_data['image']['medium']['url'] + raw_url = response_data['image']['url'] + web_url = response_data['image']['url_viewer'] + if meta['debug']: + console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}") + + except requests.exceptions.Timeout: + console.print("[red]Request timed out. 
The server took too long to respond.") + return {'status': 'failed', 'reason': 'Request timed out'} + except requests.exceptions.RequestException as e: + console.print(f"[red]Request failed with error: {e}") + return {'status': 'failed', 'reason': str(e)} + + elif img_host == "oeimg": + url = "https://imgoe.download/api/1/upload" + try: + data = { + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + headers = { + 'X-API-Key': config['DEFAULT']['oeimg_api'], + } + response = requests.post(url, data=data, headers=headers, timeout=timeout) + if meta['debug']: + console.print(f"[yellow]Response status code: {response.status_code}") + console.print(f"[yellow]Response content: {response.content.decode('utf-8')}") + + response_data = response.json() + if response.status_code != 200 or not response_data.get('success'): + console.print("[yellow]OEimg failed, trying next image host") + return {'status': 'failed', 'reason': 'OEimg upload failed'} + + img_url = response_data['data']['image']['url'] + raw_url = response_data['data']['image']['url'] + web_url = response_data['data']['url_viewer'] + if meta['debug']: + console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}") + + except requests.exceptions.Timeout: + console.print("[red]Request timed out. The server took too long to respond.") + return {'status': 'failed', 'reason': 'Request timed out'} + except requests.exceptions.RequestException as e: + console.print(f"[red]Request failed with error: {e}") + return {'status': 'failed', 'reason': str(e)} + + elif img_host == "pixhost": + url = "https://api.pixhost.to/images" + data = { + 'content_type': '0', + 'max_th_size': 350 + } + files = { + 'img': ('file-upload[0]', open(image, 'rb')) + } + response = requests.post(url, data=data, files=files, timeout=timeout) + response_data = response.json() + if response.status_code == 200: + raw_url = response_data['th_url'].replace('https://t', 'https://img').replace('/thumbs/', '/images/') + img_url = response_data['th_url'] + web_url = response_data['show_url'] + + elif img_host == "lensdump": + url = "https://lensdump.com/api/1/upload" + data = { + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + headers = { + 'X-API-Key': config['DEFAULT']['lensdump_api'] + } + response = requests.post(url, data=data, headers=headers, timeout=timeout) + response_data = response.json() + if response_data.get('status_code') == 200: + img_url = response_data['data']['image']['url'] + raw_url = response_data['data']['image']['url'] + web_url = response_data['data']['url_viewer'] + + if img_url and raw_url and web_url: + return { + 'status': 'success', + 'img_url': img_url, + 'raw_url': raw_url, + 'web_url': web_url, + 'local_file_path': image + } + else: + return { + 'status': 'failed', + 'reason': f"Failed to upload image to {img_host}. No URLs received." 
+ } + + except Exception as e: + return { + 'status': 'failed', + 'reason': str(e) + } + + +def upload_screens(meta, screens, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=False, max_retries=3): + def use_tqdm(): + """Check if the environment supports TTY (interactive progress bar).""" + return sys.stdout.isatty() + + if meta['debug']: + upload_start_time = time.time() + + import nest_asyncio + nest_asyncio.apply() + os.chdir(f"{meta['base_dir']}/tmp/{meta['uuid']}") + initial_img_host = config['DEFAULT'][f'img_host_{img_host_num}'] + img_host = meta['imghost'] + using_custom_img_list = isinstance(custom_img_list, list) and bool(custom_img_list) + + if 'image_sizes' not in meta: + meta['image_sizes'] = {} + + if using_custom_img_list: + image_glob = custom_img_list + existing_images = [] + existing_count = 0 + else: + image_glob = glob.glob("*.png") + if 'POSTER.png' in image_glob: + image_glob.remove('POSTER.png') + image_glob = list(set(image_glob)) + if meta['debug']: + console.print("image globs:", image_glob) + + existing_images = [img for img in meta['image_list'] if img.get('img_url') and img.get('web_url')] + existing_count = len(existing_images) + + if not retry_mode: + images_needed = max(0, total_screens - existing_count) + else: + images_needed = total_screens + + if existing_count >= total_screens and not retry_mode and img_host == initial_img_host and not using_custom_img_list: + console.print(f"[yellow]Skipping upload because enough images are already uploaded to {img_host}. Existing images: {existing_count}, Required: {total_screens}") + return meta['image_list'], total_screens + + upload_tasks = [(image, img_host, config, meta) for image in image_glob[:images_needed]] + + host_limits = { + "oeimg": 6, + "ptscreens": 1, + "lensdump": 1, + } + default_pool_size = int(meta.get('task_limit', os.cpu_count())) + pool_size = host_limits.get(img_host, default_pool_size) + results = [] + max_workers = min(len(upload_tasks), pool_size, os.cpu_count()) + + with ThreadPoolExecutor(max_workers=max_workers) as executor: + future_to_task = {executor.submit(upload_image_task, task): task for task in upload_tasks} + + if sys.stdout.isatty(): # Check if running in terminal + with tqdm(total=len(upload_tasks), desc="Uploading Screenshots", ascii=True) as pbar: + for future in as_completed(future_to_task): + try: + result = future.result() + if result.get('status') == 'success': + results.append(result) + else: + console.print(f"[red]{result}") + except Exception as e: + console.print(f"[red]Error during upload: {str(e)}") + pbar.update(1) + else: + for future in as_completed(future_to_task): + result = future.result() + if not isinstance(result, str) or not result.startswith("Error"): + results.append(result) + else: + console.print(f"[red]{result}") + + # return meta['image_list'], len(meta['image_list']) + + successfully_uploaded = [] + for result in results: + if result['status'] == 'success': + successfully_uploaded.append(result) + else: + console.print(f"[yellow]Failed to upload: {result.get('reason', 'Unknown error')}") + + if len(successfully_uploaded) < meta.get('cutoff') and not retry_mode and img_host == initial_img_host and not using_custom_img_list: + img_host_num += 1 + if f'img_host_{img_host_num}' in config['DEFAULT']: + meta['imghost'] = config['DEFAULT'][f'img_host_{img_host_num}'] + console.print(f"[cyan]Switching to the next image host: {meta['imghost']}") + return upload_screens(meta, screens, img_host_num, i, total_screens, custom_img_list, 
+
+
+async def imgbox_upload(chdir, image_glob, meta, return_dict):
+    try:
+        os.chdir(chdir)
+        image_list = []
+
+        async with pyimgbox.Gallery(thumb_width=350, square_thumbs=False) as gallery:
+            for image in image_glob:
+                try:
+                    async for submission in gallery.add([image]):
+                        if not submission['success']:
+                            console.print(f"[red]Error uploading to imgbox: [yellow]{submission['error']}[/yellow][/red]")
+                        else:
+                            web_url = submission.get('web_url')
+                            img_url = submission.get('thumbnail_url')
+                            raw_url = submission.get('image_url')
+                            if web_url and img_url and raw_url:
+                                image_dict = {
+                                    'web_url': web_url,
+                                    'img_url': img_url,
+                                    'raw_url': raw_url
+                                }
+                                image_list.append(image_dict)
+                            else:
+                                console.print(f"[red]Incomplete URLs received for image: {image}")
+                except Exception as e:
+                    console.print(f"[red]Error during upload for {image}: {str(e)}")
+
+        return_dict['image_list'] = image_list
+        return image_list
+
+    except Exception as e:
+        console.print(f"[red]An error occurred while uploading images to imgbox: {str(e)}")
+        return []
diff --git a/upload.py b/upload.py
index 8fc5ab4b..33de2485 100644
--- a/upload.py
+++ b/upload.py
@@ -1,11 +1,7 @@
 #!/usr/bin/env python3
-
-import requests
 from src.args import Args
 from src.clients import Clients
-from src.trackers.COMMON import COMMON
-from src.trackers.THR import THR
-from src.trackers.PTP import PTP
+from src.uploadscreens import upload_screens
 import json
 from pathlib import Path
 import asyncio
@@ -13,18 +9,15 @@
 import sys
 import platform
 import shutil
-import glob
 import cli_ui
 import traceback
-import click
-import re
-from src.trackersetup import TRACKER_SETUP, tracker_class_map, api_trackers, other_api_trackers, http_trackers, tracker_capabilities
 import time
+from src.trackersetup import tracker_class_map, api_trackers, other_api_trackers, http_trackers
+from src.trackerhandle import process_trackers
+from src.queuemanage import handle_queue
 from src.console import console
-from rich.markdown import Markdown
-from rich.style import Style
-
+from src.torrentcreate import create_torrent, create_random_torrents, create_base_from_existing_torrent
 cli_ui.setup(color='always', title="Audionut's Upload Assistant")
@@ -46,114 +39,7 @@
 parser = Args(config)
-def get_log_file(base_dir, queue_name):
-    """
-    Returns the path to the log file for the given base directory and queue name.
- """ - safe_queue_name = queue_name.replace(" ", "_") - return os.path.join(base_dir, "tmp", f"{safe_queue_name}_processed_files.log") - - -def load_processed_files(log_file): - """ - Loads the list of processed files from the log file. - """ - if os.path.exists(log_file): - with open(log_file, "r") as f: - return set(json.load(f)) - return set() - - -def save_processed_file(log_file, file_path): - """ - Adds a processed file to the log. - """ - processed_files = load_processed_files(log_file) - processed_files.add(file_path) - with open(log_file, "w") as f: - json.dump(list(processed_files), f, indent=4) - - -def gather_files_recursive(path, allowed_extensions=None): - """ - Gather files and first-level subfolders. - Each subfolder is treated as a single unit, without exploring deeper. - """ - queue = [] - if os.path.isdir(path): - for entry in os.scandir(path): - if entry.is_dir(): - queue.append(entry.path) - elif entry.is_file() and (allowed_extensions is None or entry.name.lower().endswith(tuple(allowed_extensions))): - queue.append(entry.path) - elif os.path.isfile(path): - if allowed_extensions is None or path.lower().endswith(tuple(allowed_extensions)): - queue.append(path) - else: - console.print(f"[red]Invalid path: {path}") - return queue - - -def resolve_queue_with_glob_or_split(path, paths, allowed_extensions=None): - """ - Handle glob patterns and split path resolution. - Treat subfolders as single units and filter files by allowed_extensions. - """ - queue = [] - if os.path.exists(os.path.dirname(path)) and len(paths) <= 1: - escaped_path = path.replace('[', '[[]') - queue = [ - file for file in glob.glob(escaped_path) - if os.path.isdir(file) or (os.path.isfile(file) and (allowed_extensions is None or file.lower().endswith(tuple(allowed_extensions)))) - ] - if queue: - display_queue(queue) - elif os.path.exists(os.path.dirname(path)) and len(paths) > 1: - queue = [ - file for file in paths - if os.path.isdir(file) or (os.path.isfile(file) and (allowed_extensions is None or file.lower().endswith(tuple(allowed_extensions)))) - ] - display_queue(queue) - elif not os.path.exists(os.path.dirname(path)): - queue = [ - file for file in resolve_split_path(path) # noqa F8221 - if os.path.isdir(file) or (os.path.isfile(file) and (allowed_extensions is None or file.lower().endswith(tuple(allowed_extensions)))) - ] - display_queue(queue) - return queue - - -def extract_safe_file_locations(log_file): - """ - Parse the log file to extract file locations under the 'safe' header. - - :param log_file: Path to the log file to parse. - :return: List of file paths from the 'safe' section. 
- """ - safe_section = False - safe_file_locations = [] - - with open(log_file, 'r') as f: - for line in f: - line = line.strip() - - # Detect the start and end of 'safe' sections - if line.lower() == "safe": - safe_section = True - continue - elif line.lower() in {"danger", "risky"}: - safe_section = False - - # Extract 'File Location' if in a 'safe' section - if safe_section and line.startswith("File Location:"): - match = re.search(r"File Location:\s*(.+)", line) - if match: - safe_file_locations.append(match.group(1).strip()) - - return safe_file_locations - - -def merge_meta(meta, saved_meta, path): +async def merge_meta(meta, saved_meta, path): """Merges saved metadata with the current meta, respecting overwrite rules.""" with open(f"{base_dir}/tmp/{os.path.basename(path)}/meta.json") as f: saved_meta = json.load(f) @@ -180,26 +66,6 @@ def merge_meta(meta, saved_meta, path): return sanitized_saved_meta -def display_queue(queue, base_dir, queue_name, save_to_log=True): - """Displays the queued files in markdown format and optionally saves them to a log file in the tmp directory.""" - md_text = "\n - ".join(queue) - console.print("\n[bold green]Queuing these files:[/bold green]", end='') - console.print(Markdown(f"- {md_text.rstrip()}\n\n", style=Style(color='cyan'))) - console.print("\n\n") - - if save_to_log: - tmp_dir = os.path.join(base_dir, "tmp") - os.makedirs(tmp_dir, exist_ok=True) - log_file = os.path.join(tmp_dir, f"{queue_name}_queue.log") - - try: - with open(log_file, 'w') as f: - json.dump(queue, f, indent=4) - console.print(f"[bold green]Queue successfully saved to log file: {log_file}") - except Exception as e: - console.print(f"[bold red]Failed to save queue to log file: {e}") - - async def process_meta(meta, base_dir): """Process the metadata for each queued path.""" @@ -222,32 +88,63 @@ async def process_meta(meta, base_dir): if 'image_list' not in meta: meta['image_list'] = [] return_dict = {} - new_images, dummy_var = prep.upload_screens(meta, meta['screens'], 1, 0, meta['screens'], [], return_dict=return_dict) - - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: - json.dump(meta, f, indent=4) + new_images, dummy_var = upload_screens(meta, meta['screens'], 1, 0, meta['screens'], [], return_dict=return_dict) elif meta.get('skip_imghost_upload', False) is True and meta.get('image_list', False) is False: meta['image_list'] = [] + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: + json.dump(meta, f, indent=4) + torrent_path = os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") if not os.path.exists(torrent_path): reuse_torrent = None if meta.get('rehash', False) is False: reuse_torrent = await client.find_existing_torrent(meta) if reuse_torrent is not None: - prep.create_base_from_existing_torrent(reuse_torrent, meta['base_dir'], meta['uuid']) + await create_base_from_existing_torrent(reuse_torrent, meta['base_dir'], meta['uuid']) if meta['nohash'] is False and reuse_torrent is None: - prep.create_torrent(meta, Path(meta['path']), "BASE") + create_torrent(meta, Path(meta['path']), "BASE") if meta['nohash']: meta['client'] = "none" elif os.path.exists(torrent_path) and meta.get('rehash', False) is True and meta['nohash'] is False: - prep.create_torrent(meta, Path(meta['path']), "BASE") + create_torrent(meta, Path(meta['path']), "BASE") if int(meta.get('randomized', 0)) >= 1: - prep.create_random_torrents(meta['base_dir'], meta['uuid'], meta['randomized'], meta['path']) + 
create_random_torrents(meta['base_dir'], meta['uuid'], meta['randomized'], meta['path']) + + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: + json.dump(meta, f, indent=4) + + +async def get_log_file(base_dir, queue_name): + """ + Returns the path to the log file for the given base directory and queue name. + """ + safe_queue_name = queue_name.replace(" ", "_") + return os.path.join(base_dir, "tmp", f"{safe_queue_name}_processed_files.log") + + +async def load_processed_files(log_file): + """ + Loads the list of processed files from the log file. + """ + if os.path.exists(log_file): + with open(log_file, "r") as f: + return set(json.load(f)) + return set() + + +async def save_processed_file(log_file, file_path): + """ + Adds a processed file to the log. + """ + processed_files = await load_processed_files(log_file) + processed_files.add(file_path) + with open(log_file, "w") as f: + json.dump(list(processed_files), f, indent=4) async def do_the_thing(base_dir): @@ -271,168 +168,8 @@ async def do_the_thing(base_dir): path = os.path.abspath(path) if path.endswith('"'): path = path[:-1] - queue = [] - - log_file = os.path.join(base_dir, "tmp", f"{meta['queue']}_queue.log") - allowed_extensions = ['.mkv', '.mp4', '.ts'] - - if path.endswith('.txt') and meta.get('unit3d'): - console.print(f"[bold yellow]Detected a text file for queue input: {path}[/bold yellow]") - if os.path.exists(path): - safe_file_locations = extract_safe_file_locations(path) - if safe_file_locations: - console.print(f"[cyan]Extracted {len(safe_file_locations)} safe file locations from the text file.[/cyan]") - queue = safe_file_locations - meta['queue'] = "unit3d" - - # Save the queue to the log file - try: - with open(log_file, 'w') as f: - json.dump(queue, f, indent=4) - console.print(f"[bold green]Queue log file saved successfully: {log_file}[/bold green]") - except IOError as e: - console.print(f"[bold red]Failed to save the queue log file: {e}[/bold red]") - exit(1) - else: - console.print("[bold red]No safe file locations found in the text file. Exiting.[/bold red]") - exit(1) - else: - console.print(f"[bold red]Text file not found: {path}. Exiting.[/bold red]") - exit(1) - elif path.endswith('.log') and meta['debug']: - console.print(f"[bold yellow]Processing debugging queue:[/bold yellow] [bold green{path}[/bold green]") - if os.path.exists(path): - log_file = path - with open(path, 'r') as f: - queue = json.load(f) - meta['queue'] = "debugging" - - else: - console.print(f"[bold red]Log file not found: {path}. 
Exiting.[/bold red]") - exit(1) - - elif meta.get('queue'): - meta, help, before_args = parser.parse(tuple(' '.join(sys.argv[1:]).split(' ')), meta) - if os.path.exists(log_file): - with open(log_file, 'r') as f: - existing_queue = json.load(f) - console.print(f"[bold yellow]Found an existing queue log file:[/bold yellow] [green]{log_file}[/green]") - console.print(f"[cyan]The queue log contains {len(existing_queue)} items.[/cyan]") - console.print("[cyan]Do you want to edit, discard, or keep the existing queue?[/cyan]") - edit_choice = input("Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: ").strip().lower() - - if edit_choice == 'e': - edited_content = click.edit(json.dumps(existing_queue, indent=4)) - if edited_content: - try: - queue = json.loads(edited_content.strip()) - console.print("[bold green]Successfully updated the queue from the editor.") - with open(log_file, 'w') as f: - json.dump(queue, f, indent=4) - except json.JSONDecodeError as e: - console.print(f"[bold red]Failed to parse the edited content: {e}. Using the original queue.") - queue = existing_queue - else: - console.print("[bold red]No changes were made. Using the original queue.") - queue = existing_queue - elif edit_choice == 'd': - console.print("[bold yellow]Discarding the existing queue log. Creating a new queue.") - queue = [] - else: - console.print("[bold green]Keeping the existing queue as is.") - queue = existing_queue - else: - if os.path.exists(path): - queue = gather_files_recursive(path, allowed_extensions=allowed_extensions) - else: - queue = resolve_queue_with_glob_or_split(path, paths, allowed_extensions=allowed_extensions) - - console.print(f"[cyan]A new queue log file will be created:[/cyan] [green]{log_file}[/green]") - console.print(f"[cyan]The new queue will contain {len(queue)} items.[/cyan]") - console.print("[cyan]Do you want to edit the initial queue before saving?[/cyan]") - edit_choice = input("Enter 'e' to edit, or press Enter to save as is: ").strip().lower() - - if edit_choice == 'e': - edited_content = click.edit(json.dumps(queue, indent=4)) - if edited_content: - try: - queue = json.loads(edited_content.strip()) - console.print("[bold green]Successfully updated the queue from the editor.") - except json.JSONDecodeError as e: - console.print(f"[bold red]Failed to parse the edited content: {e}. Using the original queue.") - else: - console.print("[bold red]No changes were made. 
Using the original queue.") - - # Save the queue to the log file - with open(log_file, 'w') as f: - json.dump(queue, f, indent=4) - console.print(f"[bold green]Queue log file created: {log_file}[/bold green]") - - elif os.path.exists(path): - meta, help, before_args = parser.parse(tuple(' '.join(sys.argv[1:]).split(' ')), meta) - queue = [path] - - else: - # Search glob if dirname exists - if os.path.exists(os.path.dirname(path)) and len(paths) <= 1: - escaped_path = path.replace('[', '[[]') - globs = glob.glob(escaped_path) - queue = globs - if len(queue) != 0: - md_text = "\n - ".join(queue) - console.print("\n[bold green]Queuing these files:[/bold green]", end='') - console.print(Markdown(f"- {md_text.rstrip()}\n\n", style=Style(color='cyan'))) - console.print("\n\n") - else: - console.print(f"[red]Path: [bold red]{path}[/bold red] does not exist") - - elif os.path.exists(os.path.dirname(path)) and len(paths) != 1: - queue = paths - md_text = "\n - ".join(queue) - console.print("\n[bold green]Queuing these files:[/bold green]", end='') - console.print(Markdown(f"- {md_text.rstrip()}\n\n", style=Style(color='cyan'))) - console.print("\n\n") - elif not os.path.exists(os.path.dirname(path)): - split_path = path.split() - p1 = split_path[0] - for i, each in enumerate(split_path): - try: - if os.path.exists(p1) and not os.path.exists(f"{p1} {split_path[i + 1]}"): - queue.append(p1) - p1 = split_path[i + 1] - else: - p1 += f" {split_path[i + 1]}" - except IndexError: - if os.path.exists(p1): - queue.append(p1) - else: - console.print(f"[red]Path: [bold red]{p1}[/bold red] does not exist") - if len(queue) >= 1: - md_text = "\n - ".join(queue) - console.print("\n[bold green]Queuing these files:[/bold green]", end='') - console.print(Markdown(f"- {md_text.rstrip()}\n\n", style=Style(color='cyan'))) - console.print("\n\n") - - else: - # Add Search Here - console.print("[red]There was an issue with your input. 
If you think this was not an issue, please make a report that includes the full command used.") - exit() - - if not queue: - console.print(f"[red]No valid files or directories found for path: {path}") - exit(1) - - if meta.get('queue'): - queue_name = meta['queue'] - log_file = get_log_file(base_dir, meta['queue']) - processed_files = load_processed_files(log_file) - queue = [file for file in queue if file not in processed_files] - if not queue: - console.print(f"[bold yellow]All files in the {meta['queue']} queue have already been processed.") - exit(0) - if meta['debug']: - display_queue(queue, base_dir, queue_name, save_to_log=False) + queue, log_file = await handle_queue(path, meta, paths, base_dir) processed_files_count = 0 base_meta = {k: v for k, v in meta.items()} @@ -451,7 +188,7 @@ async def do_the_thing(base_dir): if os.path.exists(meta_file): with open(meta_file, "r") as f: saved_meta = json.load(f) - meta.update(merge_meta(meta, saved_meta, path)) + meta.update(await merge_meta(meta, saved_meta, path)) else: if meta['debug']: console.print(f"[yellow]No metadata file found at {meta_file}") @@ -459,7 +196,7 @@ async def do_the_thing(base_dir): except Exception as e: console.print(f"[red]Failed to load metadata for path '{path}': {e}") if meta['debug']: - upload_start_time = time.time() + start_time = time.time() console.print(f"[green]Gathering info for {os.path.basename(path)}") await process_meta(meta, base_dir) if 'we_are_uploading' not in meta: @@ -469,169 +206,19 @@ async def do_the_thing(base_dir): console.print(f"[cyan]Processed {processed_files_count}/{total_files} files.") if not meta['debug']: if log_file: - save_processed_file(log_file, path) + await save_processed_file(log_file, path) else: - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config) - - #################################### - ####### Upload to Trackers ####### # noqa #F266 - #################################### - - common = COMMON(config=config) - tracker_setup = TRACKER_SETUP(config=config) - enabled_trackers = tracker_setup.trackers_enabled(meta) - - async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): - modq, draft = None, None - - tracker_caps = tracker_capabilities.get(tracker_class.tracker, {}) - - # Handle BHD specific draft/live logic - if tracker_class.tracker == 'BHD' and tracker_caps.get('draft_live'): - draft_int = await tracker_class.get_live(meta) - draft = "Draft" if draft_int == 0 else "Live" - - # Handle mod_q and draft for other trackers - else: - if tracker_caps.get('mod_q'): - modq = await tracker_class.get_flag(meta, 'modq') - modq = 'Yes' if modq else 'No' - if tracker_caps.get('draft'): - draft = await tracker_class.get_flag(meta, 'draft') - draft = 'Yes' if draft else 'No' - - return modq, draft - - for tracker in enabled_trackers: - disctype = meta.get('disctype', None) - tracker = tracker.replace(" ", "").upper().strip() - if meta['name'].endswith('DUPE?'): - meta['name'] = meta['name'].replace(' DUPE?', '') - - if meta['debug']: - debug = "(DEBUG)" - else: - debug = "" - - if tracker in api_trackers: - tracker_class = tracker_class_map[tracker](config=config) - tracker_status = meta.get('tracker_status', {}) - upload_status = tracker_status.get(tracker, {}).get('upload', False) - console.print(f"[yellow]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[/yellow]") - - if upload_status: - modq, draft = await check_mod_q_and_draft(tracker_class, meta, debug, disctype) - - if modq is not None: - console.print(f"(modq: {modq})") 
- if draft is not None: - console.print(f"(draft: {draft})") - - console.print(f"Uploading to {tracker_class.tracker}") - if meta['debug']: - upload_finish_time = time.time() - console.print(f"Upload from Audionut UA processed in {upload_finish_time - upload_start_time:.2f} seconds") - await tracker_class.upload(meta, disctype) - await asyncio.sleep(0.5) - perm = config['DEFAULT'].get('get_permalink', False) - if perm: - # need a wait so we don't race the api - await asyncio.sleep(5) - await tracker_class.search_torrent_page(meta, disctype) - await asyncio.sleep(0.5) - await client.add_to_client(meta, tracker_class.tracker) - - if tracker in other_api_trackers: - tracker_class = tracker_class_map[tracker](config=config) - tracker_status = meta.get('tracker_status', {}) - upload_status = tracker_status.get(tracker, {}).get('upload', False) - console.print(f"[yellow]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[/yellow]") - - if upload_status: - console.print(f"Uploading to {tracker_class.tracker}") - - if tracker != "TL": - if tracker == "RTF": - await tracker_class.api_test(meta) - if tracker == "TL" or upload_status: - await tracker_class.upload(meta, disctype) - if tracker == 'SN': - await asyncio.sleep(16) - await asyncio.sleep(0.5) - await client.add_to_client(meta, tracker_class.tracker) - - if tracker in http_trackers: - tracker_class = tracker_class_map[tracker](config=config) - tracker_status = meta.get('tracker_status', {}) - upload_status = tracker_status.get(tracker, {}).get('upload', False) - console.print(f"[yellow]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[/yellow]") - - if upload_status: - console.print(f"Uploading to {tracker}") - - if await tracker_class.validate_credentials(meta) is True: - await tracker_class.upload(meta, disctype) - await asyncio.sleep(0.5) - await client.add_to_client(meta, tracker_class.tracker) - - if tracker == "MANUAL": - if meta['unattended']: - do_manual = True - else: - do_manual = cli_ui.ask_yes_no("Get files for manual upload?", default=True) - if do_manual: - for manual_tracker in enabled_trackers: - if manual_tracker != 'MANUAL': - manual_tracker = manual_tracker.replace(" ", "").upper().strip() - tracker_class = tracker_class_map[manual_tracker](config=config) - if manual_tracker in api_trackers: - await common.unit3d_edit_desc(meta, tracker_class.tracker, tracker_class.signature) - else: - await tracker_class.edit_desc(meta) - url = await prep.package(meta) - if url is False: - console.print(f"[yellow]Unable to upload prep files, they can be found at `tmp/{meta['uuid']}") - else: - console.print(f"[green]{meta['name']}") - console.print(f"[green]Files can be found at: [yellow]{url}[/yellow]") - - if tracker == "THR": - tracker_status = meta.get('tracker_status', {}) - upload_status = tracker_status.get(tracker, {}).get('upload', False) - print(f"[yellow]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[/yellow]") - - if upload_status: - thr = THR(config=config) - try: - with requests.Session() as session: - console.print("[yellow]Logging in to THR") - session = thr.login(session) - await thr.upload(session, meta, disctype) - await asyncio.sleep(0.5) - await client.add_to_client(meta, "THR") - except Exception: - console.print(traceback.format_exc()) - - if tracker == "PTP": - tracker_status = meta.get('tracker_status', {}) - upload_status = tracker_status.get(tracker, {}).get('upload', False) - print(f"[yellow]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[yellow]") - - if 
upload_status: - ptp = PTP(config=config) - groupID = meta['ptp_groupID'] - ptpUrl, ptpData = await ptp.fill_upload_form(groupID, meta) - await ptp.upload(meta, ptpUrl, ptpData, disctype) - await asyncio.sleep(5) - await client.add_to_client(meta, "PTP") - + await process_trackers(meta, config, client, console, api_trackers, tracker_class_map, http_trackers, other_api_trackers) if meta.get('queue') is not None: processed_files_count += 1 console.print(f"[cyan]Processed {processed_files_count}/{total_files} files.") if not meta['debug']: if log_file: - save_processed_file(log_file, path) + await save_processed_file(log_file, path) + if meta['debug']: + finish_time = time.time() + console.print(f"Uploads processed in {finish_time - start_time:.4f} seconds") if __name__ == '__main__':